2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
/* Forward declarations for the per-level page table lookup helpers defined
 * later in this file.  Each takes a table pointer, a virtual address, and an
 * out-parameter that receives the next-level table (or terminal page) base. */
static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);

static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);

static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);

/* USE_VMM_PAGING_DEBUG gates the debug-helper definitions inside the header,
 * so they are instantiated exactly once, in this translation unit. */
#define USE_VMM_PAGING_DEBUG
// All of the debug functions defined in vmm_paging.h are implemented in this file
#include "vmm_paging_debug.h"
#undef USE_VMM_PAGING_DEBUG

/* Compile PrintDebug out entirely unless shadow-paging debugging is enabled. */
#ifndef DEBUG_SHADOW_PAGING
#define PrintDebug(fmt, args...)
/* Tear down a 32-bit (non-PAE) passthrough/shadow page hierarchy: free each
 * page table referenced by the directory, then the directory page itself.
 * NOTE(review): interior lines (loop-variable declaration, present check,
 * the per-PTE free, closing braces) are elided in this view of the file. */
void delete_page_tables_32(pde32_t * pde) {
    for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
	// We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
	PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
	// Reconstruct the PTE page's address from the PDE's page-frame field.
	pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
	PrintDebug("Deleting PTE %d (%p)\n", i, pte);
    PrintDebug("Deleting PDE (%p)\n", pde);
    // Finally release the directory page itself (free takes a physical addr).
    V3_FreePage(V3_PAddr(pde));
/* 32-bit PAE hierarchy teardown: not implemented yet; logs and does nothing. */
void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
    PrintError("Unimplemented function\n");
/* 64-bit (long mode) hierarchy teardown: likewise unimplemented. */
void delete_page_tables_64(pml4e64_t * pml4) {
    PrintError("Unimplemented function\n");
89 static int translate_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
90 addr_t * paddr = (addr_t *)private_data;
97 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
100 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
103 PrintError("Inavlid page type (%s) in tranlate pt 32 callback\n", v3_page_type_to_str(type));
108 static int translate_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
109 addr_t * paddr = (addr_t *)private_data;
117 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
120 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
123 PrintError("Inavlid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
128 static int translate_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
129 addr_t * paddr = (addr_t *)private_data;
138 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
141 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
144 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
147 PrintError("Inavlid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
/* Translate vaddr through the HOST 32-bit tables rooted at host_cr3.
 * On success *paddr holds the physical address; the return value is the
 * drill result (-1 on failure / not-present). */
int v3_translate_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_32(host_cr3, vaddr, translate_pt_32_cb, paddr);
/* Guest 32-bit variant: guest physical addresses are resolved through the
 * guest's memory map during the walk. */
int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
/* Host 32-bit PAE translation. */
int v3_translate_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_32pae(host_cr3, vaddr, translate_pt_32pae_cb, paddr);
/* Guest 32-bit PAE translation. */
int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
/* Host 64-bit (long mode) translation. */
int v3_translate_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_64(host_cr3, vaddr, translate_pt_64_cb, paddr);
/* Guest 64-bit (long mode) translation. */
int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
/* State for find_pt_cb: which page-table level is being searched for and
 * where to store that level's mapped (pointer) address.
 * NOTE(review): the declaration of the 'type' member is elided in this view. */
struct pt_find_data {
    addr_t * pt_page_addr;

/* Drill callback: when the walk reaches the level the caller asked for,
 * capture that table's address into the caller's slot. */
static int find_pt_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
    struct pt_find_data * pt_data = (struct pt_find_data *)private_data;

    if (type == pt_data->type) {
	*(pt_data->pt_page_addr) = page_ptr;
/* Locate the host 32-bit page-table page of level 'type' mapping vaddr;
 * its address is written to *page_addr.
 * NOTE(review): the line initializing data.type is elided in this view. */
int v3_find_host_pt_32_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
    struct pt_find_data data;
    data.pt_page_addr = page_addr;
    return v3_drill_host_pt_32(host_cr3, vaddr, find_pt_cb, &data);
/* Same, walking the host 32-bit PAE tables. */
int v3_find_host_pt_32pae_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
    struct pt_find_data data;
    data.pt_page_addr = page_addr;
    return v3_drill_host_pt_32pae(host_cr3, vaddr, find_pt_cb, &data);
/* Same, walking the host 64-bit tables. */
int v3_find_host_pt_64_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
    struct pt_find_data data;
    data.pt_page_addr = page_addr;
    return v3_drill_host_pt_64(host_cr3, vaddr, find_pt_cb, &data);
/* Guest-table variants of the same search. */
int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
    struct pt_find_data data;
    data.pt_page_addr = page_addr;
    return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
    struct pt_find_data data;
    data.pt_page_addr = page_addr;
    return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
    struct pt_find_data data;
    data.pt_page_addr = page_addr;
    return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
251 * Page Table Access Checks
/* State for the access-check callbacks: the access being tested (fault error
 * code semantics) and where to deposit the per-level access status. */
struct pt_check_data {
    pf_error_t access_type;
    pt_access_status_t * access_status;
261 static int check_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
262 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
266 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
269 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
275 PrintError("Inavlid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
279 if (chk_data->access_status != PT_ACCESS_OK) {
287 static int check_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
288 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
292 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
295 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
298 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
304 PrintError("Inavlid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
308 if (chk_data->access_status != PT_ACCESS_OK) {
316 static int check_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
317 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
321 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
324 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
327 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
330 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
337 PrintError("Inavlid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
341 if (chk_data->access_status != PT_ACCESS_OK) {
/* Walk the host 32-bit tables for vaddr, checking access_type at every
 * level; the first failing level's status is left in *access_status. */
int v3_check_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_host_pt_32(host_cr3, vaddr, check_pt_32_cb, &access_data);
/* Host 32-bit PAE access check. */
int v3_check_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_host_pt_32pae(host_cr3, vaddr, check_pt_32pae_cb, &access_data);
/* Host 64-bit access check. */
int v3_check_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_host_pt_64(host_cr3, vaddr, check_pt_64_cb, &access_data);
/* Guest-table access checks (guest physical addrs resolved during walk). */
int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
			 pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
			    pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
			 pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
/*
 * PAGE TABLE LOOKUP FUNCTIONS
 *
 * The value of entry is a return type:
 * Page not present: *entry = 0
 */

/*
 * 32 bit Page Table lookup functions
 */

/* Resolve addr's PDE: *entry gets the 4MB page base (large page) or the
 * referenced page table's base.  NOTE(review): the `} else {` joining the
 * large-page and normal-page arms is elided in this view. */
static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
    pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);

    if (!pde_entry->present) {
	return PT_ENTRY_NOT_PRESENT;
    } else if (pde_entry->large_page) {
	// reinterpret the entry with the 4MB-page field layout
	pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
	*entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
	return PT_ENTRY_LARGE_PAGE;
	*entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
	return PT_ENTRY_PAGE;

/* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
 */
static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
    pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);

    if (!pte_entry->present) {
	// PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
	return PT_ENTRY_NOT_PRESENT;
	*entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
	return PT_ENTRY_PAGE;

/*
 * 32 bit PAE Page Table lookup functions
 */

/* Resolve addr's PDPE: *entry gets the page directory base if present. */
static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
    pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);

    if (!pdpe_entry->present) {
	return PT_ENTRY_NOT_PRESENT;
	*entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
	return PT_ENTRY_PAGE;

/* Resolve addr's PAE PDE: 2MB page base, or the referenced page table. */
static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
    pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);

    if (!pde_entry->present) {
	return PT_ENTRY_NOT_PRESENT;
    } else if (pde_entry->large_page) {
	pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
	*entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
	return PT_ENTRY_LARGE_PAGE;
	*entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
	return PT_ENTRY_PAGE;

/* Resolve addr's PAE PTE to its 4KB page base. */
static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
    pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);

    if (!pte_entry->present) {
	return PT_ENTRY_NOT_PRESENT;
	*entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
	return PT_ENTRY_PAGE;

/*
 * 64 bit Page Table lookup functions
 */

/* Resolve addr's PML4E to the PDP table base. */
static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
    pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);

    if (!pml_entry->present) {
	return PT_ENTRY_NOT_PRESENT;
	*entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
	return PT_ENTRY_PAGE;

/* Resolve addr's PDPE.  1GB large pages are rejected as unsupported. */
static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
    pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);

    if (!pdpe_entry->present) {
	return PT_ENTRY_NOT_PRESENT;
    } else if (pdpe_entry->large_page) {
	PrintError("1 Gigabyte pages not supported\n");
	*entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
	return PT_ENTRY_PAGE;

/* Resolve addr's 64-bit PDE: 2MB page base, or the referenced page table. */
static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
    pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);

    if (!pde_entry->present) {
	return PT_ENTRY_NOT_PRESENT;
    } else if (pde_entry->large_page) {
	pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
	*entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
	return PT_ENTRY_LARGE_PAGE;
	*entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
	return PT_ENTRY_PAGE;

/* Resolve addr's 64-bit PTE to its 4KB page base. */
static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
    pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);

    if (!pte_entry->present) {
	return PT_ENTRY_NOT_PRESENT;
	*entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
	return PT_ENTRY_PAGE;
/* Generic per-entry permission check shared by all v3_can_access_* helpers.
 * Order matters: present, then writability (for writes), then user access.
 * NOTE(review): the final `return PT_ACCESS_OK;` is elided in this view. */
static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
    if (pt->present == 0) {
	return PT_ACCESS_NOT_PRESENT;
    } else if ((pt->writable == 0) && (access_type.write == 1)) {
	return PT_ACCESS_WRITE_ERROR;
    } else if ((pt->user_page == 0) && (access_type.user == 1)) {
	// supervisor-only entry touched from user mode
	return PT_ACCESS_USER_ERROR;
/*
 * 32 bit access checks: index the table for addr, then delegate to the
 * generic entry check (gen_pt_t overlays the common low bits of all entry
 * formats: present / writable / user).
 */
pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

/*
 * 32 bit PAE access checks
 */
pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

/*
 * 64 Bit access checks
 */
pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);
646 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
647 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
648 return can_access_pt_entry(entry, access_type);
/* Check whether access_type is permitted by the 64-bit PTE covering addr. */
pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);
/* We generate a page table to correspond to a given memory layout
 * pulling pages from the mem_list when necessary
 * If there are any gaps in the layout, we add them as unmapped pages
 */
/* Build a 2-level identity (passthrough) mapping of guest physical memory.
 * Hooked/unmapped regions get non-present PTEs; write-hooked regions are
 * mapped read-only so writes fault into the hook.
 * NOTE(review): many interior lines (present/writable assignments, error
 * handling, closing braces, the return) are elided in this view. */
pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
    addr_t current_page_addr = 0;
    pde32_t * pde = V3_VAddr(V3_AllocPages(1));
    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
	pte32_t * pte = V3_VAddr(V3_AllocPages(1));
	for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
	    struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);
	    // unmapped / fully hooked pages: leave the PTE empty (not present)
	    (region->host_type == SHDW_REGION_FULL_HOOK)) {
	    pte[j].user_page = 0;
	    pte[j].write_through = 0;
	    pte[j].cache_disable = 0;
	    pte[j].global_page = 0;
	    pte[j].page_base_addr = 0;
	    // write hooks are mapped read-only so stores trap
	    if (region->host_type == SHDW_REGION_WRITE_HOOK) {
		PrintDebug("Marking Write hook host_addr %p as RO\n", (void *)current_page_addr);
	    pte[j].user_page = 1;
	    pte[j].write_through = 0;
	    pte[j].cache_disable = 0;
	    pte[j].global_page = 0;
	    if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
	    // manual >>12; equivalent to PAGE_BASE_ADDR(host_addr)
	    pte[j].page_base_addr = host_addr >> 12;
	    current_page_addr += PAGE_SIZE;
	// drop page tables that ended up with no present entries
	if (pte_present == 0) {
	    V3_FreePage(V3_PAddr(pte));
	    pde[i].user_page = 0;
	    pde[i].write_through = 0;
	    pde[i].cache_disable = 0;
	    pde[i].large_page = 0;
	    pde[i].global_page = 0;
	    pde[i].pt_base_addr = 0;
	    pde[i].user_page = 1;
	    pde[i].write_through = 0;
	    pde[i].cache_disable = 0;
	    pde[i].large_page = 0;
	    pde[i].global_page = 0;
	    // link the retained page table into the directory (physical base)
	    pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
/* We generate a page table to correspond to a given memory layout
 * pulling pages from the mem_list when necessary
 * If there are any gaps in the layout, we add them as unmapped pages
 */
/* Build a 3-level (PDP/PD/PT) PAE passthrough mapping; same hooking policy
 * as the non-PAE builder above.  NOTE(review): interior lines are elided. */
pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
    addr_t current_page_addr = 0;
    pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
    // PDP page must start zeroed: only 4 of its entries are architectural
    memset(pdpe, 0, PAGE_SIZE);
    for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
	pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
	for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
	    pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
	    for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
		struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);
		// unmapped / fully hooked: leave the PTE not present
		(region->host_type == SHDW_REGION_FULL_HOOK)) {
		pte[k].user_page = 0;
		pte[k].write_through = 0;
		pte[k].cache_disable = 0;
		pte[k].global_page = 0;
		pte[k].page_base_addr = 0;
		// write hooks mapped read-only so stores trap
		if (region->host_type == SHDW_REGION_WRITE_HOOK) {
		pte[k].user_page = 1;
		pte[k].write_through = 0;
		pte[k].cache_disable = 0;
		pte[k].global_page = 0;
		if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
		// manual >>12; equivalent to PAGE_BASE_ADDR(host_addr)
		pte[k].page_base_addr = host_addr >> 12;
		current_page_addr += PAGE_SIZE;
	    // free page tables with no present entries
	    if (pte_present == 0) {
		V3_FreePage(V3_PAddr(pte));
		pde[j].user_page = 0;
		pde[j].write_through = 0;
		pde[j].cache_disable = 0;
		pde[j].large_page = 0;
		pde[j].global_page = 0;
		pde[j].pt_base_addr = 0;
		pde[j].user_page = 1;
		pde[j].write_through = 0;
		pde[j].cache_disable = 0;
		pde[j].large_page = 0;
		pde[j].global_page = 0;
		pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
	// free empty page directories too
	if (pde_present == 0) {
	    V3_FreePage(V3_PAddr(pde));
	    pdpe[i].write_through = 0;
	    pdpe[i].cache_disable = 0;
	    pdpe[i].accessed = 0;
	    pdpe[i].vmm_info = 0;
	    pdpe[i].pd_base_addr = 0;
	    pdpe[i].write_through = 0;
	    pdpe[i].cache_disable = 0;
	    pdpe[i].accessed = 0;
	    pdpe[i].vmm_info = 0;
	    pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
/* Build a 4-level long-mode passthrough mapping.
 * NOTE(review): the loop bounds are hard-coded — 1 PML4 entry and only 20
 * PDPE entries — so at most the first 20GB of guest physical address space
 * is mapped; confirm that is the intended ceiling.  Interior lines (present
 * flags, error handling, closing braces, the return) are elided here. */
pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
    addr_t current_page_addr = 0;
    pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
    for (i = 0; i < 1; i++) {
	int pdpe_present = 0;
	pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
	for (j = 0; j < 20; j++) {
	    pde64_t * pde = V3_VAddr(V3_AllocPages(1));
	    for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
		pte64_t * pte = V3_VAddr(V3_AllocPages(1));
		for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
		    struct v3_shadow_region * region = v3_get_shadow_region(info, current_page_addr);
		    // unmapped / fully hooked: PTE stays not present
		    (region->host_type == SHDW_REGION_FULL_HOOK)) {
		    pte[m].user_page = 0;
		    pte[m].write_through = 0;
		    pte[m].cache_disable = 0;
		    pte[m].global_page = 0;
		    pte[m].page_base_addr = 0;
		    // write hooks mapped read-only so stores trap
		    if (region->host_type == SHDW_REGION_WRITE_HOOK) {
		    pte[m].user_page = 1;
		    pte[m].write_through = 0;
		    pte[m].cache_disable = 0;
		    pte[m].global_page = 0;
		    if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
		    pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
		    //PrintPTE64(current_page_addr, &(pte[m]));
		    current_page_addr += PAGE_SIZE;
		// free page tables with no present entries
		if (pte_present == 0) {
		    V3_FreePage(V3_PAddr(pte));
		    pde[k].user_page = 0;
		    pde[k].write_through = 0;
		    pde[k].cache_disable = 0;
		    pde[k].large_page = 0;
		    //pde[k].global_page = 0;
		    pde[k].pt_base_addr = 0;
		    pde[k].user_page = 1;
		    pde[k].write_through = 0;
		    pde[k].cache_disable = 0;
		    pde[k].accessed = 0;
		    pde[k].large_page = 0;
		    //pde[k].global_page = 0;
		    pde[k].vmm_info = 0;
		    pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
	    // free empty page directories
	    if (pde_present == 0) {
		V3_FreePage(V3_PAddr(pde));
		pdpe[j].present = 0;
		pdpe[j].writable = 0;
		pdpe[j].user_page = 0;
		pdpe[j].write_through = 0;
		pdpe[j].cache_disable = 0;
		pdpe[j].accessed = 0;
		pdpe[j].large_page = 0;
		//pdpe[j].global_page = 0;
		pdpe[j].vmm_info = 0;
		pdpe[j].pd_base_addr = 0;
		pdpe[j].present = 1;
		pdpe[j].writable = 1;
		pdpe[j].user_page = 1;
		pdpe[j].write_through = 0;
		pdpe[j].cache_disable = 0;
		pdpe[j].accessed = 0;
		pdpe[j].large_page = 0;
		//pdpe[j].global_page = 0;
		pdpe[j].vmm_info = 0;
		pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
	PrintDebug("PML index=%d\n", i);
	// free empty PDP pages; otherwise link them into the PML4
	if (pdpe_present == 0) {
	    V3_FreePage(V3_PAddr(pdpe));
	    pml[i].writable = 0;
	    pml[i].user_page = 0;
	    pml[i].write_through = 0;
	    pml[i].cache_disable = 0;
	    pml[i].accessed = 0;
	    pml[i].reserved = 0;
	    //pml[i].large_page = 0;
	    //pml[i].global_page = 0;
	    pml[i].vmm_info = 0;
	    pml[i].pdp_base_addr = 0;
	    pml[i].writable = 1;
	    pml[i].user_page = 1;
	    pml[i].write_through = 0;
	    pml[i].cache_disable = 0;
	    pml[i].accessed = 0;
	    pml[i].reserved = 0;
	    //pml[i].large_page = 0;
	    //pml[i].global_page = 0;
	    pml[i].vmm_info = 0;
	    pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
1082 int v3_drill_host_pt_32(v3_reg_t host_cr3, addr_t vaddr,
1083 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1084 void * private_data) {
1085 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1086 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
1087 addr_t host_pte_pa = 0;
1091 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
1092 return (ret == -1) ? -1 : PAGE_PD32;
1095 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
1096 case PT_ENTRY_NOT_PRESENT:
1098 case PT_ENTRY_LARGE_PAGE:
1099 if ((ret == callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1100 return (ret == -1) ? -1 : PAGE_4MB;
1104 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1105 return (ret == -1) ? -1 : PAGE_PT32;
1108 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1111 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1112 return (ret == -1) ? -1 : PAGE_4KB;
1122 int v3_drill_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr,
1123 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1124 void * private_data) {
1125 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1126 addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
1127 addr_t host_pde_pa = 0;
1128 addr_t host_pte_pa = 0;
1132 if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
1133 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1136 switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
1137 case PT_ENTRY_NOT_PRESENT:
1141 if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1142 return (ret == -1) ? -1 : PAGE_PD32PAE;
1145 switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1146 case PT_ENTRY_NOT_PRESENT:
1148 case PT_ENTRY_LARGE_PAGE:
1149 if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1150 return (ret == -1) ? -1 : PAGE_2MB;
1154 if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1155 return (ret == -1) ? -1 : PAGE_PT32PAE;
1158 if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1161 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1162 return (ret == -1) ? -1 : PAGE_4KB;
1171 // should never get here
1176 int v3_drill_host_pt_64(v3_reg_t host_cr3, addr_t vaddr,
1177 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1178 void * private_data) {
1179 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1180 addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
1181 addr_t host_pdpe_pa = 0;
1182 addr_t host_pde_pa = 0;
1183 addr_t host_pte_pa = 0;
1187 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
1188 return (ret == -1) ? -1 : PAGE_PML464;
1191 switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
1192 case PT_ENTRY_NOT_PRESENT:
1196 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
1197 return (ret == -1) ? -1 : PAGE_PDP64;
1200 switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
1201 case PT_ENTRY_NOT_PRESENT:
1203 case PT_ENTRY_LARGE_PAGE:
1204 if ((ret == callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
1205 return (ret == -1) ? -1 : PAGE_1GB;
1207 PrintError("1 Gigabyte Pages not supported\n");
1211 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1212 return (ret == -1) ? -1 : PAGE_PD64;
1215 switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1216 case PT_ENTRY_NOT_PRESENT:
1218 case PT_ENTRY_LARGE_PAGE:
1219 if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1220 return (ret == -1) ? -1 : PAGE_2MB;
1225 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1226 return (ret == -1) ? -1 : PAGE_PT64;
1229 if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1232 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1233 return (ret == -1) ? -1 : PAGE_4KB;
1242 // should never get here
1252 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1253 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1254 void * private_data) {
1255 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1256 pde32_t * guest_pde = NULL;
1257 addr_t guest_pte_pa = 0;
1261 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
1262 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1263 (void *)guest_pde_pa);
1267 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1268 return (ret == -1) ? -1 : PAGE_PD32;
1271 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1272 case PT_ENTRY_NOT_PRESENT:
1274 case PT_ENTRY_LARGE_PAGE:
1276 addr_t large_page_pa = (addr_t)guest_pte_pa;
1277 addr_t large_page_va = 0;
1279 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1280 PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
1281 (void *)large_page_va);
1286 if ((ret == callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1287 return (ret == -1) ? -1 : PAGE_4MB;
1293 pte32_t * guest_pte = NULL;
1296 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
1297 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1298 (void *)guest_pte_pa);
1302 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1303 return (ret == -1) ? -1 : PAGE_PT32;
1306 if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1311 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1312 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1317 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1318 return (ret == -1) ? -1 : PAGE_4KB;
1325 // should never get here
1326 PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
1332 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1333 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1334 void * private_data) {
1335 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1336 pdpe32pae_t * guest_pdpe = 0;
1337 addr_t guest_pde_pa = 0;
1340 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1341 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1342 (void *)guest_pdpe_pa);
1346 if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1347 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1350 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1352 case PT_ENTRY_NOT_PRESENT:
1356 pde32pae_t * guest_pde = NULL;
1357 addr_t guest_pte_pa = 0;
1359 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1360 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
1361 (void *)guest_pde_pa);
1365 if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1366 return (ret == -1) ? -1 : PAGE_PD32PAE;
1369 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1371 case PT_ENTRY_NOT_PRESENT:
1373 case PT_ENTRY_LARGE_PAGE:
1375 addr_t large_page_pa = (addr_t)guest_pte_pa;
1376 addr_t large_page_va = 0;
1378 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1379 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1380 (void *)large_page_va);
1384 if ((ret == callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1385 return (ret == -1) ? -1 : PAGE_2MB;
1391 pte32pae_t * guest_pte = NULL;
1394 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1395 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
1396 (void *)guest_pte_pa);
1400 if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1401 return (ret == -1) ? -1 : PAGE_PT32PAE;
1404 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1409 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1410 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1415 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1416 return (ret == -1) ? -1 : PAGE_4KB;
1424 PrintError("Invalid page type for PD32PAE\n");
1428 // should never get here
1429 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
1433 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1434 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1435 void * private_data) {
1436 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1437 pml4e64_t * guest_pmle = 0;
1438 addr_t guest_pdpe_pa = 0;
1441 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1442 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1443 (void *)guest_pml4_pa);
1447 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1448 return (ret == -1) ? -1 : PAGE_PML464;
1451 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1452 case PT_ENTRY_NOT_PRESENT:
1456 pdpe64_t * guest_pdp = NULL;
1457 addr_t guest_pde_pa = 0;
1459 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1460 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1461 (void *)guest_pdpe_pa);
1465 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1466 return (ret == -1) ? -1 : PAGE_PDP64;
1469 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1470 case PT_ENTRY_NOT_PRESENT:
1472 case PT_ENTRY_LARGE_PAGE:
1474 addr_t large_page_pa = (addr_t)guest_pde_pa;
1475 addr_t large_page_va = 0;
1477 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1478 PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
1479 (void *)large_page_va);
1483 if ((ret == callback(PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1484 return (ret == -1) ? -1 : PAGE_1GB;
1486 PrintError("1 Gigabyte Pages not supported\n");
1491 pde64_t * guest_pde = NULL;
1492 addr_t guest_pte_pa = 0;
1494 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1495 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1496 (void *)guest_pde_pa);
1500 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1501 return (ret == -1) ? -1 : PAGE_PD64;
1504 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1505 case PT_ENTRY_NOT_PRESENT:
1507 case PT_ENTRY_LARGE_PAGE:
1509 addr_t large_page_pa = (addr_t)guest_pte_pa;
1510 addr_t large_page_va = 0;
1512 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1513 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1514 (void *)large_page_va);
1518 if ((ret == callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1519 return (ret == -1) ? -1 : PAGE_2MB;
1525 pte64_t * guest_pte = NULL;
1528 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1529 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1530 (void *)guest_pte_pa);
1534 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1535 return (ret == -1) ? -1 : PAGE_PT64;
1538 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1543 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1544 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1549 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1550 return (ret == -1) ? -1 : PAGE_4KB;
1564 // should never get here
1565 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
// Walk every present mapping in a guest's 32-bit (non-PAE) page-table
// hierarchy rooted at guest_cr3, invoking 'callback' once for the PD,
// once per present PT, and once per mapped page (4MB large page or 4KB
// page).  Guest physical frames are translated to host-virtual pointers
// via guest_pa_to_host_va() before being handed to the callback; failed
// lookups of *data* pages are tolerated (page may be unmapped or hooked).
// NOTE(review): this listing is elided — the declarations of vaddr, i, j,
// page_va and several closing braces/returns fall in the gaps between the
// visible lines.
1572 int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
1573 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1574 void * private_data) {
1575 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1576 pde32_t * guest_pde = NULL;
// Guard: a walk without a callback is meaningless.
1581 PrintError("Call back was not specified\n");
// Translating the page *directory* must succeed — without it the walk
// cannot proceed (unlike data pages below).
1585 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1586 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1587 (void *)guest_pde_pa);
1591 callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data);
// Visit all 1024 PDEs; vaddr tracks the guest-virtual address covered.
1593 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1594 if (guest_pde[i].present) {
1595 if (guest_pde[i].large_page) {
// 4MB large page: the PDE maps the page directly, no PT level.
1596 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1597 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1598 addr_t large_page_va = 0;
1600 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1601 PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1602 (void *)large_page_pa);
1603 // We'll let it through for data pages because they may be unmapped or hooked
1607 callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data);
1609 vaddr += PAGE_SIZE_4MB;
// Otherwise the PDE points at a page table of 4KB mappings.
1611 addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1612 pte32_t * tmp_pte = NULL;
1614 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1615 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1620 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1622 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1623 if (tmp_pte[j].present) {
1624 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1627 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1628 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1630 // We'll let it through for data pages because they may be unmapped or hooked
1634 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
// Advance one 4KB page regardless of whether the PTE was present.
1637 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the whole 4MB region it would have covered.
1641 vaddr += PAGE_SIZE_4MB;
// Walk every present mapping in a guest's 32-bit PAE page-table hierarchy
// rooted at guest_cr3: PDP (4 entries) -> PD -> (2MB page | PT -> 4KB
// page).  'callback' fires once per visited paging structure and once per
// mapped page.  Failed host-VA lookups of data pages are tolerated (page
// may be unmapped or hooked); failed lookups of paging structures abort.
// NOTE(review): this listing is elided — declarations of vaddr, i, j, k,
// page_va and several closing braces fall in the gaps between lines.
1648 int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
1649 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1650 void * private_data) {
1651 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1652 pdpe32pae_t * guest_pdpe = NULL;
// Guard: a walk without a callback is meaningless.
1657 PrintError("Call back was not specified\n");
1661 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1662 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1663 (void *)guest_pdpe_pa);
1669 callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data);
// PAE top level: only 4 PDP entries, each covering 1GB of address space.
1671 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1672 if (guest_pdpe[i].present) {
1673 addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1674 pde32pae_t * tmp_pde = NULL;
1676 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1677 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1682 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1684 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1685 if (tmp_pde[j].present) {
1686 if (tmp_pde[j].large_page) {
// 2MB large page mapped directly by the PDE.
1687 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1688 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1689 addr_t large_page_va = 0;
1691 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1692 PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1693 (void *)large_page_pa);
1694 // We'll let it through for data pages because they may be unmapped or hooked
1698 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1700 vaddr += PAGE_SIZE_2MB;
// Otherwise descend into the page table of 4KB mappings.
1702 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1703 pte32pae_t * tmp_pte = NULL;
1705 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1706 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1711 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1713 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1714 if (tmp_pte[k].present) {
1715 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1718 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1719 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1721 // We'll let it through for data pages because they may be unmapped or hooked
1725 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
// Advance one 4KB page per PTE slot, present or not.
1728 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip its 2MB span.
1732 vaddr += PAGE_SIZE_2MB;
// Non-present PDP entry: skip the full 1GB its PD would have covered.
1736 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
// Walk every present mapping in a guest's 64-bit (long mode) page-table
// hierarchy rooted at guest_cr3: PML4 -> PDP -> (1GB page | PD) ->
// (2MB page | PT -> 4KB page).  'callback' fires once per visited paging
// structure and once per mapped page.  Failed host-VA lookups of data
// pages are tolerated (page may be unmapped or hooked); failures on
// paging structures abort the walk.
// NOTE(review): this listing is elided — declarations of vaddr, i, j, k,
// m, page_va and several closing braces fall in the gaps between lines.
1745 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1746 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1747 void * private_data) {
1748 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1749 pml4e64_t * guest_pml = NULL;
// Guard: a walk without a callback is meaningless.
1754 PrintError("Call back was not specified\n");
1758 if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1759 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1765 callback(PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data);
// Level 4: each PML4 entry covers 512GB of guest-virtual space.
1767 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1768 if (guest_pml[i].present) {
1769 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1770 pdpe64_t * tmp_pdpe = NULL;
1773 if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1774 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1779 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
// Level 3: each PDP entry covers 1GB (possibly as one large page).
1781 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1782 if (tmp_pdpe[j].present) {
1783 if (tmp_pdpe[j].large_page) {
1784 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1785 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1786 addr_t large_page_va = 0;
1788 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1789 PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1790 (void *)large_page_pa);
1791 // We'll let it through for data pages because they may be unmapped or hooked
1795 callback(PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data);
1797 vaddr += PAGE_SIZE_1GB;
// Otherwise descend into the PD this PDP entry points at.
1799 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1800 pde64_t * tmp_pde = NULL;
1802 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1803 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1808 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
// Level 2: each PDE covers 2MB (possibly as one large page).
1810 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1811 if (tmp_pde[k].present) {
1812 if (tmp_pde[k].large_page) {
1813 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1814 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1815 addr_t large_page_va = 0;
1817 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1818 PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1819 (void *)large_page_pa);
1820 // We'll let it through for data pages because they may be unmapped or hooked
1824 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1826 vaddr += PAGE_SIZE_2MB;
// Level 1: page table of 4KB mappings.
1828 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1829 pte64_t * tmp_pte = NULL;
1831 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1832 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1837 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1839 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1840 if (tmp_pte[m].present) {
1841 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1844 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1845 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1847 // We'll let it through for data pages because they may be unmapped or hooked
1851 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1854 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip its 2MB span.
1858 vaddr += PAGE_SIZE_2MB;
// Non-present PDP entry: skip its 1GB span.
1863 vaddr += PAGE_SIZE_1GB;
// Non-present PML4 entry: skip the full 512GB it would have covered.
// The 64-bit multiply is done in ullong_t to avoid overflow on 32-bit hosts.
1867 vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
// Walk every present mapping in a *host* 32-bit (non-PAE) page-table
// hierarchy rooted at host_cr3.  Unlike the guest walkers above, host
// physical addresses are converted with V3_VAddr()/CR3_TO_PDE32_VA()
// (direct host PA->VA mapping) rather than guest_pa_to_host_va(), so no
// translation can fail mid-walk.
// NOTE(review): elided listing — declarations of vaddr, i, j and closing
// braces fall in the gaps between the visible lines.
1873 int v3_walk_host_pt_32(v3_reg_t host_cr3,
1874 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1875 void * private_data) {
1876 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1877 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
// Guard: a walk without a callback is meaningless.
1882 PrintError("Call back was not specified\n");
1886 callback(PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data);
1888 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1889 if (host_pde[i].present) {
1890 if (host_pde[i].large_page) {
// 4MB large page mapped directly by the PDE.
1891 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1892 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1894 callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1896 vaddr += PAGE_SIZE_4MB;
// Otherwise descend into the page table of 4KB mappings.
1898 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1899 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1901 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1903 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1904 if (tmp_pte[j].present) {
1905 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1906 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
// Advance one 4KB page per PTE slot, present or not.
1909 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the 4MB span it would have covered.
1913 vaddr += PAGE_SIZE_4MB;
// Walk every present mapping in a *host* 32-bit PAE page-table hierarchy
// rooted at host_cr3: PDP (4 entries) -> PD -> (2MB page | PT -> 4KB
// page).  Host PAs are converted with V3_VAddr() (direct mapping), so no
// translation failures are possible mid-walk.
// NOTE(review): elided listing — declarations of vaddr, i, j, k and
// closing braces fall in the gaps between the visible lines.
1923 int v3_walk_host_pt_32pae(v3_reg_t host_cr3,
1924 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1925 void * private_data) {
1926 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1927 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
// Guard: a walk without a callback is meaningless.
1932 PrintError("Callback was not specified\n");
1936 callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data);
1938 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1939 if (host_pdpe[i].present) {
1940 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
1941 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
1943 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1945 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1946 if (tmp_pde[j].present) {
1948 if (tmp_pde[j].large_page) {
// 2MB large page mapped directly by the PDE.
1949 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1950 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1952 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1954 vaddr += PAGE_SIZE_2MB;
// Otherwise descend into the page table of 4KB mappings.
1956 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1957 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
1959 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1961 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1962 if (tmp_pte[k].present) {
1963 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1964 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
// Advance one 4KB page per PTE slot, present or not.
1967 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip its 2MB span.
1971 vaddr += PAGE_SIZE_2MB;
// Non-present PDP entry: skip the full 1GB its PD would have covered.
1975 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
// Walk every present mapping in a *host* 64-bit page-table hierarchy
// rooted at host_cr3: PML4 -> PDP -> (1GB page | PD) -> (2MB page |
// PT -> 4KB page).  Host PAs are converted with V3_VAddr() (direct
// mapping), so no translation failures are possible mid-walk.
// NOTE(review): elided listing — declarations of vaddr, i, j, k, m and
// closing braces fall in the gaps between the visible lines.
1982 int v3_walk_host_pt_64(v3_reg_t host_cr3,
1983 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1984 void * private_data) {
1985 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1986 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
// Guard: a walk without a callback is meaningless.
1991 PrintError("Callback was not specified\n");
1995 callback(PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data);
// Level 4: each PML4 entry covers 512GB of host-virtual space.
1997 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1998 if (host_pml[i].present) {
1999 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
2000 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
2002 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
// Level 3: each PDP entry covers 1GB (possibly as one large page).
2004 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
2005 if (tmp_pdpe[j].present) {
2006 if (tmp_pdpe[j].large_page) {
2007 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
2008 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
2010 callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2012 vaddr += PAGE_SIZE_1GB;
// Otherwise descend into the PD this PDP entry points at.
2014 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
2015 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
2017 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
// Level 2: each PDE covers 2MB (possibly as one large page).
2019 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
2020 if (tmp_pde[k].present) {
2021 if (tmp_pde[k].large_page) {
2022 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
2023 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
2025 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2027 vaddr += PAGE_SIZE_2MB;
// Level 1: page table of 4KB mappings.
2029 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
2030 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
2032 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
2034 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
2035 if (tmp_pte[m].present) {
2036 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
2037 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
// Advance one 4KB page per PTE slot, present or not.
2039 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip its 2MB span.
2043 vaddr += PAGE_SIZE_2MB;
// Non-present PDP entry: skip its 1GB span.
2048 vaddr += PAGE_SIZE_1GB;
// Non-present PML4 entry: skip the full 512GB it would have covered;
// force 64-bit arithmetic so the product does not overflow on 32-bit hosts.
2052 vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
// Human-readable names for each page_type_t value, one per enum constant.
// Consumed by v3_page_type_to_str() below; kept file-local (static) and
// stored as uchar_t arrays to match the function's const uchar_t * return.
2060 static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
2061 static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
2062 static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
2063 static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
2064 static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
2065 static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
2066 static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
2067 static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
2068 static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
2069 static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
2070 static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
2071 static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
2072 static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
2075 const uchar_t * v3_page_type_to_str(page_type_t type) {
2078 return PAGE_4KB_STR;
2080 return PAGE_2MB_STR;
2082 return PAGE_4MB_STR;
2084 return PAGE_1GB_STR;
2086 return PAGE_PT32_STR;
2088 return PAGE_PD32_STR;
2090 return PAGE_PDP32PAE_STR;
2092 return PAGE_PD32PAE_STR;
2094 return PAGE_PT32PAE_STR;
2096 return PAGE_PML464_STR;
2098 return PAGE_PDP64_STR;
2100 return PAGE_PD64_STR;
2102 return PAGE_PT64_STR;