2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
27 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
28 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);
30 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
31 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
32 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);
34 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
35 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
36 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
37 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);
42 #define USE_VMM_PAGING_DEBUG
43 // All of the debug functions defined in vmm_paging.h are implemented in this file
44 #include "vmm_paging_debug.h"
45 #undef USE_VMM_PAGING_DEBUG
49 #ifndef DEBUG_SHADOW_PAGING
51 #define PrintDebug(fmt, args...)
56 void delete_page_tables_32(pde32_t * pde) {
63 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
65 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
66 PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
67 pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
69 PrintDebug("Deleting PTE %d (%p)\n", i, pte);
74 PrintDebug("Deleting PDE (%p)\n", pde);
75 V3_FreePage(V3_PAddr(pde));
78 void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
79 PrintError("Unimplemented function\n");
82 void delete_page_tables_64(pml4e64_t * pml4) {
83 PrintError("Unimplemented function\n");
89 static int translate_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
90 addr_t * paddr = (addr_t *)private_data;
97 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
100 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
103 PrintError("Inavlid page type (%s) in tranlate pt 32 callback\n", v3_page_type_to_str(type));
108 static int translate_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
109 addr_t * paddr = (addr_t *)private_data;
117 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
120 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
123 PrintError("Inavlid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
128 static int translate_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
129 addr_t * paddr = (addr_t *)private_data;
138 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
141 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
144 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
147 PrintError("Inavlid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
// Translate vaddr through the host's 32-bit page tables rooted at host_cr3;
// on success the physical address is written to *paddr by translate_pt_32_cb.
int v3_translate_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_32(host_cr3, vaddr, translate_pt_32_cb, paddr);
// Translate vaddr through the guest's 32-bit page tables rooted at guest_cr3;
// on success the (guest) physical address is written to *paddr.
int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
// Translate vaddr through the host's 32-bit PAE page tables rooted at host_cr3.
int v3_translate_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_32pae(host_cr3, vaddr, translate_pt_32pae_cb, paddr);
// Translate vaddr through the guest's 32-bit PAE page tables rooted at guest_cr3.
int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
// Translate vaddr through the host's 64-bit page tables rooted at host_cr3.
int v3_translate_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_64(host_cr3, vaddr, translate_pt_64_cb, paddr);
// Translate vaddr through the guest's 64-bit page tables rooted at guest_cr3.
int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
178 struct pt_find_data {
180 addr_t * pt_page_addr;
183 static int find_pt_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
184 struct pt_find_data * pt_data = (struct pt_find_data *)private_data;
186 if (type == pt_data->type) {
187 *(pt_data->pt_page_addr) = page_ptr;
195 int v3_find_host_pt_32_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
196 struct pt_find_data data;
199 data.pt_page_addr = page_addr;
201 return v3_drill_host_pt_32(host_cr3, vaddr, find_pt_cb, &data);
204 int v3_find_host_pt_32pae_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
205 struct pt_find_data data;
208 data.pt_page_addr = page_addr;
210 return v3_drill_host_pt_32pae(host_cr3, vaddr, find_pt_cb, &data);
213 int v3_find_host_pt_64_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
214 struct pt_find_data data;
217 data.pt_page_addr = page_addr;
219 return v3_drill_host_pt_64(host_cr3, vaddr, find_pt_cb, &data);
221 int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
222 struct pt_find_data data;
225 data.pt_page_addr = page_addr;
227 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
230 int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
231 struct pt_find_data data;
234 data.pt_page_addr = page_addr;
236 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
239 int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
240 struct pt_find_data data;
243 data.pt_page_addr = page_addr;
245 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
251 * Page Table Access Checks
/* Parameters threaded through the check_pt_*_cb callbacks. */
struct pt_check_data {
    pf_error_t access_type;               // the attempted access (write/user bits, etc.)
    pt_access_status_t * access_status;   // out: verdict for the last level checked
261 static int check_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
262 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
266 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
269 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
275 PrintError("Inavlid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
279 if (chk_data->access_status != PT_ACCESS_OK) {
287 static int check_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
288 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
292 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
295 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
298 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
304 PrintError("Inavlid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
308 if (chk_data->access_status != PT_ACCESS_OK) {
316 static int check_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
317 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
321 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
324 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
327 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
330 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
337 PrintError("Inavlid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
341 if (chk_data->access_status != PT_ACCESS_OK) {
// Check whether access_type is permitted at vaddr under the host's 32-bit
// page tables rooted at host_cr3; the verdict is written to *access_status.
int v3_check_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_host_pt_32(host_cr3, vaddr, check_pt_32_cb, &access_data);
// Check whether access_type is permitted at vaddr under the host's 32-bit
// PAE page tables; the verdict is written to *access_status.
int v3_check_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_host_pt_32pae(host_cr3, vaddr, check_pt_32pae_cb, &access_data);
// Check whether access_type is permitted at vaddr under the host's 64-bit
// page tables; the verdict is written to *access_status.
int v3_check_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_host_pt_64(host_cr3, vaddr, check_pt_64_cb, &access_data);
// Check whether access_type is permitted at vaddr under the guest's 32-bit
// page tables rooted at guest_cr3; the verdict is written to *access_status.
int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
                         pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
// Check whether access_type is permitted at vaddr under the guest's 32-bit
// PAE page tables; the verdict is written to *access_status.
int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
                            pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
// Check whether access_type is permitted at vaddr under the guest's 64-bit
// page tables; the verdict is written to *access_status.
int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
                         pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;
    access_data.access_type = access_type;
    access_data.access_status = access_status;
    return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
420 * PAGE TABLE LOOKUP FUNCTIONS
422 * The value of entry is a return type:
423 * Page not present: *entry = 0
428 * 32 bit Page Table lookup functions
432 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
433 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
435 if (!pde_entry->present) {
437 return PT_ENTRY_NOT_PRESENT;
438 } else if (pde_entry->large_page) {
439 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
441 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
443 return PT_ENTRY_LARGE_PAGE;
445 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
446 return PT_ENTRY_PAGE;
452 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
454 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
455 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
457 if (!pte_entry->present) {
459 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
460 return PT_ENTRY_NOT_PRESENT;
462 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
464 return PT_ENTRY_PAGE;
473 * 32 bit PAE Page Table lookup functions
476 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
477 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
479 if (!pdpe_entry->present) {
481 return PT_ENTRY_NOT_PRESENT;
483 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
484 return PT_ENTRY_PAGE;
488 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
489 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
491 if (!pde_entry->present) {
493 return PT_ENTRY_NOT_PRESENT;
494 } else if (pde_entry->large_page) {
495 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
497 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
499 return PT_ENTRY_LARGE_PAGE;
501 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
502 return PT_ENTRY_PAGE;
506 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
507 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
509 if (!pte_entry->present) {
511 return PT_ENTRY_NOT_PRESENT;
513 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
514 return PT_ENTRY_PAGE;
522 * 64 bit Page Table lookup functions
525 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
526 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
528 if (!pml_entry->present) {
530 return PT_ENTRY_NOT_PRESENT;
532 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
533 return PT_ENTRY_PAGE;
/*
 * Look up addr in a 64-bit page-directory-pointer table; writes the page
 * directory's base address to *entry (0 if not present). 1GB pages are
 * not supported.
 * NOTE(review): lines are elided from this view between the 1GB error and
 * the normal path (likely the branch's terminator) — confirm against the
 * full file before editing.
 */
static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
    pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
    if (!pdpe_entry->present) {
        return PT_ENTRY_NOT_PRESENT;
    } else if (pdpe_entry->large_page) {
        PrintError("1 Gigabyte pages not supported\n");
        *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
        return PT_ENTRY_PAGE;
553 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
554 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
556 if (!pde_entry->present) {
558 return PT_ENTRY_NOT_PRESENT;
559 } else if (pde_entry->large_page) {
560 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
562 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
564 return PT_ENTRY_LARGE_PAGE;
566 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
567 return PT_ENTRY_PAGE;
571 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
572 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
574 if (!pte_entry->present) {
576 return PT_ENTRY_NOT_PRESENT;
578 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
579 return PT_ENTRY_PAGE;
586 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
587 if (pt->present == 0) {
588 return PT_ACCESS_NOT_PRESENT;
589 } else if ((pt->writable == 0) && (access_type.write == 1)) {
590 return PT_ACCESS_WRITE_ERROR;
591 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
593 return PT_ACCESS_USER_ERROR;
602 * 32 bit access checks
604 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
605 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
606 return can_access_pt_entry(entry, access_type);
609 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
610 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
611 return can_access_pt_entry(entry, access_type);
616 * 32 bit PAE access checks
618 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
619 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
620 return can_access_pt_entry(entry, access_type);
623 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
624 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
625 return can_access_pt_entry(entry, access_type);
628 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
629 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
630 return can_access_pt_entry(entry, access_type);
634 * 64 Bit access checks
636 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
637 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
638 return can_access_pt_entry(entry, access_type);
641 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
642 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
643 return can_access_pt_entry(entry, access_type);
646 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
647 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
648 return can_access_pt_entry(entry, access_type);
651 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
652 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
653 return can_access_pt_entry(entry, access_type);
665 /* We generate a page table to correspond to a given memory layout
666 * pulling pages from the mem_list when necessary
667 * If there are any gaps in the layout, we add them as unmapped pages
/*
 * Build a 32-bit (non-PAE) passthrough page table from the guest's memory
 * map: each guest page maps to the host physical page backing it; hooked or
 * unallocated regions are left unmapped. Write-hooked regions are mapped
 * read-only so writes fault into the VMM.
 * NOTE(review): this view of the file is missing interleaved lines
 * (loop-variable declarations, pte_present bookkeeping, present/writable
 * assignments, and the final return) — comments describe only what is visible.
 */
pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
    addr_t current_page_addr = 0;
    struct shadow_map * map = &(guest_info->mem_map);

    pde32_t * pde = V3_VAddr(V3_AllocPages(1));

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        // one page table page per directory entry
        pte32_t * pte = V3_VAddr(V3_AllocPages(1));

        for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
            struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);

            // unmapped / fully-hooked / unallocated guest pages get an empty PTE
            (region->host_type == SHDW_REGION_FULL_HOOK) ||
            (region->host_type == SHDW_REGION_UNALLOCATED)) {
                pte[j].user_page = 0;
                pte[j].write_through = 0;
                pte[j].cache_disable = 0;
                pte[j].global_page = 0;
                pte[j].page_base_addr = 0;
                // write-hooked pages are mapped read-only so writes trap
                if (region->host_type == SHDW_REGION_WRITE_HOOK) {
                    PrintDebug("Marking Write hook host_addr %p as RO\n", (void *)current_page_addr);
                pte[j].user_page = 1;
                pte[j].write_through = 0;
                pte[j].cache_disable = 0;
                pte[j].global_page = 0;
                // resolve the host physical page backing this guest page
                if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
                pte[j].page_base_addr = host_addr >> 12;
            current_page_addr += PAGE_SIZE;
        // drop the page table if nothing in it ended up present
        if (pte_present == 0) {
            V3_FreePage(V3_PAddr(pte));
            pde[i].user_page = 0;
            pde[i].write_through = 0;
            pde[i].cache_disable = 0;
            pde[i].large_page = 0;
            pde[i].global_page = 0;
            pde[i].pt_base_addr = 0;
            pde[i].user_page = 1;
            pde[i].write_through = 0;
            pde[i].cache_disable = 0;
            pde[i].large_page = 0;
            pde[i].global_page = 0;
            pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
766 /* We generate a page table to correspond to a given memory layout
767 * pulling pages from the mem_list when necessary
768 * If there are any gaps in the layout, we add them as unmapped pages
/*
 * Build a 32-bit PAE passthrough page table (PDPT -> PD -> PT) from the
 * guest's memory map; same policy as create_passthrough_pts_32.
 * NOTE(review): this view of the file is missing interleaved lines
 * (loop-variable declarations, *_present bookkeeping, present/writable
 * assignments, and the final return) — comments describe only what is visible.
 */
pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
    addr_t current_page_addr = 0;
    struct shadow_map * map = &(guest_info->mem_map);

    pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
    memset(pdpe, 0, PAGE_SIZE);

    for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
        pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));

        for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
            pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));

            for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
                struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);

                // unmapped / fully-hooked / unallocated guest pages get an empty PTE
                (region->host_type == SHDW_REGION_FULL_HOOK) ||
                (region->host_type == SHDW_REGION_UNALLOCATED)) {
                    pte[k].user_page = 0;
                    pte[k].write_through = 0;
                    pte[k].cache_disable = 0;
                    pte[k].global_page = 0;
                    pte[k].page_base_addr = 0;
                    // write-hooked pages are mapped read-only so writes trap
                    if (region->host_type == SHDW_REGION_WRITE_HOOK) {
                    pte[k].user_page = 1;
                    pte[k].write_through = 0;
                    pte[k].cache_disable = 0;
                    pte[k].global_page = 0;
                    // resolve the host physical page backing this guest page
                    if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
                    pte[k].page_base_addr = host_addr >> 12;
                current_page_addr += PAGE_SIZE;
            // drop the page table if nothing in it ended up present
            if (pte_present == 0) {
                V3_FreePage(V3_PAddr(pte));
                pde[j].user_page = 0;
                pde[j].write_through = 0;
                pde[j].cache_disable = 0;
                pde[j].large_page = 0;
                pde[j].global_page = 0;
                pde[j].pt_base_addr = 0;
                pde[j].user_page = 1;
                pde[j].write_through = 0;
                pde[j].cache_disable = 0;
                pde[j].large_page = 0;
                pde[j].global_page = 0;
                pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
        // drop the page directory if nothing in it ended up present
        if (pde_present == 0) {
            V3_FreePage(V3_PAddr(pde));
            pdpe[i].write_through = 0;
            pdpe[i].cache_disable = 0;
            pdpe[i].accessed = 0;
            pdpe[i].vmm_info = 0;
            pdpe[i].pd_base_addr = 0;
            pdpe[i].write_through = 0;
            pdpe[i].cache_disable = 0;
            pdpe[i].accessed = 0;
            pdpe[i].vmm_info = 0;
            pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
/*
 * Build a 64-bit passthrough page table (PML4 -> PDP -> PD -> PT) from the
 * guest's memory map; same policy as create_passthrough_pts_32.
 * Only 1 PML4 entry and 20 PDP entries are populated — covers the first
 * 20GB of guest physical space (hard-coded loop bounds below).
 * NOTE(review): this view of the file is missing interleaved lines
 * (loop-variable declarations, *_present bookkeeping, present/writable
 * assignments, and the final return) — comments describe only what is visible.
 */
pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
    addr_t current_page_addr = 0;
    struct shadow_map * map = &(info->mem_map);

    pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));

    for (i = 0; i < 1; i++) {   // only the first PML4 slot
        int pdpe_present = 0;
        pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));

        for (j = 0; j < 20; j++) {   // 20 PDPEs => 20GB of address space
            pde64_t * pde = V3_VAddr(V3_AllocPages(1));

            for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
                pte64_t * pte = V3_VAddr(V3_AllocPages(1));

                for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
                    struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);

                    // unmapped / fully-hooked / unallocated guest pages get an empty PTE
                    (region->host_type == SHDW_REGION_FULL_HOOK) ||
                    (region->host_type == SHDW_REGION_UNALLOCATED)) {
                        pte[m].user_page = 0;
                        pte[m].write_through = 0;
                        pte[m].cache_disable = 0;
                        pte[m].global_page = 0;
                        pte[m].page_base_addr = 0;
                        // write-hooked pages are mapped read-only so writes trap
                        if (region->host_type == SHDW_REGION_WRITE_HOOK) {
                        pte[m].user_page = 1;
                        pte[m].write_through = 0;
                        pte[m].cache_disable = 0;
                        pte[m].global_page = 0;
                        // resolve the host physical page backing this guest page
                        if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
                        pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
                        //PrintPTE64(current_page_addr, &(pte[m]));
                    current_page_addr += PAGE_SIZE;
                // drop the page table if nothing in it ended up present
                if (pte_present == 0) {
                    V3_FreePage(V3_PAddr(pte));
                    pde[k].user_page = 0;
                    pde[k].write_through = 0;
                    pde[k].cache_disable = 0;
                    pde[k].large_page = 0;
                    //pde[k].global_page = 0;
                    pde[k].vmm_info = 0;
                    pde[k].pt_base_addr = 0;
                    pde[k].writable = 1;
                    pde[k].user_page = 1;
                    pde[k].write_through = 0;
                    pde[k].cache_disable = 0;
                    pde[k].accessed = 0;
                    pde[k].large_page = 0;
                    //pde[k].global_page = 0;
                    pde[k].vmm_info = 0;
                    pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
            // drop the page directory if nothing in it ended up present
            if (pde_present == 0) {
                V3_FreePage(V3_PAddr(pde));
                pdpe[j].present = 0;
                pdpe[j].writable = 0;
                pdpe[j].user_page = 0;
                pdpe[j].write_through = 0;
                pdpe[j].cache_disable = 0;
                pdpe[j].accessed = 0;
                pdpe[j].large_page = 0;
                //pdpe[j].global_page = 0;
                pdpe[j].vmm_info = 0;
                pdpe[j].pd_base_addr = 0;
                pdpe[j].present = 1;
                pdpe[j].writable = 1;
                pdpe[j].user_page = 1;
                pdpe[j].write_through = 0;
                pdpe[j].cache_disable = 0;
                pdpe[j].accessed = 0;
                pdpe[j].large_page = 0;
                //pdpe[j].global_page = 0;
                pdpe[j].vmm_info = 0;
                pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
        PrintDebug("PML index=%d\n", i);
        // drop the PDP table if nothing in it ended up present
        if (pdpe_present == 0) {
            V3_FreePage(V3_PAddr(pdpe));
            pml[i].writable = 0;
            pml[i].user_page = 0;
            pml[i].write_through = 0;
            pml[i].cache_disable = 0;
            pml[i].accessed = 0;
            pml[i].reserved = 0;
            //pml[i].large_page = 0;
            //pml[i].global_page = 0;
            pml[i].vmm_info = 0;
            pml[i].pdp_base_addr = 0;
            pml[i].writable = 1;
            pml[i].user_page = 1;
            pml[i].write_through = 0;
            pml[i].cache_disable = 0;
            pml[i].accessed = 0;
            pml[i].reserved = 0;
            //pml[i].large_page = 0;
            //pml[i].global_page = 0;
            pml[i].vmm_info = 0;
            pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
1088 int v3_drill_host_pt_32(v3_reg_t host_cr3, addr_t vaddr,
1089 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1090 void * private_data) {
1091 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1092 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
1093 addr_t host_pte_pa = 0;
1097 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
1098 return (ret == -1) ? -1 : PAGE_PD32;
1101 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
1102 case PT_ENTRY_NOT_PRESENT:
1104 case PT_ENTRY_LARGE_PAGE:
1105 if ((ret == callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1106 return (ret == -1) ? -1 : PAGE_4MB;
1110 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1111 return (ret == -1) ? -1 : PAGE_PT32;
1114 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1117 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1118 return (ret == -1) ? -1 : PAGE_4KB;
1128 int v3_drill_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr,
1129 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1130 void * private_data) {
1131 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1132 addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
1133 addr_t host_pde_pa = 0;
1134 addr_t host_pte_pa = 0;
1138 if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
1139 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1142 switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
1143 case PT_ENTRY_NOT_PRESENT:
1147 if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1148 return (ret == -1) ? -1 : PAGE_PD32PAE;
1151 switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1152 case PT_ENTRY_NOT_PRESENT:
1154 case PT_ENTRY_LARGE_PAGE:
1155 if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1156 return (ret == -1) ? -1 : PAGE_2MB;
1160 if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1161 return (ret == -1) ? -1 : PAGE_PT32PAE;
1164 if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1167 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1168 return (ret == -1) ? -1 : PAGE_4KB;
1177 // should never get here
1182 int v3_drill_host_pt_64(v3_reg_t host_cr3, addr_t vaddr,
1183 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1184 void * private_data) {
1185 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1186 addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
1187 addr_t host_pdpe_pa = 0;
1188 addr_t host_pde_pa = 0;
1189 addr_t host_pte_pa = 0;
1193 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
1194 return (ret == -1) ? -1 : PAGE_PML464;
1197 switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
1198 case PT_ENTRY_NOT_PRESENT:
1202 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
1203 return (ret == -1) ? -1 : PAGE_PDP64;
1206 switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
1207 case PT_ENTRY_NOT_PRESENT:
1209 case PT_ENTRY_LARGE_PAGE:
1210 if ((ret == callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
1211 return (ret == -1) ? -1 : PAGE_1GB;
1213 PrintError("1 Gigabyte Pages not supported\n");
1217 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1218 return (ret == -1) ? -1 : PAGE_PD64;
1221 switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1222 case PT_ENTRY_NOT_PRESENT:
1224 case PT_ENTRY_LARGE_PAGE:
1225 if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1226 return (ret == -1) ? -1 : PAGE_2MB;
1231 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1232 return (ret == -1) ? -1 : PAGE_PT64;
1235 if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1238 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1239 return (ret == -1) ? -1 : PAGE_4KB;
1248 // should never get here
1258 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1259 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1260 void * private_data) {
1261 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1262 pde32_t * guest_pde = NULL;
1263 addr_t guest_pte_pa = 0;
1267 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
1268 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1269 (void *)guest_pde_pa);
1273 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1274 return (ret == -1) ? -1 : PAGE_PD32;
1277 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1278 case PT_ENTRY_NOT_PRESENT:
1280 case PT_ENTRY_LARGE_PAGE:
1282 addr_t large_page_pa = (addr_t)guest_pte_pa;
1283 addr_t large_page_va = 0;
1285 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1286 PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
1287 (void *)large_page_va);
1292 if ((ret == callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1293 return (ret == -1) ? -1 : PAGE_4MB;
1299 pte32_t * guest_pte = NULL;
1302 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
1303 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1304 (void *)guest_pte_pa);
1308 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1309 return (ret == -1) ? -1 : PAGE_PT32;
1312 if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1317 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1318 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1323 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1324 return (ret == -1) ? -1 : PAGE_4KB;
1331 // should never get here
1332 PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
// Drill down the guest's 32-bit PAE page tables for a single virtual address,
// invoking `callback` once per level touched (PDP -> PD -> PT -> data page).
// Returns -1 on error; a non-zero callback return aborts the walk and the
// page type of the level that aborted is returned.
// NOTE(review): several lines (local declarations, braces, returns) were lost
// in extraction; comments below flag defects visible in the surviving lines.
1338 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1339 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1340 void * private_data) {
1341 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1342 pdpe32pae_t * guest_pdpe = 0;
1343 addr_t guest_pde_pa = 0;
// Translate the guest-physical PDP address into a host-virtual pointer.
1346 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1347 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1348 (void *)guest_pdpe_pa);
1352 if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1353 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1356 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1358 case PT_ENTRY_NOT_PRESENT:
1362 pde32pae_t * guest_pde = NULL;
1363 addr_t guest_pte_pa = 0;
1365 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1366 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
1367 (void *)guest_pde_pa);
1371 if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1372 return (ret == -1) ? -1 : PAGE_PD32PAE;
1375 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1377 case PT_ENTRY_NOT_PRESENT:
1379 case PT_ENTRY_LARGE_PAGE:
// 2MB large page: the "PTE" address returned by the lookup is actually
// the page's physical base address.
1381 addr_t large_page_pa = (addr_t)guest_pte_pa;
1382 addr_t large_page_va = 0;
1383 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
// BUG(review): this message prints large_page_va (still 0 on this failure
// path); it should print large_page_pa, the address that failed to translate.
1385 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1386 (void *)large_page_va);
// BUG(review): `ret ==` should be `ret =` — as written the callback's return
// value is only compared against a stale/uninitialized `ret` and then
// discarded, so the `ret == -1` check below never reflects the callback.
1390 if ((ret == callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1391 return (ret == -1) ? -1 : PAGE_2MB;
1397 pte32pae_t * guest_pte = NULL;
1400 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1401 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
1402 (void *)guest_pte_pa);
// BUG(review): misplaced parenthesis — `(ret = callback(...) != 0)` assigns
// the 0/1 comparison result to ret, so `ret == -1` below can never be true.
// Should read `((ret = callback(...)) != 0)` as in the levels above.
1406 if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1407 return (ret == -1) ? -1 : PAGE_PT32PAE;
1410 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1415 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1416 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1421 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1422 return (ret == -1) ? -1 : PAGE_4KB;
1430 PrintError("Invalid page type for PD32PAE\n");
1434 // should never get here
1435 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
// Drill down the guest's 64-bit (long mode) page tables for a single virtual
// address, invoking `callback` at each level (PML4 -> PDP -> PD -> PT -> page).
// Returns -1 on error; a non-zero callback return aborts the walk and the
// page type of the aborting level is returned.
// NOTE(review): interior lines (declarations, braces, returns) were lost in
// extraction; comments below flag defects visible in the surviving lines.
1439 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1440 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1441 void * private_data) {
1442 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1443 pml4e64_t * guest_pmle = 0;
1444 addr_t guest_pdpe_pa = 0;
1447 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1448 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1449 (void *)guest_pml4_pa);
1453 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1454 return (ret == -1) ? -1 : PAGE_PML464;
1457 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1458 case PT_ENTRY_NOT_PRESENT:
1462 pdpe64_t * guest_pdp = NULL;
1463 addr_t guest_pde_pa = 0;
1465 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1466 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1467 (void *)guest_pdpe_pa);
1471 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1472 return (ret == -1) ? -1 : PAGE_PDP64;
1475 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1476 case PT_ENTRY_NOT_PRESENT:
1478 case PT_ENTRY_LARGE_PAGE:
// 1GB large page: the lookup's output is the page's physical base address.
1480 addr_t large_page_pa = (addr_t)guest_pde_pa;
1481 addr_t large_page_va = 0;
1483 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
// BUG(review): prints large_page_va (still 0 here) instead of large_page_pa,
// the address that actually failed to translate.
1484 PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
1485 (void *)large_page_va);
// BUG(review): `ret ==` should be `ret =` — the callback result is compared
// against a stale/uninitialized `ret` and then discarded.
1489 if ((ret == callback(PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1490 return (ret == -1) ? -1 : PAGE_1GB;
// NOTE(review): the callback above is invoked for a 1GB page, yet this path
// then reports 1GB pages as unsupported — confirm the intended control flow.
1492 PrintError("1 Gigabyte Pages not supported\n");
1497 pde64_t * guest_pde = NULL;
1498 addr_t guest_pte_pa = 0;
1500 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1501 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1502 (void *)guest_pde_pa);
1506 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1507 return (ret == -1) ? -1 : PAGE_PD64;
1510 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1511 case PT_ENTRY_NOT_PRESENT:
1513 case PT_ENTRY_LARGE_PAGE:
// 2MB large page: the "PTE" address is really the page's physical base.
1515 addr_t large_page_pa = (addr_t)guest_pte_pa;
1516 addr_t large_page_va = 0;
1518 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
// BUG(review): prints large_page_va (still 0 here) instead of large_page_pa.
1519 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1520 (void *)large_page_va);
// BUG(review): `ret ==` should be `ret =` (same defect as the 1GB case).
1524 if ((ret == callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1525 return (ret == -1) ? -1 : PAGE_2MB;
1531 pte64_t * guest_pte = NULL;
1534 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1535 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1536 (void *)guest_pte_pa);
// BUG(review): misplaced parenthesis — `(ret = callback(...) != 0)` stores
// the 0/1 comparison in ret, so `ret == -1` below can never be true.
// Should be `((ret = callback(...)) != 0)`.
1540 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1541 return (ret == -1) ? -1 : PAGE_PT64;
1544 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1549 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1550 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1555 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1556 return (ret == -1) ? -1 : PAGE_4KB;
1570 // should never get here
1571 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
// Exhaustively walk a guest's 32-bit (non-PAE) page tables, invoking
// `callback` for every present structure and page: the PD itself, each PT,
// each 4MB large page, and each mapped 4KB page. `vaddr` tracks the guest
// virtual address corresponding to the current entry as the walk advances.
// NOTE(review): unlike the v3_drill_* functions, callback return values are
// ignored here — the walk cannot be aborted by the callback.
// NOTE(review): interior lines (ret/vaddr/i/j declarations, braces, returns)
// were lost in extraction.
1578 int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
1579 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1580 void * private_data) {
1581 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1582 pde32_t * guest_pde = NULL;
// Guard: a NULL callback makes the walk pointless.
1587 PrintError("Call back was not specified\n");
1591 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1592 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1593 (void *)guest_pde_pa);
1597 callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data);
1599 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1600 if (guest_pde[i].present) {
1601 if (guest_pde[i].large_page) {
1602 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1603 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1604 addr_t large_page_va = 0;
1606 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1607 PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1608 (void *)large_page_pa);
1609 // We'll let it through for data pages because they may be unmapped or hooked
1613 callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data);
1615 vaddr += PAGE_SIZE_4MB;
1617 addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1618 pte32_t * tmp_pte = NULL;
1620 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1621 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1626 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1628 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1629 if (tmp_pte[j].present) {
1630 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1633 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1634 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1636 // We'll let it through for data pages because they may be unmapped or hooked
1640 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1643 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the whole 4MB region this entry would have covered.
1647 vaddr += PAGE_SIZE_4MB;
// Exhaustively walk a guest's 32-bit PAE page tables, invoking `callback`
// for every present structure and page: the PDP, each PD, each PT, each 2MB
// large page, and each mapped 4KB page. `vaddr` tracks the guest virtual
// address of the current entry.
// NOTE(review): callback return values are ignored — the walk cannot be
// aborted by the callback (unlike the v3_drill_* functions).
// NOTE(review): interior lines (ret/vaddr/i/j/k declarations, braces,
// returns) were lost in extraction.
1654 int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
1655 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1656 void * private_data) {
1657 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1658 pdpe32pae_t * guest_pdpe = NULL;
// Guard: a NULL callback makes the walk pointless.
1663 PrintError("Call back was not specified\n");
1667 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1668 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1669 (void *)guest_pdpe_pa);
1675 callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data);
1677 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1678 if (guest_pdpe[i].present) {
1679 addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1680 pde32pae_t * tmp_pde = NULL;
1682 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1683 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1688 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1690 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1691 if (tmp_pde[j].present) {
1692 if (tmp_pde[j].large_page) {
1693 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1694 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1695 addr_t large_page_va = 0;
1697 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1698 PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1699 (void *)large_page_pa);
1700 // We'll let it through for data pages because they may be unmapped or hooked
1704 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1706 vaddr += PAGE_SIZE_2MB;
1708 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1709 pte32pae_t * tmp_pte = NULL;
1711 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1712 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1717 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1719 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1720 if (tmp_pte[k].present) {
1721 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1724 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1725 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1727 // We'll let it through for data pages because they may be unmapped or hooked
1731 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1734 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the 2MB region this entry would have covered.
1738 vaddr += PAGE_SIZE_2MB;
// Non-present PDPE: skip the entire region covered by one PD.
1742 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
// Exhaustively walk a guest's 64-bit (long mode) page tables, invoking
// `callback` for every present structure and page: the PML4, each PDP, each
// PD, each PT, plus 1GB/2MB large pages and mapped 4KB pages. `vaddr` tracks
// the guest virtual address of the current entry.
// NOTE(review): callback return values are ignored — the walk cannot be
// aborted by the callback (unlike the v3_drill_* functions).
// NOTE(review): interior lines (ret/vaddr/i/j/k/m declarations, braces,
// returns) were lost in extraction.
1751 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1752 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1753 void * private_data) {
1754 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1755 pml4e64_t * guest_pml = NULL;
// Guard: a NULL callback makes the walk pointless.
1760 PrintError("Call back was not specified\n");
1764 if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1765 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1771 callback(PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data);
1773 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1774 if (guest_pml[i].present) {
1775 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1776 pdpe64_t * tmp_pdpe = NULL;
1779 if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1780 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1785 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
1787 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1788 if (tmp_pdpe[j].present) {
1789 if (tmp_pdpe[j].large_page) {
1790 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1791 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1792 addr_t large_page_va = 0;
1794 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1795 PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1796 (void *)large_page_pa);
1797 // We'll let it through for data pages because they may be unmapped or hooked
1801 callback(PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data);
1803 vaddr += PAGE_SIZE_1GB;
1805 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1806 pde64_t * tmp_pde = NULL;
1808 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1809 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1814 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1816 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1817 if (tmp_pde[k].present) {
1818 if (tmp_pde[k].large_page) {
1819 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1820 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1821 addr_t large_page_va = 0;
1823 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1824 PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1825 (void *)large_page_pa);
1826 // We'll let it through for data pages because they may be unmapped or hooked
1830 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1832 vaddr += PAGE_SIZE_2MB;
1834 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1835 pte64_t * tmp_pte = NULL;
1837 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1838 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1843 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1845 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1846 if (tmp_pte[m].present) {
1847 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1850 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1851 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1853 // We'll let it through for data pages because they may be unmapped or hooked
1857 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1860 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the 2MB region this entry would have covered.
1864 vaddr += PAGE_SIZE_2MB;
// Non-present PDPE: skip the 1GB region this entry would have covered.
1869 vaddr += PAGE_SIZE_1GB;
// Non-present PML4E: skip the full 512GB region; the ullong_t casts avoid
// 32-bit overflow in the multiplication.
1873 vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
// Exhaustively walk HOST 32-bit (non-PAE) page tables. Host page tables are
// directly addressable, so physical addresses are converted with
// CR3_TO_PDE32_VA / V3_VAddr rather than guest_pa_to_host_va.
// NOTE(review): callback return values are ignored; interior lines (vaddr/i/j
// declarations, braces, returns) were lost in extraction.
1879 int v3_walk_host_pt_32(v3_reg_t host_cr3,
1880 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1881 void * private_data) {
1882 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1883 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
// Guard: a NULL callback makes the walk pointless.
1888 PrintError("Call back was not specified\n");
1892 callback(PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data);
1894 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1895 if (host_pde[i].present) {
1896 if (host_pde[i].large_page) {
1897 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1898 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1900 callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1902 vaddr += PAGE_SIZE_4MB;
1904 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1905 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1907 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1909 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1910 if (tmp_pte[j].present) {
1911 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1912 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1915 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the whole 4MB region this entry would have covered.
1919 vaddr += PAGE_SIZE_4MB;
// Exhaustively walk HOST 32-bit PAE page tables (PDP -> PD -> PT), using
// V3_VAddr for direct physical-to-virtual conversion of host addresses.
// NOTE(review): callback return values are ignored; interior lines (vaddr/
// i/j/k declarations, braces, returns) were lost in extraction.
1929 int v3_walk_host_pt_32pae(v3_reg_t host_cr3,
1930 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1931 void * private_data) {
1932 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1933 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
// Guard: a NULL callback makes the walk pointless.
1938 PrintError("Callback was not specified\n");
1942 callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data);
1944 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1945 if (host_pdpe[i].present) {
1946 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
1947 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
1949 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1951 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1952 if (tmp_pde[j].present) {
1954 if (tmp_pde[j].large_page) {
1955 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1956 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1958 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1960 vaddr += PAGE_SIZE_2MB;
1962 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1963 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
1965 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1967 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1968 if (tmp_pte[k].present) {
1969 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1970 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1973 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the 2MB region this entry would have covered.
1977 vaddr += PAGE_SIZE_2MB;
// Non-present PDPE: skip the entire region covered by one PD.
1981 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
// Exhaustively walk HOST 64-bit (long mode) page tables (PML4 -> PDP -> PD
// -> PT), using V3_VAddr for direct physical-to-virtual conversion.
// NOTE(review): callback return values are ignored; interior lines (vaddr/
// i/j/k/m declarations, braces, returns) were lost in extraction.
1988 int v3_walk_host_pt_64(v3_reg_t host_cr3,
1989 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1990 void * private_data) {
1991 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1992 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
// Guard: a NULL callback makes the walk pointless.
1997 PrintError("Callback was not specified\n");
2001 callback(PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data);
2003 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
2004 if (host_pml[i].present) {
2005 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
2006 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
2008 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
2010 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
2011 if (tmp_pdpe[j].present) {
2012 if (tmp_pdpe[j].large_page) {
2013 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
2014 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
2016 callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2018 vaddr += PAGE_SIZE_1GB;
2020 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
2021 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
2023 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
2025 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
2026 if (tmp_pde[k].present) {
2027 if (tmp_pde[k].large_page) {
2028 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
2029 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
2031 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2033 vaddr += PAGE_SIZE_2MB;
2035 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
2036 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
2038 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
2040 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
2041 if (tmp_pte[m].present) {
2042 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
2043 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
2045 vaddr += PAGE_SIZE_4KB;
// Non-present PDE: skip the 2MB region this entry would have covered.
2049 vaddr += PAGE_SIZE_2MB;
// Non-present PDPE: skip the 1GB region this entry would have covered.
2054 vaddr += PAGE_SIZE_1GB;
// Non-present PML4E: skip the full 512GB region; the ullong_t casts avoid
// 32-bit overflow in the multiplication.
2058 vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
// Human-readable names for each page_type_t value; file-scope (static) so
// they don't pollute the global namespace. Consumed by v3_page_type_to_str().
2066 static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
2067 static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
2068 static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
2069 static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
2070 static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
2071 static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
2072 static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
2073 static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
2074 static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
2075 static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
2076 static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
2077 static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
2078 static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
2081 const uchar_t * v3_page_type_to_str(page_type_t type) {
2084 return PAGE_4KB_STR;
2086 return PAGE_2MB_STR;
2088 return PAGE_4MB_STR;
2090 return PAGE_1GB_STR;
2092 return PAGE_PT32_STR;
2094 return PAGE_PD32_STR;
2096 return PAGE_PDP32PAE_STR;
2098 return PAGE_PD32PAE_STR;
2100 return PAGE_PT32PAE_STR;
2102 return PAGE_PML464_STR;
2104 return PAGE_PDP64_STR;
2106 return PAGE_PD64_STR;
2108 return PAGE_PT64_STR;