2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
27 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
28 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);
30 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
31 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
32 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);
34 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
35 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
36 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
37 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);
42 #define USE_VMM_PAGING_DEBUG
43 // All of the debug functions defined in vmm_paging.h are implemented in this file
44 #include "vmm_paging_debug.h"
45 #undef USE_VMM_PAGING_DEBUG
51 void delete_page_tables_32(pde32_t * pde) {
58 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
60 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
61 PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
62 pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
64 PrintDebug("Deleting PTE %d (%p)\n", i, pte);
69 PrintDebug("Deleting PDE (%p)\n", pde);
70 V3_FreePage(V3_PAddr(pde));
73 void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
74 PrintError("Unimplemented function\n");
77 void delete_page_tables_64(pml4e64_t * pml4) {
78 PrintError("Unimplemented function\n");
84 static int translate_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
85 addr_t * paddr = (addr_t *)private_data;
92 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
95 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
98 PrintError("Inavlid page type (%s) in tranlate pt 32 callback\n", v3_page_type_to_str(type));
103 static int translate_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
104 addr_t * paddr = (addr_t *)private_data;
112 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
115 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
118 PrintError("Inavlid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
123 static int translate_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
124 addr_t * paddr = (addr_t *)private_data;
133 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
136 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
139 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
142 PrintError("Inavlid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
148 int v3_translate_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
149 return v3_drill_host_pt_32(host_cr3, vaddr, translate_pt_32_cb, paddr);
151 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
152 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
156 int v3_translate_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
157 return v3_drill_host_pt_32pae(host_cr3, vaddr, translate_pt_32pae_cb, paddr);
159 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
160 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
164 int v3_translate_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
165 return v3_drill_host_pt_64(host_cr3, vaddr, translate_pt_64_cb, paddr);
167 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
168 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
173 struct pt_find_data {
175 addr_t * pt_page_addr;
178 static int find_pt_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
179 struct pt_find_data * pt_data = (struct pt_find_data *)private_data;
181 if (type == pt_data->type) {
182 *(pt_data->pt_page_addr) = page_ptr;
190 int v3_find_host_pt_32_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
191 struct pt_find_data data;
194 data.pt_page_addr = page_addr;
196 return v3_drill_host_pt_32(host_cr3, vaddr, find_pt_cb, &data);
199 int v3_find_host_pt_32pae_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
200 struct pt_find_data data;
203 data.pt_page_addr = page_addr;
205 return v3_drill_host_pt_32pae(host_cr3, vaddr, find_pt_cb, &data);
208 int v3_find_host_pt_64_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
209 struct pt_find_data data;
212 data.pt_page_addr = page_addr;
214 return v3_drill_host_pt_64(host_cr3, vaddr, find_pt_cb, &data);
216 int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
217 struct pt_find_data data;
220 data.pt_page_addr = page_addr;
222 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
225 int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
226 struct pt_find_data data;
229 data.pt_page_addr = page_addr;
231 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
234 int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
235 struct pt_find_data data;
238 data.pt_page_addr = page_addr;
240 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
246 * Page Table Access Checks
251 struct pt_check_data {
252 pf_error_t access_type;
253 pt_access_status_t * access_status;
256 static int check_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
257 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
261 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
264 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
270 PrintError("Inavlid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
274 if (chk_data->access_status != PT_ACCESS_OK) {
282 static int check_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
283 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
287 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
290 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
293 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
299 PrintError("Inavlid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
303 if (chk_data->access_status != PT_ACCESS_OK) {
311 static int check_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
312 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
316 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
319 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
322 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
325 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
332 PrintError("Inavlid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
336 if (chk_data->access_status != PT_ACCESS_OK) {
345 int v3_check_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
346 struct pt_check_data access_data;
348 access_data.access_type = access_type;
349 access_data.access_status = access_status;
351 return v3_drill_host_pt_32(host_cr3, vaddr, check_pt_32_cb, &access_data);
354 int v3_check_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
355 struct pt_check_data access_data;
357 access_data.access_type = access_type;
358 access_data.access_status = access_status;
360 return v3_drill_host_pt_32pae(host_cr3, vaddr, check_pt_32pae_cb, &access_data);
365 int v3_check_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
366 struct pt_check_data access_data;
368 access_data.access_type = access_type;
369 access_data.access_status = access_status;
371 return v3_drill_host_pt_64(host_cr3, vaddr, check_pt_64_cb, &access_data);
376 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
377 pf_error_t access_type, pt_access_status_t * access_status) {
378 struct pt_check_data access_data;
380 access_data.access_type = access_type;
381 access_data.access_status = access_status;
383 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
390 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
391 pf_error_t access_type, pt_access_status_t * access_status) {
392 struct pt_check_data access_data;
394 access_data.access_type = access_type;
395 access_data.access_status = access_status;
397 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
402 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
403 pf_error_t access_type, pt_access_status_t * access_status) {
404 struct pt_check_data access_data;
406 access_data.access_type = access_type;
407 access_data.access_status = access_status;
409 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
/*
 * PAGE TABLE LOOKUP FUNCTIONS
 *
 * *entry is an output value:
 *   Page not present: *entry = 0
 */

/* 32 bit Page Table lookup functions */
427 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
428 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
430 if (!pde_entry->present) {
432 return PT_ENTRY_NOT_PRESENT;
433 } else if (pde_entry->large_page) {
434 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
436 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
438 return PT_ENTRY_LARGE_PAGE;
440 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
441 return PT_ENTRY_PAGE;
447 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
449 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
450 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
452 if (!pte_entry->present) {
454 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
455 return PT_ENTRY_NOT_PRESENT;
457 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
459 return PT_ENTRY_PAGE;
468 * 32 bit PAE Page Table lookup functions
471 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
472 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
474 if (!pdpe_entry->present) {
476 return PT_ENTRY_NOT_PRESENT;
478 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
479 return PT_ENTRY_PAGE;
483 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
484 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
486 if (!pde_entry->present) {
488 return PT_ENTRY_NOT_PRESENT;
489 } else if (pde_entry->large_page) {
490 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
492 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
494 return PT_ENTRY_LARGE_PAGE;
496 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
497 return PT_ENTRY_PAGE;
501 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
502 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
504 if (!pte_entry->present) {
506 return PT_ENTRY_NOT_PRESENT;
508 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
509 return PT_ENTRY_PAGE;
517 * 64 bit Page Table lookup functions
520 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
521 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
523 if (!pml_entry->present) {
525 return PT_ENTRY_NOT_PRESENT;
527 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
528 return PT_ENTRY_PAGE;
532 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
533 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
535 if (!pdpe_entry->present) {
537 return PT_ENTRY_NOT_PRESENT;
538 } else if (pdpe_entry->large_page) {
539 PrintError("1 Gigabyte pages not supported\n");
543 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
544 return PT_ENTRY_PAGE;
548 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
549 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
551 if (!pde_entry->present) {
553 return PT_ENTRY_NOT_PRESENT;
554 } else if (pde_entry->large_page) {
555 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
557 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
559 return PT_ENTRY_LARGE_PAGE;
561 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
562 return PT_ENTRY_PAGE;
566 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
567 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
569 if (!pte_entry->present) {
571 return PT_ENTRY_NOT_PRESENT;
573 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
574 return PT_ENTRY_PAGE;
581 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
582 if (pt->present == 0) {
583 return PT_ACCESS_NOT_PRESENT;
584 } else if ((pt->writable == 0) && (access_type.write == 1)) {
585 return PT_ACCESS_WRITE_ERROR;
586 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
588 return PT_ACCESS_USER_ERROR;
597 * 32 bit access checks
599 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
600 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
601 return can_access_pt_entry(entry, access_type);
604 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
605 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
606 return can_access_pt_entry(entry, access_type);
611 * 32 bit PAE access checks
613 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
614 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
615 return can_access_pt_entry(entry, access_type);
618 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
619 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
620 return can_access_pt_entry(entry, access_type);
623 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
624 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
625 return can_access_pt_entry(entry, access_type);
629 * 64 Bit access checks
631 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
632 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
633 return can_access_pt_entry(entry, access_type);
636 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
637 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
638 return can_access_pt_entry(entry, access_type);
641 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
642 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
643 return can_access_pt_entry(entry, access_type);
646 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
647 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
648 return can_access_pt_entry(entry, access_type);
660 /* We generate a page table to correspond to a given memory layout
661 * pulling pages from the mem_list when necessary
662 * If there are any gaps in the layout, we add them as unmapped pages
// Builds an identity-map ("passthrough") 32-bit page table: each mapped
// guest physical page points at its backing host physical page.
// NOTE(review): this extract appears truncated — declarations of i/j,
// present/writable/accessed/dirty assignments, the pte_present flag, and
// several closing braces are not visible; verify against the full source.
664 pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
665 addr_t current_page_addr = 0;
667 struct shadow_map * map = &(guest_info->mem_map);
669 pde32_t * pde = V3_VAddr(V3_AllocPages(1));
671 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
673 pte32_t * pte = V3_VAddr(V3_AllocPages(1));
676 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
677 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// Regions without a direct host backing are left unmapped (not-present PTE)
680 (region->host_type == HOST_REGION_HOOK) ||
681 (region->host_type == HOST_REGION_UNALLOCATED) ||
682 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
683 (region->host_type == HOST_REGION_REMOTE) ||
684 (region->host_type == HOST_REGION_SWAPPED)) {
687 pte[j].user_page = 0;
688 pte[j].write_through = 0;
689 pte[j].cache_disable = 0;
693 pte[j].global_page = 0;
695 pte[j].page_base_addr = 0;
// Directly-backed region: map the PTE at the translated host frame
700 pte[j].user_page = 1;
701 pte[j].write_through = 0;
702 pte[j].cache_disable = 0;
706 pte[j].global_page = 0;
709 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
// host_addr >> 12: physical address to 4KB page frame number
715 pte[j].page_base_addr = host_addr >> 12;
720 current_page_addr += PAGE_SIZE;
// If no PTE in this table was populated, free the page and mark the
// PDE not-present; otherwise point the PDE at the new page table.
723 if (pte_present == 0) {
724 V3_FreePage(V3_PAddr(pte));
728 pde[i].user_page = 0;
729 pde[i].write_through = 0;
730 pde[i].cache_disable = 0;
733 pde[i].large_page = 0;
734 pde[i].global_page = 0;
736 pde[i].pt_base_addr = 0;
740 pde[i].user_page = 1;
741 pde[i].write_through = 0;
742 pde[i].cache_disable = 0;
745 pde[i].large_page = 0;
746 pde[i].global_page = 0;
748 pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
757 /* We generate a page table to correspond to a given memory layout
758 * pulling pages from the mem_list when necessary
759 * If there are any gaps in the layout, we add them as unmapped pages
// Same passthrough construction as create_passthrough_pts_32, but for the
// three-level 32-bit PAE hierarchy (PDPE -> PDE -> PTE).
// NOTE(review): this extract appears truncated — loop-variable declarations,
// present/writable assignments, pte_present/pde_present flags, and several
// closing braces are not visible; verify against the full source.
761 pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
762 addr_t current_page_addr = 0;
764 struct shadow_map * map = &(guest_info->mem_map);
766 pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
767 memset(pdpe, 0, PAGE_SIZE);
769 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
771 pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
773 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
777 pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
780 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
781 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// Regions without a direct host backing are left unmapped (not-present PTE)
784 (region->host_type == HOST_REGION_HOOK) ||
785 (region->host_type == HOST_REGION_UNALLOCATED) ||
786 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
787 (region->host_type == HOST_REGION_REMOTE) ||
788 (region->host_type == HOST_REGION_SWAPPED)) {
791 pte[k].user_page = 0;
792 pte[k].write_through = 0;
793 pte[k].cache_disable = 0;
797 pte[k].global_page = 0;
799 pte[k].page_base_addr = 0;
// Directly-backed region: map the PTE at the translated host frame
805 pte[k].user_page = 1;
806 pte[k].write_through = 0;
807 pte[k].cache_disable = 0;
811 pte[k].global_page = 0;
814 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
820 pte[k].page_base_addr = host_addr >> 12;
826 current_page_addr += PAGE_SIZE;
// Empty page table: free it and mark the PDE not-present
829 if (pte_present == 0) {
830 V3_FreePage(V3_PAddr(pte));
834 pde[j].user_page = 0;
835 pde[j].write_through = 0;
836 pde[j].cache_disable = 0;
839 pde[j].large_page = 0;
840 pde[j].global_page = 0;
842 pde[j].pt_base_addr = 0;
847 pde[j].user_page = 1;
848 pde[j].write_through = 0;
849 pde[j].cache_disable = 0;
852 pde[j].large_page = 0;
853 pde[j].global_page = 0;
855 pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
// Empty page directory: free it and mark the PDPE not-present
863 if (pde_present == 0) {
864 V3_FreePage(V3_PAddr(pde));
868 pdpe[i].write_through = 0;
869 pdpe[i].cache_disable = 0;
870 pdpe[i].accessed = 0;
873 pdpe[i].vmm_info = 0;
874 pdpe[i].pd_base_addr = 0;
879 pdpe[i].write_through = 0;
880 pdpe[i].cache_disable = 0;
881 pdpe[i].accessed = 0;
884 pdpe[i].vmm_info = 0;
885 pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
// Passthrough construction for the four-level long-mode hierarchy
// (PML4 -> PDPE -> PDE -> PTE).
// NOTE(review): this extract appears truncated — loop-variable declarations,
// present/writable/accessed/dirty assignments for the PTE/PDE levels, the
// pte_present/pde_present flags, and several closing braces are not visible;
// verify against the full source.
900 pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
901 addr_t current_page_addr = 0;
903 struct shadow_map * map = &(info->mem_map);
905 pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
// NOTE(review): hard-coded bounds — i < 1 (one PML4 entry) and j < 20 (20
// PDPEs) cover only the first 20GB of guest physical space; looks like a
// deliberate shortcut, but confirm it matches the intended guest memory cap.
907 for (i = 0; i < 1; i++) {
908 int pdpe_present = 0;
909 pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
911 for (j = 0; j < 20; j++) {
913 pde64_t * pde = V3_VAddr(V3_AllocPages(1));
915 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
917 pte64_t * pte = V3_VAddr(V3_AllocPages(1));
920 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
921 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// Regions without a direct host backing are left unmapped (not-present PTE)
926 (region->host_type == HOST_REGION_HOOK) ||
927 (region->host_type == HOST_REGION_UNALLOCATED) ||
928 (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
929 (region->host_type == HOST_REGION_REMOTE) ||
930 (region->host_type == HOST_REGION_SWAPPED)) {
933 pte[m].user_page = 0;
934 pte[m].write_through = 0;
935 pte[m].cache_disable = 0;
939 pte[m].global_page = 0;
941 pte[m].page_base_addr = 0;
// Directly-backed region: map the PTE at the translated host frame
946 pte[m].user_page = 1;
947 pte[m].write_through = 0;
948 pte[m].cache_disable = 0;
952 pte[m].global_page = 0;
955 if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
961 pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
963 //PrintPTE64(current_page_addr, &(pte[m]));
971 current_page_addr += PAGE_SIZE;
// Empty page table: free it and mark the PDE not-present
974 if (pte_present == 0) {
975 V3_FreePage(V3_PAddr(pte));
979 pde[k].user_page = 0;
980 pde[k].write_through = 0;
981 pde[k].cache_disable = 0;
984 pde[k].large_page = 0;
985 //pde[k].global_page = 0;
987 pde[k].pt_base_addr = 0;
991 pde[k].user_page = 1;
992 pde[k].write_through = 0;
993 pde[k].cache_disable = 0;
996 pde[k].large_page = 0;
997 //pde[k].global_page = 0;
999 pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
// Empty page directory: free it and mark the PDPE not-present
1005 if (pde_present == 0) {
1006 V3_FreePage(V3_PAddr(pde));
1008 pdpe[j].present = 0;
1009 pdpe[j].writable = 0;
1010 pdpe[j].user_page = 0;
1011 pdpe[j].write_through = 0;
1012 pdpe[j].cache_disable = 0;
1013 pdpe[j].accessed = 0;
1015 pdpe[j].large_page = 0;
1016 //pdpe[j].global_page = 0;
1017 pdpe[j].vmm_info = 0;
1018 pdpe[j].pd_base_addr = 0;
1020 pdpe[j].present = 1;
1021 pdpe[j].writable = 1;
1022 pdpe[j].user_page = 1;
1023 pdpe[j].write_through = 0;
1024 pdpe[j].cache_disable = 0;
1025 pdpe[j].accessed = 0;
1027 pdpe[j].large_page = 0;
1028 //pdpe[j].global_page = 0;
1029 pdpe[j].vmm_info = 0;
1030 pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
1038 PrintDebug("PML index=%d\n", i);
// Empty PDP table: free it and mark the PML4 entry not-present
1040 if (pdpe_present == 0) {
1041 V3_FreePage(V3_PAddr(pdpe));
1044 pml[i].writable = 0;
1045 pml[i].user_page = 0;
1046 pml[i].write_through = 0;
1047 pml[i].cache_disable = 0;
1048 pml[i].accessed = 0;
1049 pml[i].reserved = 0;
1050 //pml[i].large_page = 0;
1051 //pml[i].global_page = 0;
1052 pml[i].vmm_info = 0;
1053 pml[i].pdp_base_addr = 0;
1056 pml[i].writable = 1;
1057 pml[i].user_page = 1;
1058 pml[i].write_through = 0;
1059 pml[i].cache_disable = 0;
1060 pml[i].accessed = 0;
1061 pml[i].reserved = 0;
1062 //pml[i].large_page = 0;
1063 //pml[i].global_page = 0;
1064 pml[i].vmm_info = 0;
1065 pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
1073 int v3_drill_host_pt_32(v3_reg_t host_cr3, addr_t vaddr,
1074 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1075 void * private_data) {
1076 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1077 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
1078 addr_t host_pte_pa = 0;
1082 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
1083 return (ret == -1) ? -1 : PAGE_PD32;
1086 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
1087 case PT_ENTRY_NOT_PRESENT:
1089 case PT_ENTRY_LARGE_PAGE:
1090 if ((ret == callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1091 return (ret == -1) ? -1 : PAGE_4MB;
1095 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1096 return (ret == -1) ? -1 : PAGE_PT32;
1099 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1102 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1103 return (ret == -1) ? -1 : PAGE_4KB;
1113 int v3_drill_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr,
1114 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1115 void * private_data) {
1116 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1117 addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
1118 addr_t host_pde_pa = 0;
1119 addr_t host_pte_pa = 0;
1123 if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
1124 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1127 switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
1128 case PT_ENTRY_NOT_PRESENT:
1132 if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1133 return (ret == -1) ? -1 : PAGE_PD32PAE;
1136 switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1137 case PT_ENTRY_NOT_PRESENT:
1139 case PT_ENTRY_LARGE_PAGE:
1140 if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1141 return (ret == -1) ? -1 : PAGE_2MB;
1145 if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1146 return (ret == -1) ? -1 : PAGE_PT32PAE;
1149 if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1152 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1153 return (ret == -1) ? -1 : PAGE_4KB;
1162 // should never get here
1167 int v3_drill_host_pt_64(v3_reg_t host_cr3, addr_t vaddr,
1168 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1169 void * private_data) {
1170 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1171 addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
1172 addr_t host_pdpe_pa = 0;
1173 addr_t host_pde_pa = 0;
1174 addr_t host_pte_pa = 0;
1178 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
1179 return (ret == -1) ? -1 : PAGE_PML464;
1182 switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
1183 case PT_ENTRY_NOT_PRESENT:
1187 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
1188 return (ret == -1) ? -1 : PAGE_PDP64;
1191 switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
1192 case PT_ENTRY_NOT_PRESENT:
1194 case PT_ENTRY_LARGE_PAGE:
1195 if ((ret == callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
1196 return (ret == -1) ? -1 : PAGE_1GB;
1198 PrintError("1 Gigabyte Pages not supported\n");
1202 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1203 return (ret == -1) ? -1 : PAGE_PD64;
1206 switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1207 case PT_ENTRY_NOT_PRESENT:
1209 case PT_ENTRY_LARGE_PAGE:
1210 if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1211 return (ret == -1) ? -1 : PAGE_2MB;
1216 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1217 return (ret == -1) ? -1 : PAGE_PT64;
1220 if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1223 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1224 return (ret == -1) ? -1 : PAGE_4KB;
1233 // should never get here
1243 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1244 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1245 void * private_data) {
1246 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1247 pde32_t * guest_pde = NULL;
1248 addr_t guest_pte_pa = 0;
1252 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
1253 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1254 (void *)guest_pde_pa);
1258 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1259 return (ret == -1) ? -1 : PAGE_PD32;
1262 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1263 case PT_ENTRY_NOT_PRESENT:
1265 case PT_ENTRY_LARGE_PAGE:
1267 addr_t large_page_pa = (addr_t)guest_pte_pa;
1268 addr_t large_page_va = 0;
1270 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1271 PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
1272 (void *)large_page_va);
1277 if ((ret == callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1278 return (ret == -1) ? -1 : PAGE_4MB;
1284 pte32_t * guest_pte = NULL;
1287 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
1288 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1289 (void *)guest_pte_pa);
1293 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1294 return (ret == -1) ? -1 : PAGE_PT32;
1297 if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1302 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1303 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1308 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1309 return (ret == -1) ? -1 : PAGE_4KB;
1316 // should never get here
1317 PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
/*
 * Drill down the guest's 32-bit PAE page tables (PDP -> PD -> PT) for a
 * single vaddr, invoking callback at each level it traverses.
 * A non-zero callback return stops the drill: -1 propagates as -1,
 * any other non-zero value returns the page_type_t of the level reached.
 * NOTE(review): this is an elided listing; intermediate lines are missing.
 */
1323 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1324 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1325 void * private_data) {
1326 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1327 pdpe32pae_t * guest_pdpe = 0;
1328 addr_t guest_pde_pa = 0;
1331 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1332 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1333 (void *)guest_pdpe_pa);
1337 if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1338 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1341 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1343 case PT_ENTRY_NOT_PRESENT:
1347 pde32pae_t * guest_pde = NULL;
1348 addr_t guest_pte_pa = 0;
1350 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1351 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
1352 (void *)guest_pde_pa);
1356 if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1357 return (ret == -1) ? -1 : PAGE_PD32PAE;
1360 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1362 case PT_ENTRY_NOT_PRESENT:
1364 case PT_ENTRY_LARGE_PAGE:
1366 addr_t large_page_pa = (addr_t)guest_pte_pa;
1367 addr_t large_page_va = 0;
1369 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1370 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
/* FIX: message reports the PA, but the code printed large_page_va (still 0
 * after a failed translation); print the physical address instead. */
1371 (void *)large_page_pa);
/* FIX: was 'ret ==' (comparison) — ret was never assigned, so the
 * return below consulted a stale/uninitialized value. */
1375 if ((ret = callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1376 return (ret == -1) ? -1 : PAGE_2MB;
1382 pte32pae_t * guest_pte = NULL;
1385 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1386 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
1387 (void *)guest_pte_pa);
/* FIX: parenthesis was misplaced — 'ret = (callback(...) != 0)' stored a
 * 0/1 boolean in ret, so '(ret == -1)' could never trigger. Assign the
 * callback result itself, matching the PDP/PD levels above. */
1391 if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
1392 return (ret == -1) ? -1 : PAGE_PT32PAE;
1395 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1400 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1401 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1406 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1407 return (ret == -1) ? -1 : PAGE_4KB;
1415 PrintError("Invalid page type for PD32PAE\n");
1419 // should never get here
1420 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
/*
 * Drill down the guest's 4-level (long mode) page tables
 * (PML4 -> PDP -> PD -> PT) for a single vaddr, invoking callback at each
 * traversed level. A non-zero callback return stops the drill: -1
 * propagates as -1, any other non-zero value returns the page_type_t of
 * the level reached. 1GB pages are detected but reported unsupported.
 * NOTE(review): this is an elided listing; intermediate lines are missing.
 */
1424 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1425 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1426 void * private_data) {
1427 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1428 pml4e64_t * guest_pmle = 0;
1429 addr_t guest_pdpe_pa = 0;
1432 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1433 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1434 (void *)guest_pml4_pa);
1438 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1439 return (ret == -1) ? -1 : PAGE_PML464;
1442 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1443 case PT_ENTRY_NOT_PRESENT:
1447 pdpe64_t * guest_pdp = NULL;
1448 addr_t guest_pde_pa = 0;
1450 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1451 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1452 (void *)guest_pdpe_pa);
1456 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1457 return (ret == -1) ? -1 : PAGE_PDP64;
1460 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1461 case PT_ENTRY_NOT_PRESENT:
1463 case PT_ENTRY_LARGE_PAGE:
1465 addr_t large_page_pa = (addr_t)guest_pde_pa;
1466 addr_t large_page_va = 0;
1468 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1469 PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
/* FIX: message reports the PA, but the code printed large_page_va (still 0
 * after a failed translation); print the physical address instead. */
1470 (void *)large_page_pa);
/* FIX: was 'ret ==' (comparison) — ret was never assigned by this call. */
1474 if ((ret = callback(PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1475 return (ret == -1) ? -1 : PAGE_1GB;
1477 PrintError("1 Gigabyte Pages not supported\n");
1482 pde64_t * guest_pde = NULL;
1483 addr_t guest_pte_pa = 0;
1485 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1486 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1487 (void *)guest_pde_pa);
1491 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1492 return (ret == -1) ? -1 : PAGE_PD64;
1495 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1496 case PT_ENTRY_NOT_PRESENT:
1498 case PT_ENTRY_LARGE_PAGE:
1500 addr_t large_page_pa = (addr_t)guest_pte_pa;
1501 addr_t large_page_va = 0;
1503 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1504 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
/* FIX: same PA-vs-VA mix-up as the 1GB path above. */
1505 (void *)large_page_pa);
/* FIX: was 'ret ==' (comparison) — ret was never assigned by this call. */
1509 if ((ret = callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1510 return (ret == -1) ? -1 : PAGE_2MB;
1516 pte64_t * guest_pte = NULL;
1519 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1520 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1521 (void *)guest_pte_pa);
/* FIX: parenthesis was misplaced — 'ret = (callback(...) != 0)' stored a
 * 0/1 boolean in ret, so '(ret == -1)' could never trigger. Assign the
 * callback result itself, matching the PML4/PDP/PD levels above. */
1525 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data)) != 0) {
1526 return (ret == -1) ? -1 : PAGE_PT64;
1529 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1534 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1535 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1540 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1541 return (ret == -1) ? -1 : PAGE_4KB;
1555 // should never get here
1556 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
/*
 * Walk the ENTIRE guest 32-bit (non-PAE) page hierarchy, invoking callback
 * for the PD, each present PT, and every mapped 4KB/4MB page.
 * Unlike the drill functions, callback return values are ignored here —
 * the walk always covers the full hierarchy. vaddr is advanced by the
 * size of each region as entries are visited (4MB per PDE slot).
 * NOTE(review): this is an elided listing; intermediate lines are missing.
 */
1563 int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
1564 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1565 void * private_data) {
1566 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1567 pde32_t * guest_pde = NULL;
/* Guard: a walk without a callback is meaningless. */
1572 PrintError("Call back was not specified\n");
1576 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1577 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1578 (void *)guest_pde_pa);
1582 callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data);
1584 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1585 if (guest_pde[i].present) {
1586 if (guest_pde[i].large_page) {
1587 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1588 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1589 addr_t large_page_va = 0;
1591 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
/* Translation failure is only debug-logged; the callback still fires
 * (with large_page_va left 0) per the comment below. */
1592 PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1593 (void *)large_page_pa);
1594 // We'll let it through for data pages because they may be unmapped or hooked
1598 callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data);
1600 vaddr += PAGE_SIZE_4MB;
1602 addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1603 pte32_t * tmp_pte = NULL;
1605 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1606 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1611 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1613 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1614 if (tmp_pte[j].present) {
1615 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1618 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1619 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1621 // We'll let it through for data pages because they may be unmapped or hooked
1625 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1628 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE: skip the whole 4MB region it would have covered. */
1632 vaddr += PAGE_SIZE_4MB;
/*
 * Walk the ENTIRE guest 32-bit PAE page hierarchy (PDP -> PD -> PT),
 * invoking callback for each present table and every mapped 2MB/4KB page.
 * Callback return values are ignored, as in the other walk functions.
 * vaddr advances by each region's size (2MB per PDE slot, 1GB worth —
 * 2MB * MAX_PDE32PAE_ENTRIES — per non-present PDP slot).
 * NOTE(review): this is an elided listing; intermediate lines are missing.
 */
1639 int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
1640 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1641 void * private_data) {
1642 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1643 pdpe32pae_t * guest_pdpe = NULL;
/* Guard: a walk without a callback is meaningless. */
1648 PrintError("Call back was not specified\n");
1652 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1653 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1654 (void *)guest_pdpe_pa);
1660 callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data);
1662 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1663 if (guest_pdpe[i].present) {
1664 addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1665 pde32pae_t * tmp_pde = NULL;
1667 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1668 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1673 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1675 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1676 if (tmp_pde[j].present) {
1677 if (tmp_pde[j].large_page) {
1678 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1679 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1680 addr_t large_page_va = 0;
1682 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
/* Translation failure only debug-logged; callback still fires with va=0. */
1683 PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1684 (void *)large_page_pa);
1685 // We'll let it through for data pages because they may be unmapped or hooked
1689 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1691 vaddr += PAGE_SIZE_2MB;
1693 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1694 pte32pae_t * tmp_pte = NULL;
1696 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1697 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1702 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1704 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1705 if (tmp_pte[k].present) {
1706 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1709 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1710 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1712 // We'll let it through for data pages because they may be unmapped or hooked
1716 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1719 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE: skip the 2MB region it would have covered. */
1723 vaddr += PAGE_SIZE_2MB;
/* Non-present PDP entry: skip the whole directory's span. */
1727 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
/*
 * Walk the ENTIRE guest 4-level (long mode) page hierarchy
 * (PML4 -> PDP -> PD -> PT), invoking callback for each present table and
 * every mapped 1GB/2MB/4KB page. Callback return values are ignored.
 * vaddr advances by region size at each level; the PML4 stride uses
 * ullong_t arithmetic to avoid overflow on 32-bit addr_t builds.
 * NOTE(review): this is an elided listing; intermediate lines are missing.
 */
1736 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1737 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1738 void * private_data) {
1739 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1740 pml4e64_t * guest_pml = NULL;
/* Guard: a walk without a callback is meaningless. */
1745 PrintError("Call back was not specified\n");
1749 if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1750 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1756 callback(PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data);
1758 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1759 if (guest_pml[i].present) {
1760 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1761 pdpe64_t * tmp_pdpe = NULL;
1764 if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1765 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1770 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
1772 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1773 if (tmp_pdpe[j].present) {
1774 if (tmp_pdpe[j].large_page) {
1775 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1776 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1777 addr_t large_page_va = 0;
1779 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
/* Translation failure only debug-logged; callback still fires with va=0. */
1780 PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1781 (void *)large_page_pa);
1782 // We'll let it through for data pages because they may be unmapped or hooked
1786 callback(PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data);
1788 vaddr += PAGE_SIZE_1GB;
1790 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1791 pde64_t * tmp_pde = NULL;
1793 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1794 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1799 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1801 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1802 if (tmp_pde[k].present) {
1803 if (tmp_pde[k].large_page) {
1804 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1805 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1806 addr_t large_page_va = 0;
1808 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1809 PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1810 (void *)large_page_pa);
1811 // We'll let it through for data pages because they may be unmapped or hooked
1815 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1817 vaddr += PAGE_SIZE_2MB;
1819 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1820 pte64_t * tmp_pte = NULL;
1822 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1823 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1828 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1830 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1831 if (tmp_pte[m].present) {
1832 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1835 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1836 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1838 // We'll let it through for data pages because they may be unmapped or hooked
1842 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1845 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE: skip its 2MB span. */
1849 vaddr += PAGE_SIZE_2MB;
/* Non-present PDP entry: skip its 1GB span. */
1854 vaddr += PAGE_SIZE_1GB;
/* Non-present PML4 entry: skip its full 512GB span (widened arithmetic
 * to avoid overflow when addr_t is 32 bits). */
1858 vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
/*
 * Walk the ENTIRE host 32-bit (non-PAE) page hierarchy rooted at host_cr3.
 * Host tables are accessed via the identity mapping (V3_VAddr /
 * CR3_TO_PDE32_VA) rather than guest_pa_to_host_va, so no translation
 * failures are possible. Callback return values are ignored.
 * NOTE(review): this is an elided listing; intermediate lines are missing.
 */
1864 int v3_walk_host_pt_32(v3_reg_t host_cr3,
1865 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1866 void * private_data) {
1867 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1868 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
/* Guard: a walk without a callback is meaningless. */
1873 PrintError("Call back was not specified\n");
1877 callback(PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data);
1879 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1880 if (host_pde[i].present) {
1881 if (host_pde[i].large_page) {
1882 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1883 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1885 callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1887 vaddr += PAGE_SIZE_4MB;
1889 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1890 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1892 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1894 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1895 if (tmp_pte[j].present) {
1896 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1897 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1900 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE: skip its 4MB span. */
1904 vaddr += PAGE_SIZE_4MB;
/*
 * Walk the ENTIRE host 32-bit PAE page hierarchy rooted at host_cr3.
 * Host tables are accessed through the identity mapping (V3_VAddr),
 * so no translation failures are possible. Callback return values
 * are ignored.
 * NOTE(review): this is an elided listing; intermediate lines are missing.
 */
1914 int v3_walk_host_pt_32pae(v3_reg_t host_cr3,
1915 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1916 void * private_data) {
1917 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1918 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
/* Guard: a walk without a callback is meaningless. */
1923 PrintError("Callback was not specified\n");
1927 callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data);
1929 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1930 if (host_pdpe[i].present) {
1931 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
1932 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
1934 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1936 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1937 if (tmp_pde[j].present) {
1939 if (tmp_pde[j].large_page) {
1940 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1941 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1943 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1945 vaddr += PAGE_SIZE_2MB;
1947 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1948 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
1950 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1952 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1953 if (tmp_pte[k].present) {
1954 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1955 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1958 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE: skip its 2MB span. */
1962 vaddr += PAGE_SIZE_2MB;
/* Non-present PDP entry: skip the whole directory's span. */
1966 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
/*
 * Walk the ENTIRE host 4-level (long mode) page hierarchy rooted at
 * host_cr3. Host tables are accessed through the identity mapping
 * (V3_VAddr), so no translation failures are possible. Callback return
 * values are ignored. The PML4 stride is computed in ullong_t to avoid
 * overflow on 32-bit addr_t builds.
 * NOTE(review): this is an elided listing; intermediate lines are missing.
 */
1973 int v3_walk_host_pt_64(v3_reg_t host_cr3,
1974 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1975 void * private_data) {
1976 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1977 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
/* Guard: a walk without a callback is meaningless. */
1982 PrintError("Callback was not specified\n");
1986 callback(PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data);
1988 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1989 if (host_pml[i].present) {
1990 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
1991 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
1993 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
1995 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1996 if (tmp_pdpe[j].present) {
1997 if (tmp_pdpe[j].large_page) {
1998 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1999 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
2001 callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2003 vaddr += PAGE_SIZE_1GB;
2005 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
2006 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
2008 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
2010 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
2011 if (tmp_pde[k].present) {
2012 if (tmp_pde[k].large_page) {
2013 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
2014 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
2016 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2018 vaddr += PAGE_SIZE_2MB;
2020 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
2021 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
2023 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
2025 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
2026 if (tmp_pte[m].present) {
2027 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
2028 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
2030 vaddr += PAGE_SIZE_4KB;
/* Non-present PDE: skip its 2MB span. */
2034 vaddr += PAGE_SIZE_2MB;
/* Non-present PDP entry: skip its 1GB span. */
2039 vaddr += PAGE_SIZE_1GB;
/* Non-present PML4 entry: skip its 512GB span (widened arithmetic). */
2043 vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
/*
 * Human-readable names for each page_type_t value, returned by
 * v3_page_type_to_str() below. File-scope and const so they live in
 * read-only storage and never escape this translation unit's control.
 */
2051 static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
2052 static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
2053 static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
2054 static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
2055 static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
2056 static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
2057 static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
2058 static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
2059 static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
2060 static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
2061 static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
2062 static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
2063 static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
2066 const uchar_t * v3_page_type_to_str(page_type_t type) {
2069 return PAGE_4KB_STR;
2071 return PAGE_2MB_STR;
2073 return PAGE_4MB_STR;
2075 return PAGE_1GB_STR;
2077 return PAGE_PT32_STR;
2079 return PAGE_PD32_STR;
2081 return PAGE_PDP32PAE_STR;
2083 return PAGE_PD32PAE_STR;
2085 return PAGE_PT32PAE_STR;
2087 return PAGE_PML464_STR;
2089 return PAGE_PDP64_STR;
2091 return PAGE_PD64_STR;
2093 return PAGE_PT64_STR;