2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
27 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
28 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);
30 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
31 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
32 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);
34 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
35 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
36 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
37 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);
42 #define USE_VMM_PAGING_DEBUG
43 // All of the debug functions defined in vmm_paging.h are implemented in this file
44 #include "vmm_paging_debug.h"
45 #undef USE_VMM_PAGING_DEBUG
51 void delete_page_tables_32(pde32_t * pde) {
// Free a 32-bit page-table hierarchy: each page table reachable from the
// page directory, then the directory page itself.
58 for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
60 // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
61 PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
62 pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));
// NOTE(review): the (uint_t) truncation limits the reconstructed PTE address
// to 32 bits -- confirm this is safe on 64-bit hosts.
64 PrintDebug("Deleting PTE %d (%p)\n", i, pte);
// Finally release the page directory page itself.
69 PrintDebug("Deleting PDE (%p)\n", pde);
70 V3_FreePage(V3_PAddr(pde));
73 void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
// TODO: walk and free the 32-bit PAE hierarchy; currently a stub that only logs.
74 PrintError("Unimplemented function\n");
77 void delete_page_tables_64(pml4e64_t * pml4) {
// TODO: walk and free the 4-level long-mode hierarchy; currently a stub that only logs.
78 PrintError("Unimplemented function\n");
84 static int translate_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
85 addr_t * paddr = (addr_t *)private_data;
92 *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);
95 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
98 PrintError("Inavlid page type (%s) in tranlate pt 32 callback\n", v3_page_type_to_str(type));
103 static int translate_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
104 addr_t * paddr = (addr_t *)private_data;
112 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
115 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
118 PrintError("Inavlid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
123 static int translate_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
124 addr_t * paddr = (addr_t *)private_data;
133 *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);
136 *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);
139 *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);
142 PrintError("Inavlid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
148 int v3_translate_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
149 return v3_drill_host_pt_32(host_cr3, vaddr, translate_pt_32_cb, paddr);
151 int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
152 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);
156 int v3_translate_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
157 return v3_drill_host_pt_32pae(host_cr3, vaddr, translate_pt_32pae_cb, paddr);
159 int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
160 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);
164 int v3_translate_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
165 return v3_drill_host_pt_64(host_cr3, vaddr, translate_pt_64_cb, paddr);
167 int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
168 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
173 struct pt_find_data {
// Output slot: find_pt_cb stores here the page pointer of the walk level
// whose type matches the one being searched for.
175 addr_t * pt_page_addr;
178 static int find_pt_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
// Drill callback for the v3_find_*_page wrappers: capture the page pointer of
// the walk level whose type matches the one requested in pt_find_data.
179 struct pt_find_data * pt_data = (struct pt_find_data *)private_data;
181 if (type == pt_data->type) {
// Record the pointer to the matching paging structure.
182 *(pt_data->pt_page_addr) = page_ptr;
190 int v3_find_host_pt_32_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
191 struct pt_find_data data;
194 data.pt_page_addr = page_addr;
196 return v3_drill_host_pt_32(host_cr3, vaddr, find_pt_cb, &data);
199 int v3_find_host_pt_32pae_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
200 struct pt_find_data data;
203 data.pt_page_addr = page_addr;
205 return v3_drill_host_pt_32pae(host_cr3, vaddr, find_pt_cb, &data);
208 int v3_find_host_pt_64_page(v3_reg_t host_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
209 struct pt_find_data data;
212 data.pt_page_addr = page_addr;
214 return v3_drill_host_pt_64(host_cr3, vaddr, find_pt_cb, &data);
216 int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
217 struct pt_find_data data;
220 data.pt_page_addr = page_addr;
222 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);
225 int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
226 struct pt_find_data data;
229 data.pt_page_addr = page_addr;
231 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);
234 int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr, addr_t * page_addr) {
235 struct pt_find_data data;
238 data.pt_page_addr = page_addr;
240 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
246 * Page Table Access Checks
251 struct pt_check_data {
// The access being attempted (write/user bits of the page-fault error code).
252 pf_error_t access_type;
// Out: receives the PT_ACCESS_* verdict for each paging level visited.
253 pt_access_status_t * access_status;
256 static int check_pt_32_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
257 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
261 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
264 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
270 PrintError("Inavlid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
274 if (chk_data->access_status != PT_ACCESS_OK) {
282 static int check_pt_32pae_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
283 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
287 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
290 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
293 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
299 PrintError("Inavlid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
303 if (chk_data->access_status != PT_ACCESS_OK) {
311 static int check_pt_64_cb(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
312 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
316 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
319 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
322 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
325 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
332 PrintError("Inavlid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
336 if (chk_data->access_status != PT_ACCESS_OK) {
345 int v3_check_host_pt_32(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
346 struct pt_check_data access_data;
348 access_data.access_type = access_type;
349 access_data.access_status = access_status;
351 return v3_drill_host_pt_32(host_cr3, vaddr, check_pt_32_cb, &access_data);
354 int v3_check_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
355 struct pt_check_data access_data;
357 access_data.access_type = access_type;
358 access_data.access_status = access_status;
360 return v3_drill_host_pt_32pae(host_cr3, vaddr, check_pt_32pae_cb, &access_data);
365 int v3_check_host_pt_64(v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
366 struct pt_check_data access_data;
368 access_data.access_type = access_type;
369 access_data.access_status = access_status;
371 return v3_drill_host_pt_64(host_cr3, vaddr, check_pt_64_cb, &access_data);
376 int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
377 pf_error_t access_type, pt_access_status_t * access_status) {
378 struct pt_check_data access_data;
380 access_data.access_type = access_type;
381 access_data.access_status = access_status;
383 return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);
390 int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
391 pf_error_t access_type, pt_access_status_t * access_status) {
392 struct pt_check_data access_data;
394 access_data.access_type = access_type;
395 access_data.access_status = access_status;
397 return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);
402 int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
403 pf_error_t access_type, pt_access_status_t * access_status) {
404 struct pt_check_data access_data;
406 access_data.access_type = access_type;
407 access_data.access_status = access_status;
409 return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
415 * PAGE TABLE LOOKUP FUNCTIONS
417 * The value of entry is a return type:
418 * Page not present: *entry = 0
423 * 32 bit Page Table lookup functions
427 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
428 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
430 if (!pde_entry->present) {
432 return PT_ENTRY_NOT_PRESENT;
433 } else if (pde_entry->large_page) {
434 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
436 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
438 return PT_ENTRY_LARGE_PAGE;
440 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
441 return PT_ENTRY_PAGE;
447 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
449 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
450 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
452 if (!pte_entry->present) {
454 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
455 return PT_ENTRY_NOT_PRESENT;
457 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
459 return PT_ENTRY_PAGE;
468 * 32 bit PAE Page Table lookup functions
471 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
472 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
474 if (!pdpe_entry->present) {
476 return PT_ENTRY_NOT_PRESENT;
478 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
479 return PT_ENTRY_PAGE;
483 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
484 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
486 if (!pde_entry->present) {
488 return PT_ENTRY_NOT_PRESENT;
489 } else if (pde_entry->large_page) {
490 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
492 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
494 return PT_ENTRY_LARGE_PAGE;
496 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
497 return PT_ENTRY_PAGE;
501 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
502 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
504 if (!pte_entry->present) {
506 return PT_ENTRY_NOT_PRESENT;
508 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
509 return PT_ENTRY_PAGE;
517 * 64 bit Page Table lookup functions
520 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
521 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
523 if (!pml_entry->present) {
525 return PT_ENTRY_NOT_PRESENT;
527 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
528 return PT_ENTRY_PAGE;
532 static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
// Decode the long-mode PDPE covering addr; on success *entry receives the
// base of the next-level page directory.
533 pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);
535 if (!pdpe_entry->present) {
537 return PT_ENTRY_NOT_PRESENT;
538 } else if (pdpe_entry->large_page) {
// 1GB mappings are rejected. NOTE(review): the return value for this error
// branch is not visible in this view of the file -- confirm it reports failure.
539 PrintError("1 Gigabyte pages not supported\n");
543 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
544 return PT_ENTRY_PAGE;
548 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
549 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
551 if (!pde_entry->present) {
553 return PT_ENTRY_NOT_PRESENT;
554 } else if (pde_entry->large_page) {
555 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
557 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
559 return PT_ENTRY_LARGE_PAGE;
561 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
562 return PT_ENTRY_PAGE;
566 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
567 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
569 if (!pte_entry->present) {
571 return PT_ENTRY_NOT_PRESENT;
573 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
574 return PT_ENTRY_PAGE;
581 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
582 if (pt->present == 0) {
583 return PT_ACCESS_NOT_PRESENT;
584 } else if ((pt->writable == 0) && (access_type.write == 1)) {
585 return PT_ACCESS_WRITE_ERROR;
586 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
588 return PT_ACCESS_USER_ERROR;
597 * 32 bit access checks
599 pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
600 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
601 return can_access_pt_entry(entry, access_type);
604 pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
605 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
606 return can_access_pt_entry(entry, access_type);
611 * 32 bit PAE access checks
613 pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
614 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
615 return can_access_pt_entry(entry, access_type);
618 pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
619 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
620 return can_access_pt_entry(entry, access_type);
623 pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
624 gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
625 return can_access_pt_entry(entry, access_type);
629 * 64 Bit access checks
631 pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
632 gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
633 return can_access_pt_entry(entry, access_type);
636 pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
637 gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
638 return can_access_pt_entry(entry, access_type);
641 pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
642 gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
643 return can_access_pt_entry(entry, access_type);
646 pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
647 gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
648 return can_access_pt_entry(entry, access_type);
660 /* We generate a page table to correspond to a given memory layout
661 * pulling pages from the mem_list when necessary
662 * If there are any gaps in the layout, we add them as unmapped pages
664 pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
// Build a passthrough 32-bit page table mapping guest-physical to
// host-physical pages, pulling each host frame from the shadow memory map.
// Hooked/unallocated regions are left non-present so accesses fault to the VMM.
665 addr_t current_page_addr = 0;
667 struct shadow_map * map = &(guest_info->mem_map);
669 pde32_t * pde = V3_VAddr(V3_AllocPages(1));
// NOTE(review): unlike the PAE variant below, no memset of the freshly
// allocated pages is visible here -- confirm V3_AllocPages zeroes them.
671 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
673 pte32_t * pte = V3_VAddr(V3_AllocPages(1));
676 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
677 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// Hooked and unallocated regions get a cleared, non-present PTE.
680 (region->host_type == SHDW_REGION_FULL_HOOK) ||
681 (region->host_type == SHDW_REGION_UNALLOCATED)) {
684 pte[j].user_page = 0;
685 pte[j].write_through = 0;
686 pte[j].cache_disable = 0;
690 pte[j].global_page = 0;
692 pte[j].page_base_addr = 0;
// Mapped regions get a present PTE; write-hooked regions are mapped
// read-only so guest writes trap into the hook.
697 if (region->host_type == SHDW_REGION_WRITE_HOOK) {
699 PrintDebug("Marking Write hook host_addr %p as RO\n", (void *)current_page_addr);
704 pte[j].user_page = 1;
705 pte[j].write_through = 0;
706 pte[j].cache_disable = 0;
710 pte[j].global_page = 0;
713 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
// NOTE(review): open-coded '>> 12' where the 64-bit variant uses
// PAGE_BASE_ADDR() -- same value, consider the macro for consistency.
719 pte[j].page_base_addr = host_addr >> 12;
724 current_page_addr += PAGE_SIZE;
// If no PTE in this table ended up present, drop the table and clear the PDE.
727 if (pte_present == 0) {
728 V3_FreePage(V3_PAddr(pte));
732 pde[i].user_page = 0;
733 pde[i].write_through = 0;
734 pde[i].cache_disable = 0;
737 pde[i].large_page = 0;
738 pde[i].global_page = 0;
740 pde[i].pt_base_addr = 0;
// Otherwise publish the populated page table in this PDE slot.
744 pde[i].user_page = 1;
745 pde[i].write_through = 0;
746 pde[i].cache_disable = 0;
749 pde[i].large_page = 0;
750 pde[i].global_page = 0;
752 pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
761 /* We generate a page table to correspond to a given memory layout
762 * pulling pages from the mem_list when necessary
763 * If there are any gaps in the layout, we add them as unmapped pages
765 pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
// Build a passthrough 32-bit PAE hierarchy (PDPE -> PDE -> PTE) mapping
// guest-physical to host-physical pages from the shadow memory map.
// Hooked/unallocated regions are left non-present.
766 addr_t current_page_addr = 0;
768 struct shadow_map * map = &(guest_info->mem_map);
770 pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
// PAE requires the unused PDPE reserved bits to be zero, hence the memset.
771 memset(pdpe, 0, PAGE_SIZE);
773 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
775 pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));
777 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
781 pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));
784 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
785 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// Hooked and unallocated regions get a cleared, non-present PTE.
788 (region->host_type == SHDW_REGION_FULL_HOOK) ||
789 (region->host_type == SHDW_REGION_UNALLOCATED)) {
792 pte[k].user_page = 0;
793 pte[k].write_through = 0;
794 pte[k].cache_disable = 0;
798 pte[k].global_page = 0;
800 pte[k].page_base_addr = 0;
// Mapped regions: present PTE; write hooks are mapped read-only so writes trap.
806 if (region->host_type == SHDW_REGION_WRITE_HOOK) {
812 pte[k].user_page = 1;
813 pte[k].write_through = 0;
814 pte[k].cache_disable = 0;
818 pte[k].global_page = 0;
821 if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
827 pte[k].page_base_addr = host_addr >> 12;
833 current_page_addr += PAGE_SIZE;
// Empty page table: free it and clear the PDE slot.
836 if (pte_present == 0) {
837 V3_FreePage(V3_PAddr(pte));
841 pde[j].user_page = 0;
842 pde[j].write_through = 0;
843 pde[j].cache_disable = 0;
846 pde[j].large_page = 0;
847 pde[j].global_page = 0;
849 pde[j].pt_base_addr = 0;
// Populated page table: publish it in this PDE slot.
854 pde[j].user_page = 1;
855 pde[j].write_through = 0;
856 pde[j].cache_disable = 0;
859 pde[j].large_page = 0;
860 pde[j].global_page = 0;
862 pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
// Empty page directory: free it and clear the PDPE slot.
870 if (pde_present == 0) {
871 V3_FreePage(V3_PAddr(pde));
875 pdpe[i].write_through = 0;
876 pdpe[i].cache_disable = 0;
877 pdpe[i].accessed = 0;
880 pdpe[i].vmm_info = 0;
881 pdpe[i].pd_base_addr = 0;
// Populated page directory: publish it in this PDPE slot.
886 pdpe[i].write_through = 0;
887 pdpe[i].cache_disable = 0;
888 pdpe[i].accessed = 0;
891 pdpe[i].vmm_info = 0;
892 pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
907 pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
// Build a passthrough long-mode hierarchy (PML4 -> PDPE -> PDE -> PTE)
// mapping guest-physical to host-physical pages from the shadow memory map.
// Hooked/unallocated regions are left non-present.
908 addr_t current_page_addr = 0;
910 struct shadow_map * map = &(info->mem_map);
912 pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));
// NOTE(review): only PML4 slot 0 is populated (a single 512GB window) --
// confirm guests never exceed that.
914 for (i = 0; i < 1; i++) {
915 int pdpe_present = 0;
916 pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));
// NOTE(review): hard-coded 20 PDPE slots caps the mapped guest space at
// 20GB instead of MAX_PDPE64_ENTRIES -- confirm this limit is intended.
918 for (j = 0; j < 20; j++) {
920 pde64_t * pde = V3_VAddr(V3_AllocPages(1));
922 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
924 pte64_t * pte = V3_VAddr(V3_AllocPages(1));
927 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
928 struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
// Hooked and unallocated regions get a cleared, non-present PTE.
933 (region->host_type == SHDW_REGION_FULL_HOOK) ||
934 (region->host_type == SHDW_REGION_UNALLOCATED)) {
937 pte[m].user_page = 0;
938 pte[m].write_through = 0;
939 pte[m].cache_disable = 0;
943 pte[m].global_page = 0;
945 pte[m].page_base_addr = 0;
// Mapped regions: present PTE; write hooks are mapped read-only so writes trap.
950 if (region->host_type == SHDW_REGION_WRITE_HOOK) {
956 pte[m].user_page = 1;
957 pte[m].write_through = 0;
958 pte[m].cache_disable = 0;
962 pte[m].global_page = 0;
965 if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {
971 pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);
973 //PrintPTE64(current_page_addr, &(pte[m]));
981 current_page_addr += PAGE_SIZE;
// Empty page table: free it and clear the PDE slot.
984 if (pte_present == 0) {
985 V3_FreePage(V3_PAddr(pte));
989 pde[k].user_page = 0;
990 pde[k].write_through = 0;
991 pde[k].cache_disable = 0;
994 pde[k].large_page = 0;
995 //pde[k].global_page = 0;
997 pde[k].pt_base_addr = 0;
// Populated page table: publish it in this PDE slot.
1000 pde[k].writable = 1;
1001 pde[k].user_page = 1;
1002 pde[k].write_through = 0;
1003 pde[k].cache_disable = 0;
1004 pde[k].accessed = 0;
1006 pde[k].large_page = 0;
1007 //pde[k].global_page = 0;
1008 pde[k].vmm_info = 0;
1009 pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
// Empty page directory: free it and clear the PDPE slot.
1015 if (pde_present == 0) {
1016 V3_FreePage(V3_PAddr(pde));
1018 pdpe[j].present = 0;
1019 pdpe[j].writable = 0;
1020 pdpe[j].user_page = 0;
1021 pdpe[j].write_through = 0;
1022 pdpe[j].cache_disable = 0;
1023 pdpe[j].accessed = 0;
1025 pdpe[j].large_page = 0;
1026 //pdpe[j].global_page = 0;
1027 pdpe[j].vmm_info = 0;
1028 pdpe[j].pd_base_addr = 0;
// Populated page directory: publish it in this PDPE slot.
1030 pdpe[j].present = 1;
1031 pdpe[j].writable = 1;
1032 pdpe[j].user_page = 1;
1033 pdpe[j].write_through = 0;
1034 pdpe[j].cache_disable = 0;
1035 pdpe[j].accessed = 0;
1037 pdpe[j].large_page = 0;
1038 //pdpe[j].global_page = 0;
1039 pdpe[j].vmm_info = 0;
1040 pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
1048 PrintDebug("PML index=%d\n", i);
// Empty PDP table: free it and clear the PML4 slot.
1050 if (pdpe_present == 0) {
1051 V3_FreePage(V3_PAddr(pdpe));
1054 pml[i].writable = 0;
1055 pml[i].user_page = 0;
1056 pml[i].write_through = 0;
1057 pml[i].cache_disable = 0;
1058 pml[i].accessed = 0;
1059 pml[i].reserved = 0;
1060 //pml[i].large_page = 0;
1061 //pml[i].global_page = 0;
1062 pml[i].vmm_info = 0;
1063 pml[i].pdp_base_addr = 0;
// Populated PDP table: publish it in this PML4 slot.
1066 pml[i].writable = 1;
1067 pml[i].user_page = 1;
1068 pml[i].write_through = 0;
1069 pml[i].cache_disable = 0;
1070 pml[i].accessed = 0;
1071 pml[i].reserved = 0;
1072 //pml[i].large_page = 0;
1073 //pml[i].global_page = 0;
1074 pml[i].vmm_info = 0;
1075 pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
1083 int v3_drill_host_pt_32(v3_reg_t host_cr3, addr_t vaddr,
1084 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1085 void * private_data) {
1086 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1087 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
1088 addr_t host_pte_pa = 0;
1092 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
1093 return (ret == -1) ? -1 : PAGE_PD32;
1096 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
1097 case PT_ENTRY_NOT_PRESENT:
1099 case PT_ENTRY_LARGE_PAGE:
1100 if ((ret == callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1101 return (ret == -1) ? -1 : PAGE_4MB;
1105 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1106 return (ret == -1) ? -1 : PAGE_PT32;
1109 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1112 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1113 return (ret == -1) ? -1 : PAGE_4KB;
1123 int v3_drill_host_pt_32pae(v3_reg_t host_cr3, addr_t vaddr,
1124 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1125 void * private_data) {
// Walk the host 32-bit PAE tables rooted at host_cr3 for vaddr, invoking
// callback at each level; -1 from the callback aborts, any other nonzero
// return stops the walk and yields that level's page type.
1126 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1127 addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
1128 addr_t host_pde_pa = 0;
1129 addr_t host_pte_pa = 0;
1133 if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
1134 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1137 switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
1138 case PT_ENTRY_NOT_PRESENT:
// BUG: misplaced parenthesis -- '... private_data) != 0)' assigns the
// boolean comparison to ret, so the (ret == -1) error path can never fire.
// Should be: if ((ret = callback(...)) != 0) {
1142 if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1143 return (ret == -1) ? -1 : PAGE_PD32PAE;
1146 switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1147 case PT_ENTRY_NOT_PRESENT:
1149 case PT_ENTRY_LARGE_PAGE:
// BUG: 'ret ==' compares an uninitialized/stale ret with the callback's
// return value instead of assigning it. Should be 'ret ='.
1150 if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1151 return (ret == -1) ? -1 : PAGE_2MB;
// BUG: same misplaced parenthesis as the PAGE_PD32PAE call above.
1155 if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1156 return (ret == -1) ? -1 : PAGE_PT32PAE;
1159 if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1162 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1163 return (ret == -1) ? -1 : PAGE_4KB;
1172 // should never get here
1177 int v3_drill_host_pt_64(v3_reg_t host_cr3, addr_t vaddr,
1178 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1179 void * private_data) {
// Walk the host long-mode tables rooted at host_cr3 for vaddr, invoking
// callback at each level; -1 from the callback aborts, any other nonzero
// return stops the walk and yields that level's page type.
1180 pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1181 addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
1182 addr_t host_pdpe_pa = 0;
1183 addr_t host_pde_pa = 0;
1184 addr_t host_pte_pa = 0;
1188 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
1189 return (ret == -1) ? -1 : PAGE_PML464;
1192 switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
1193 case PT_ENTRY_NOT_PRESENT:
1197 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
1198 return (ret == -1) ? -1 : PAGE_PDP64;
1201 switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
1202 case PT_ENTRY_NOT_PRESENT:
1204 case PT_ENTRY_LARGE_PAGE:
// BUG: 'ret ==' compares instead of assigning the callback's return value.
// Should be 'ret ='.
1205 if ((ret == callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
1206 return (ret == -1) ? -1 : PAGE_1GB;
1208 PrintError("1 Gigabyte Pages not supported\n");
// BUG: misplaced parenthesis -- '... private_data) != 0)' assigns the
// comparison result to ret; should be: if ((ret = callback(...)) != 0) {
1212 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
1213 return (ret == -1) ? -1 : PAGE_PD64;
1216 switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
1217 case PT_ENTRY_NOT_PRESENT:
1219 case PT_ENTRY_LARGE_PAGE:
// BUG: same 'ret ==' comparison-instead-of-assignment as above.
1220 if ((ret == callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1221 return (ret == -1) ? -1 : PAGE_2MB;
// BUG: same misplaced parenthesis as the PAGE_PD64 call above.
1226 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1227 return (ret == -1) ? -1 : PAGE_PT64;
1230 if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1233 if ((ret = callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1234 return (ret == -1) ? -1 : PAGE_4KB;
1243 // should never get here
1253 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1254 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1255 void * private_data) {
// Walk the guest 32-bit tables rooted at guest_cr3 for vaddr, translating
// each guest-physical structure to a host virtual pointer before invoking
// callback; -1 from the callback aborts, any other nonzero return stops the
// walk and yields that level's page type.
1256 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1257 pde32_t * guest_pde = NULL;
1258 addr_t guest_pte_pa = 0;
1262 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t*)&guest_pde) == -1) {
1263 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1264 (void *)guest_pde_pa);
1268 if ((ret = callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1269 return (ret == -1) ? -1 : PAGE_PD32;
1272 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1273 case PT_ENTRY_NOT_PRESENT:
1275 case PT_ENTRY_LARGE_PAGE:
1277 addr_t large_page_pa = (addr_t)guest_pte_pa;
1278 addr_t large_page_va = 0;
1280 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1281 PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
// NOTE(review): message is labeled "PA" but prints large_page_va (still 0 on
// this failure path) -- should presumably print large_page_pa.
1282 (void *)large_page_va);
// BUG: 'ret ==' compares an uninitialized ret with the callback's return
// value instead of assigning it. Should be 'ret ='.
1287 if ((ret == callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1288 return (ret == -1) ? -1 : PAGE_4MB;
1294 pte32_t * guest_pte = NULL;
1297 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
1298 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1299 (void *)guest_pte_pa);
// BUG: misplaced parenthesis -- '... private_data) != 0)' assigns the
// boolean comparison to ret; should be: if ((ret = callback(...)) != 0) {
1303 if ((ret = callback(PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1304 return (ret == -1) ? -1 : PAGE_PT32;
1307 if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1312 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1313 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1318 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1319 return (ret == -1) ? -1 : PAGE_4KB;
1326 // should never get here
1327 PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
1333 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1334 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1335 void * private_data) {
// Walk the guest 32-bit PAE tables rooted at guest_cr3 for vaddr,
// translating each guest-physical structure to a host virtual pointer before
// invoking callback; -1 from the callback aborts, any other nonzero return
// stops the walk and yields that level's page type.
1336 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1337 pdpe32pae_t * guest_pdpe = 0;
1338 addr_t guest_pde_pa = 0;
1341 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1342 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1343 (void *)guest_pdpe_pa);
1347 if ((ret = callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1348 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1351 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1353 case PT_ENTRY_NOT_PRESENT:
1357 pde32pae_t * guest_pde = NULL;
1358 addr_t guest_pte_pa = 0;
1360 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1361 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
1362 (void *)guest_pde_pa);
1366 if ((ret = callback(PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1367 return (ret == -1) ? -1 : PAGE_PD32PAE;
1370 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1372 case PT_ENTRY_NOT_PRESENT:
1374 case PT_ENTRY_LARGE_PAGE:
1376 addr_t large_page_pa = (addr_t)guest_pte_pa;
1377 addr_t large_page_va = 0;
1379 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
// NOTE(review): PrintDebug here where sibling paths use PrintError, and the
// "PA" label prints large_page_va (still 0 on failure) -- should presumably
// be PrintError with large_page_pa.
1380 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1381 (void *)large_page_va);
// BUG: 'ret ==' compares an uninitialized ret with the callback's return
// value instead of assigning it. Should be 'ret ='.
1385 if ((ret == callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1386 return (ret == -1) ? -1 : PAGE_2MB;
1392 pte32pae_t * guest_pte = NULL;
1395 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1396 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
1397 (void *)guest_pte_pa);
// BUG: misplaced parenthesis -- '... private_data) != 0)' assigns the
// boolean comparison to ret; should be: if ((ret = callback(...)) != 0) {
1401 if ((ret = callback(PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1402 return (ret == -1) ? -1 : PAGE_PT32PAE;
1405 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1410 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1411 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1416 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1417 return (ret == -1) ? -1 : PAGE_4KB;
1425 PrintError("Invalid page type for PD32PAE\n");
1429 // should never get here
1430 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
1434 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1435 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1436 void * private_data) {
1437 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1438 pml4e64_t * guest_pmle = 0;
1439 addr_t guest_pdpe_pa = 0;
1442 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1443 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1444 (void *)guest_pml4_pa);
1448 if ((ret = callback(PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1449 return (ret == -1) ? -1 : PAGE_PML464;
1452 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1453 case PT_ENTRY_NOT_PRESENT:
1457 pdpe64_t * guest_pdp = NULL;
1458 addr_t guest_pde_pa = 0;
1460 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1461 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1462 (void *)guest_pdpe_pa);
1466 if ((ret = callback(PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1467 return (ret == -1) ? -1 : PAGE_PDP64;
1470 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1471 case PT_ENTRY_NOT_PRESENT:
1473 case PT_ENTRY_LARGE_PAGE:
1475 addr_t large_page_pa = (addr_t)guest_pde_pa;
1476 addr_t large_page_va = 0;
1478 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1479 PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
1480 (void *)large_page_va);
1484 if ((ret == callback(PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1485 return (ret == -1) ? -1 : PAGE_1GB;
1487 PrintError("1 Gigabyte Pages not supported\n");
1492 pde64_t * guest_pde = NULL;
1493 addr_t guest_pte_pa = 0;
1495 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1496 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1497 (void *)guest_pde_pa);
1501 if ((ret = callback(PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1502 return (ret == -1) ? -1 : PAGE_PD64;
1505 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1506 case PT_ENTRY_NOT_PRESENT:
1508 case PT_ENTRY_LARGE_PAGE:
1510 addr_t large_page_pa = (addr_t)guest_pte_pa;
1511 addr_t large_page_va = 0;
1513 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1514 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1515 (void *)large_page_va);
1519 if ((ret == callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1520 return (ret == -1) ? -1 : PAGE_2MB;
1526 pte64_t * guest_pte = NULL;
1529 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1530 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1531 (void *)guest_pte_pa);
1535 if ((ret = callback(PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1536 return (ret == -1) ? -1 : PAGE_PT64;
1539 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1544 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1545 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1550 if ((ret = callback(PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1551 return (ret == -1) ? -1 : PAGE_4KB;
1565 // should never get here
1566 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
// Walks the entire guest 32-bit (non-PAE) page table hierarchy rooted at
// guest_cr3, invoking callback for the PD, each present PT, and each mapped
// page (4MB large page or 4KB page). vaddr accumulates the guest virtual
// address covered by the current entry as the walk advances.
// NOTE(review): callback return values are ignored here, unlike the drill
// functions -- the walk always continues; confirm that is intentional.
1573 int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
1574 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1575 void * private_data) {
1576 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1577 pde32_t * guest_pde = NULL;
// Guard: a walk without a callback is meaningless.
1582 PrintError("Call back was not specified\n");
// Map the guest PD into host virtual space so its entries can be read.
1586 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1587 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1588 (void *)guest_pde_pa);
1592 callback(PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data);
1594 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1595 if (guest_pde[i].present) {
1596 if (guest_pde[i].large_page) {
// 4MB large page: report the page itself, no PT level below it.
1597 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
1598 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1599 addr_t large_page_va = 0;
1601 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1602 PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
1603 (void *)large_page_pa);
1604 // We'll let it through for data pages because they may be unmapped or hooked
1608 callback(PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data);
1610 vaddr += PAGE_SIZE_4MB;
// Regular PDE: descend into the 4KB-page PT it references.
1612 addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
1613 pte32_t * tmp_pte = NULL;
1615 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1616 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1621 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1623 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1624 if (tmp_pte[j].present) {
1625 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1628 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1629 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1631 // We'll let it through for data pages because they may be unmapped or hooked
1635 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1638 vaddr += PAGE_SIZE_4KB;
// Non-present PDE still spans 4MB of virtual address space.
1642 vaddr += PAGE_SIZE_4MB;
// Walks the entire guest 32-bit PAE page table hierarchy rooted at guest_cr3,
// invoking callback for the PDP, each present PD and PT, and each mapped page
// (2MB large page or 4KB page). vaddr accumulates the guest virtual address
// covered by the current entry.
// NOTE(review): callback return values are ignored -- the walk always
// continues; confirm that is intentional.
1649 int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
1650 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1651 void * private_data) {
1652 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1653 pdpe32pae_t * guest_pdpe = NULL;
// Guard: a walk without a callback is meaningless.
1658 PrintError("Call back was not specified\n");
// Map the guest PDP into host virtual space.
1662 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
1663 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1664 (void *)guest_pdpe_pa);
1670 callback(PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data);
1672 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1673 if (guest_pdpe[i].present) {
1674 addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
1675 pde32pae_t * tmp_pde = NULL;
1677 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1678 PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",
1683 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1685 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1686 if (tmp_pde[j].present) {
1687 if (tmp_pde[j].large_page) {
// 2MB large page: report the page itself, no PT level below it.
1688 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1689 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1690 addr_t large_page_va = 0;
1692 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1693 PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
1694 (void *)large_page_pa);
1695 // We'll let it through for data pages because they may be unmapped or hooked
1699 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1701 vaddr += PAGE_SIZE_2MB;
// Regular PDE: descend into the 4KB-page PT it references.
1703 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1704 pte32pae_t * tmp_pte = NULL;
1706 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1707 PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",
1712 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1714 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1715 if (tmp_pte[k].present) {
1716 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1719 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1720 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1722 // We'll let it through for data pages because they may be unmapped or hooked
1726 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1729 vaddr += PAGE_SIZE_4KB;
// Non-present PDE still spans 2MB of virtual address space.
1733 vaddr += PAGE_SIZE_2MB;
// Non-present PDPE spans a full PD's worth of address space.
1737 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
// Walks the entire guest 64-bit (long mode) page table hierarchy rooted at
// guest_cr3, invoking callback for the PML4, each present PDP/PD/PT, and each
// mapped page (1GB, 2MB, or 4KB). vaddr accumulates the guest virtual address
// covered by the current entry.
// NOTE(review): callback return values are ignored -- the walk always
// continues; confirm that is intentional.
1746 int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
1747 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1748 void * private_data) {
1749 addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
1750 pml4e64_t * guest_pml = NULL;
// Guard: a walk without a callback is meaningless.
1755 PrintError("Call back was not specified\n");
// Map the guest PML4 into host virtual space.
1759 if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
1760 PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",
1766 callback(PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data);
1768 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1769 if (guest_pml[i].present) {
1770 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
1771 pdpe64_t * tmp_pdpe = NULL;
1774 if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
1775 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1780 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
1782 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
1783 if (tmp_pdpe[j].present) {
1784 if (tmp_pdpe[j].large_page) {
// 1GB large page mapped directly by the PDPE.
1785 pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
1786 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
1787 addr_t large_page_va = 0;
1789 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1790 PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
1791 (void *)large_page_pa);
1792 // We'll let it through for data pages because they may be unmapped or hooked
1796 callback(PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data);
1798 vaddr += PAGE_SIZE_1GB;
// Regular PDPE: descend into the PD it references.
1800 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
1801 pde64_t * tmp_pde = NULL;
1803 if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
1804 PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",
1809 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1811 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
1812 if (tmp_pde[k].present) {
1813 if (tmp_pde[k].large_page) {
// 2MB large page mapped directly by the PDE.
1814 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
1815 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1816 addr_t large_page_va = 0;
1818 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1819 PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
1820 (void *)large_page_pa);
1821 // We'll let it through for data pages because they may be unmapped or hooked
1825 callback(PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data);
1827 vaddr += PAGE_SIZE_2MB;
// Regular PDE: descend into the 4KB-page PT it references.
1829 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
1830 pte64_t * tmp_pte = NULL;
1832 if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
1833 PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",
1838 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1840 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
1841 if (tmp_pte[m].present) {
1842 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
1845 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1846 PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
1848 // We'll let it through for data pages because they may be unmapped or hooked
1852 callback(PAGE_4KB, vaddr, page_va, page_pa, private_data);
1855 vaddr += PAGE_SIZE_4KB;
// Non-present PDE still spans 2MB of virtual address space.
1859 vaddr += PAGE_SIZE_2MB;
// Non-present PDPE still spans 1GB of virtual address space.
1864 vaddr += PAGE_SIZE_1GB;
// Non-present PML4E spans 512GB; widen to 64-bit before multiplying.
1868 vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
// Walks a host 32-bit (non-PAE) page table hierarchy rooted at host_cr3.
// Host tables are directly addressable via V3_VAddr, so no guest PA->VA
// translation (and no failure path) is needed.
// NOTE(review): callback return values are ignored -- the walk always
// continues; confirm that is intentional.
1874 int v3_walk_host_pt_32(v3_reg_t host_cr3,
1875 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1876 void * private_data) {
1877 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1878 addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);
// Guard: a walk without a callback is meaningless.
1883 PrintError("Call back was not specified\n");
1887 callback(PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data);
1889 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
1890 if (host_pde[i].present) {
1891 if (host_pde[i].large_page) {
// 4MB large page mapped directly by the PDE.
1892 pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
1893 addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
1895 callback(PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1897 vaddr += PAGE_SIZE_4MB;
// Regular PDE: descend into the 4KB-page PT.
1899 addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
1900 pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);
1902 callback(PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1904 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
1905 if (tmp_pte[j].present) {
1906 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
1907 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1910 vaddr += PAGE_SIZE_4KB;
// Non-present PDE still spans 4MB of virtual address space.
1914 vaddr += PAGE_SIZE_4MB;
// Walks a host 32-bit PAE page table hierarchy rooted at host_cr3. Host
// tables are directly addressable via V3_VAddr, so no translation failure
// path is needed.
// NOTE(review): callback return values are ignored -- the walk always
// continues; confirm that is intentional.
1924 int v3_walk_host_pt_32pae(v3_reg_t host_cr3,
1925 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1926 void * private_data) {
1927 pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
1928 addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
// Guard: a walk without a callback is meaningless.
1933 PrintError("Callback was not specified\n");
1937 callback(PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data);
1939 for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
1940 if (host_pdpe[i].present) {
1941 addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
1942 pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);
1944 callback(PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
1946 for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
1947 if (tmp_pde[j].present) {
1949 if (tmp_pde[j].large_page) {
// 2MB large page mapped directly by the PDE.
1950 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
1951 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
1953 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
1955 vaddr += PAGE_SIZE_2MB;
// Regular PDE: descend into the 4KB-page PT.
1957 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
1958 pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);
1960 callback(PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
1962 for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
1963 if (tmp_pte[k].present) {
1964 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
1965 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
1968 vaddr += PAGE_SIZE_4KB;
// Non-present PDE still spans 2MB of virtual address space.
1972 vaddr += PAGE_SIZE_2MB;
// Non-present PDPE spans a full PD's worth of address space.
1976 vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
// Walks a host 64-bit (long mode) page table hierarchy rooted at host_cr3.
// Host tables are directly addressable via V3_VAddr, so no translation
// failure path is needed.
// NOTE(review): callback return values are ignored -- the walk always
// continues; confirm that is intentional.
1983 int v3_walk_host_pt_64(v3_reg_t host_cr3,
1984 int (*callback)(page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1985 void * private_data) {
1986 pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
1987 addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);
// Guard: a walk without a callback is meaningless.
1992 PrintError("Callback was not specified\n");
1996 callback(PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data);
1998 for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
1999 if (host_pml[i].present) {
2000 addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
2001 pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);
2003 callback(PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data);
2005 for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
2006 if (tmp_pdpe[j].present) {
2007 if (tmp_pdpe[j].large_page) {
// 1GB large page mapped directly by the PDPE.
2008 pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
2009 addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);
2011 callback(PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2013 vaddr += PAGE_SIZE_1GB;
// Regular PDPE: descend into the PD it references.
2015 addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
2016 pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);
2018 callback(PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data);
2020 for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
2021 if (tmp_pde[k].present) {
2022 if (tmp_pde[k].large_page) {
// 2MB large page mapped directly by the PDE.
2023 pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
2024 addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
2026 callback(PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data);
2028 vaddr += PAGE_SIZE_2MB;
// Regular PDE: descend into the 4KB-page PT.
2030 addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
2031 pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);
2033 callback(PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data);
2035 for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
2036 if (tmp_pte[m].present) {
2037 addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
2038 callback(PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data);
2040 vaddr += PAGE_SIZE_4KB;
// Non-present PDE still spans 2MB of virtual address space.
2044 vaddr += PAGE_SIZE_2MB;
// Non-present PDPE still spans 1GB of virtual address space.
2049 vaddr += PAGE_SIZE_1GB;
// Non-present PML4E spans 512GB; widen to 64-bit before multiplying.
2053 vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
// Human-readable names for each page_type_t value; returned by
// v3_page_type_to_str() below. Kept as file-scope constants so callers may
// hold the returned pointer indefinitely.
2061 static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
2062 static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
2063 static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
2064 static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
2065 static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
2066 static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
2067 static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
2068 static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
2069 static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
2070 static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
2071 static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
2072 static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
2073 static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
2076 const uchar_t * v3_page_type_to_str(page_type_t type) {
2079 return PAGE_4KB_STR;
2081 return PAGE_2MB_STR;
2083 return PAGE_4MB_STR;
2085 return PAGE_1GB_STR;
2087 return PAGE_PT32_STR;
2089 return PAGE_PD32_STR;
2091 return PAGE_PDP32PAE_STR;
2093 return PAGE_PD32PAE_STR;
2095 return PAGE_PT32PAE_STR;
2097 return PAGE_PML464_STR;
2099 return PAGE_PDP64_STR;
2101 return PAGE_PD64_STR;
2103 return PAGE_PT64_STR;