2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_paging.h>
22 #include <palacios/vmm.h>
24 #include <palacios/vm_guest_mem.h>
/* Forward declarations of the per-level page table lookup helpers
 * (defined later in this file). Each resolves the entry covering `addr`
 * in one table and writes the next-level (or final page) physical base
 * through `entry`. */

/* 32-bit (non-PAE, 2-level) walkers */
static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry);

/* 32-bit PAE (3-level) walkers */
static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry);
static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry);

/* 64-bit long-mode (4-level) walkers */
static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry);
static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry);
static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry);
static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry);
/* Instantiate the paging debug helpers in this translation unit:
 * USE_VMM_PAGING_DEBUG gates the function *definitions* inside the header. */
#define USE_VMM_PAGING_DEBUG
// All of the debug functions defined in vmm_paging.h are implemented in this file
#include "vmm_paging_debug.h"
#undef USE_VMM_PAGING_DEBUG

/* Compile debug output to nothing unless shadow-paging debugging is enabled. */
#ifndef DEBUG_SHADOW_PAGING
#define PrintDebug(fmt, args...)
/* Free a 32-bit (non-PAE) page table hierarchy: each page table referenced
 * by a directory entry, then the directory page itself.
 * NOTE(review): `pt_base_addr` is a page-frame number; the "%x" format
 * assumes it fits an unsigned int — confirm on 64-bit hosts. */
void delete_page_tables_32(pde32_t * pde) {
    for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
        // We double cast, first to an addr_t to handle 64 bit issues, then to the pointer
        PrintDebug("PTE base addr %x \n", pde[i].pt_base_addr);
        // Recover the PT's address from the stored frame number (frame << 12).
        pte32_t * pte = (pte32_t *)((addr_t)(uint_t)(pde[i].pt_base_addr << PAGE_POWER));

        PrintDebug("Deleting PTE %d (%p)\n", i, pte);

    PrintDebug("Deleting PDE (%p)\n", pde);
    // V3_FreePage takes a physical address, hence the V3_PAddr conversion.
    V3_FreePage(V3_PAddr(pde));
/* Teardown for 32-bit PAE hierarchies: not yet implemented (stub logs an error). */
void delete_page_tables_32PAE(pdpe32pae_t * pdpe) {
    PrintError("Unimplemented function\n");

/* Teardown for 64-bit long-mode hierarchies: not yet implemented (stub logs an error). */
void delete_page_tables_64(pml4e64_t * pml4) {
    PrintError("Unimplemented function\n");
/* Drill callback for the 32-bit translate wrappers: on reaching a terminal
 * page (4MB or 4KB) add the in-page offset of `vaddr` to the page's physical
 * base and store the result through `private_data` (an addr_t*). Intermediate
 * levels fall through untouched.
 * NOTE(review): "Inavlid"/"tranlate" typos in the error string below. */
static int translate_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
    addr_t * paddr = (addr_t *)private_data;
            *paddr = page_pa + PAGE_OFFSET_4MB(vaddr);  // 4MB large page: 22-bit offset
            *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);  // 4KB page: 12-bit offset
            PrintError("Inavlid page type (%s) in tranlate pt 32 callback\n", v3_page_type_to_str(type));
/* Drill callback for the 32-bit PAE translate wrappers: terminal pages are
 * 2MB (large) or 4KB; compute paddr = page base + in-page offset of vaddr.
 * NOTE(review): "Inavlid" typo in the error string below. */
static int translate_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
    addr_t * paddr = (addr_t *)private_data;
            *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);  // 2MB large page: 21-bit offset
            *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);  // 4KB page: 12-bit offset
            PrintError("Inavlid page type (%s) in translate pt 32pae callback\n", v3_page_type_to_str(type));
/* Drill callback for the 64-bit translate wrappers: terminal pages are 1GB,
 * 2MB, or 4KB; compute paddr = page base + in-page offset of vaddr.
 * NOTE(review): "Inavlid" typo in the error string below. */
static int translate_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
    addr_t * paddr = (addr_t *)private_data;
            *paddr = page_pa + PAGE_OFFSET_1GB(vaddr);  // 1GB page: 30-bit offset
            *paddr = page_pa + PAGE_OFFSET_2MB(vaddr);  // 2MB page: 21-bit offset
            *paddr = page_pa + PAGE_OFFSET_4KB(vaddr);  // 4KB page: 12-bit offset
            PrintError("Inavlid page type (%s) in translate pt 64 callback\n", v3_page_type_to_str(type));
/*
 * Translation wrappers: walk the indicated page table hierarchy for `vaddr`
 * and return the resulting physical address through *paddr (filled in by
 * the translate_pt_*_cb callbacks). "host" variants walk host/shadow tables
 * in place; "guest" variants walk guest tables via GPA->HVA conversion.
 * Return value is the drill function's: -1 on error, otherwise the page
 * level that terminated the walk.
 */
int v3_translate_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_32(info, host_cr3, vaddr, translate_pt_32_cb, paddr);

int v3_translate_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_32(info, guest_cr3, vaddr, translate_pt_32_cb, paddr);

int v3_translate_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_32pae(info, host_cr3, vaddr, translate_pt_32pae_cb, paddr);

int v3_translate_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, translate_pt_32pae_cb, paddr);

int v3_translate_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_host_pt_64(info, host_cr3, vaddr, translate_pt_64_cb, paddr);

int v3_translate_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr, addr_t * paddr) {
    return v3_drill_guest_pt_64(info, guest_cr3, vaddr, translate_pt_64_cb, paddr);
/* Parameter/result bundle for find_pt_cb: when the drill reaches a paging
 * structure of the requested type, its VA and PA are written out through
 * these pointers. */
struct pt_find_data {
    addr_t * pt_page_ptr;  // out: host virtual address of the matching structure

/* Drill callback: record the page whose type matches the one being searched
 * for, then (presumably) return nonzero to stop the walk — the stop/continue
 * return is outside this excerpt; confirm against the full source. */
static int find_pt_cb(struct guest_info * info, page_type_t type, addr_t vaddr,
                      addr_t page_ptr, addr_t page_pa, void * private_data) {
    struct pt_find_data * pt_data = (struct pt_find_data *)private_data;

    PrintDebug("FIND_PT Type=%s, page_pa = %p\n",
               v3_page_type_to_str(type),

    if (type == pt_data->type) {
        *(pt_data->pt_page_ptr) = page_ptr;
        *(pt_data->pt_page_pa) = page_pa;
/*
 * Find wrappers: drill the indicated hierarchy for `vaddr` and return, via
 * *page_ptr/*page_pa, the VA and PA of the paging structure of the requested
 * `type` encountered along the way (captured by find_pt_cb).
 * NOTE(review): the `data.type = type;` initialization is not visible in
 * this excerpt — confirm it is present in each wrapper.
 */
int v3_find_host_pt_32_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
                            addr_t * page_ptr, addr_t * page_pa) {
    struct pt_find_data data;
    data.pt_page_ptr = page_ptr;
    data.pt_page_pa = page_pa;
    return v3_drill_host_pt_32(info, host_cr3, vaddr, find_pt_cb, &data);

int v3_find_host_pt_32pae_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
                               addr_t * page_ptr, addr_t * page_pa) {
    struct pt_find_data data;
    data.pt_page_ptr = page_ptr;
    data.pt_page_pa = page_pa;
    return v3_drill_host_pt_32pae(info, host_cr3, vaddr, find_pt_cb, &data);

int v3_find_host_pt_64_page(struct guest_info * info, v3_reg_t host_cr3, page_type_t type, addr_t vaddr,
                            addr_t * page_ptr, addr_t * page_pa) {
    struct pt_find_data data;
    data.pt_page_ptr = page_ptr;
    data.pt_page_pa = page_pa;
    return v3_drill_host_pt_64(info, host_cr3, vaddr, find_pt_cb, &data);

int v3_find_guest_pt_32_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
                             addr_t * page_ptr, addr_t * page_pa) {
    struct pt_find_data data;
    data.pt_page_ptr = page_ptr;
    data.pt_page_pa = page_pa;
    return v3_drill_guest_pt_32(info, guest_cr3, vaddr, find_pt_cb, &data);

int v3_find_guest_pt_32pae_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
                                addr_t * page_ptr, addr_t * page_pa) {
    struct pt_find_data data;
    data.pt_page_ptr = page_ptr;
    data.pt_page_pa = page_pa;
    return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, find_pt_cb, &data);

int v3_find_guest_pt_64_page(struct guest_info * info, v3_reg_t guest_cr3, page_type_t type, addr_t vaddr,
                             addr_t * page_ptr, addr_t * page_pa) {
    struct pt_find_data data;
    data.pt_page_ptr = page_ptr;
    data.pt_page_pa = page_pa;
    return v3_drill_guest_pt_64(info, guest_cr3, vaddr, find_pt_cb, &data);
270 * Page Table Access Checks
/* Parameter/result bundle for the check_pt_*_cb callbacks. */
struct pt_check_data {
    pf_error_t access_type;              // the access being tested (read/write, user/supervisor)
    pt_access_status_t * access_status;  // out: result of the per-level permission check
280 static int check_pt_32_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
281 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
285 *(chk_data->access_status) = v3_can_access_pde32((pde32_t *)page_ptr, vaddr, chk_data->access_type);
288 *(chk_data->access_status) = v3_can_access_pte32((pte32_t *)page_ptr, vaddr, chk_data->access_type);
294 PrintError("Inavlid page type (%s) in check pt 32 callback\n", v3_page_type_to_str(type));
298 if (chk_data->access_status != PT_ACCESS_OK) {
306 static int check_pt_32pae_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
307 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
311 *(chk_data->access_status) = v3_can_access_pdpe32pae((pdpe32pae_t *)page_ptr, vaddr, chk_data->access_type);
314 *(chk_data->access_status) = v3_can_access_pde32pae((pde32pae_t *)page_ptr, vaddr, chk_data->access_type);
317 *(chk_data->access_status) = v3_can_access_pte32pae((pte32pae_t *)page_ptr, vaddr, chk_data->access_type);
323 PrintError("Inavlid page type (%s) in check pt 32pae callback\n", v3_page_type_to_str(type));
327 if (chk_data->access_status != PT_ACCESS_OK) {
335 static int check_pt_64_cb(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data) {
336 struct pt_check_data * chk_data = (struct pt_check_data *)private_data;
340 *(chk_data->access_status) = v3_can_access_pml4e64((pml4e64_t *)page_ptr, vaddr, chk_data->access_type);
343 *(chk_data->access_status) = v3_can_access_pdpe64((pdpe64_t *)page_ptr, vaddr, chk_data->access_type);
346 *(chk_data->access_status) = v3_can_access_pde64((pde64_t *)page_ptr, vaddr, chk_data->access_type);
349 *(chk_data->access_status) = v3_can_access_pte64((pte64_t *)page_ptr, vaddr, chk_data->access_type);
356 PrintError("Inavlid page type (%s) in check pt 64 callback\n", v3_page_type_to_str(type));
360 if (chk_data->access_status != PT_ACCESS_OK) {
/*
 * Host-table access-check wrappers: walk the host (shadow) hierarchy rooted
 * at host_cr3 for `vaddr`, testing `access_type` permissions at each level
 * via check_pt_*_cb; the verdict is written through *access_status.
 */
int v3_check_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;

    access_data.access_type = access_type;
    access_data.access_status = access_status;

    return v3_drill_host_pt_32(info, host_cr3, vaddr, check_pt_32_cb, &access_data);

int v3_check_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;

    access_data.access_type = access_type;
    access_data.access_status = access_status;

    return v3_drill_host_pt_32pae(info, host_cr3, vaddr, check_pt_32pae_cb, &access_data);

int v3_check_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr, pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;

    access_data.access_type = access_type;
    access_data.access_status = access_status;

    return v3_drill_host_pt_64(info, host_cr3, vaddr, check_pt_64_cb, &access_data);
/*
 * Guest-table access-check wrappers: same as the host variants, but the
 * hierarchy rooted at guest_cr3 is walked through guest-physical-to-host
 * address translation.
 */
int v3_check_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
                         pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;

    access_data.access_type = access_type;
    access_data.access_status = access_status;

    return v3_drill_guest_pt_32(info, guest_cr3, vaddr, check_pt_32_cb, &access_data);

int v3_check_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
                            pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;

    access_data.access_type = access_type;
    access_data.access_status = access_status;

    return v3_drill_guest_pt_32pae(info, guest_cr3, vaddr, check_pt_32pae_cb, &access_data);

int v3_check_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
                         pf_error_t access_type, pt_access_status_t * access_status) {
    struct pt_check_data access_data;

    access_data.access_type = access_type;
    access_data.access_status = access_status;

    return v3_drill_guest_pt_64(info, guest_cr3, vaddr, check_pt_64_cb, &access_data);
439 * PAGE TABLE LOOKUP FUNCTIONS
441 * The value of entry is a return type:
442 * Page not present: *entry = 0
447 * 32 bit Page Table lookup functions
451 static pt_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry) {
452 pde32_t * pde_entry = &(pd[PDE32_INDEX(addr)]);
454 if (!pde_entry->present) {
456 return PT_ENTRY_NOT_PRESENT;
457 } else if (pde_entry->large_page) {
458 pde32_4MB_t * large_pde = (pde32_4MB_t *)pde_entry;
460 *entry = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
462 return PT_ENTRY_LARGE_PAGE;
464 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
465 return PT_ENTRY_PAGE;
471 /* Takes a virtual addr (addr) and returns the physical addr (entry) as defined in the page table
473 static pt_entry_type_t pte32_lookup(pte32_t * pt, addr_t addr, addr_t * entry) {
474 pte32_t * pte_entry = &(pt[PTE32_INDEX(addr)]);
476 if (!pte_entry->present) {
478 // PrintDebug("Lookup at non present page (index=%d)\n", PTE32_INDEX(addr));
479 return PT_ENTRY_NOT_PRESENT;
481 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
483 return PT_ENTRY_PAGE;
492 * 32 bit PAE Page Table lookup functions
495 static pt_entry_type_t pdpe32pae_lookup(pdpe32pae_t * pdp, addr_t addr, addr_t * entry) {
496 pdpe32pae_t * pdpe_entry = &(pdp[PDPE32PAE_INDEX(addr)]);
498 if (!pdpe_entry->present) {
500 return PT_ENTRY_NOT_PRESENT;
502 *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
503 return PT_ENTRY_PAGE;
507 static pt_entry_type_t pde32pae_lookup(pde32pae_t * pd, addr_t addr, addr_t * entry) {
508 pde32pae_t * pde_entry = &(pd[PDE32PAE_INDEX(addr)]);
510 if (!pde_entry->present) {
512 return PT_ENTRY_NOT_PRESENT;
513 } else if (pde_entry->large_page) {
514 pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)pde_entry;
516 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
518 return PT_ENTRY_LARGE_PAGE;
520 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
521 return PT_ENTRY_PAGE;
525 static pt_entry_type_t pte32pae_lookup(pte32pae_t * pt, addr_t addr, addr_t * entry) {
526 pte32pae_t * pte_entry = &(pt[PTE32PAE_INDEX(addr)]);
528 if (!pte_entry->present) {
530 return PT_ENTRY_NOT_PRESENT;
532 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
533 return PT_ENTRY_PAGE;
541 * 64 bit Page Table lookup functions
544 static pt_entry_type_t pml4e64_lookup(pml4e64_t * pml, addr_t addr, addr_t * entry) {
545 pml4e64_t * pml_entry = &(pml[PML4E64_INDEX(addr)]);
547 if (!pml_entry->present) {
549 return PT_ENTRY_NOT_PRESENT;
551 *entry = BASE_TO_PAGE_ADDR(pml_entry->pdp_base_addr);
552 return PT_ENTRY_PAGE;
/* Resolve the long-mode PDP entry covering `addr`.
 * Writes the referenced page directory's physical base through *entry.
 * 1GB mappings (PS bit set at this level) are detected but not supported;
 * the exact unsupported-path return is outside this excerpt — confirm it
 * does not fall through into the normal PT_ENTRY_PAGE path. */
static pt_entry_type_t pdpe64_lookup(pdpe64_t * pdp, addr_t addr, addr_t * entry) {
    pdpe64_t * pdpe_entry = &(pdp[PDPE64_INDEX(addr)]);

    if (!pdpe_entry->present) {
        return PT_ENTRY_NOT_PRESENT;
    } else if (pdpe_entry->large_page) {
        // 1GB page: large_page (PS) set at the PDPE level.
        PrintError("1 Gigabyte pages not supported\n");

    *entry = BASE_TO_PAGE_ADDR(pdpe_entry->pd_base_addr);
    return PT_ENTRY_PAGE;
572 static pt_entry_type_t pde64_lookup(pde64_t * pd, addr_t addr, addr_t * entry) {
573 pde64_t * pde_entry = &(pd[PDE64_INDEX(addr)]);
575 if (!pde_entry->present) {
577 return PT_ENTRY_NOT_PRESENT;
578 } else if (pde_entry->large_page) {
579 pde64_2MB_t * large_pde = (pde64_2MB_t *)pde_entry;
581 *entry = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
583 return PT_ENTRY_LARGE_PAGE;
585 *entry = BASE_TO_PAGE_ADDR(pde_entry->pt_base_addr);
586 return PT_ENTRY_PAGE;
590 static pt_entry_type_t pte64_lookup(pte64_t * pt, addr_t addr, addr_t * entry) {
591 pte64_t * pte_entry = &(pt[PTE64_INDEX(addr)]);
593 if (!pte_entry->present) {
595 return PT_ENTRY_NOT_PRESENT;
597 *entry = BASE_TO_PAGE_ADDR(pte_entry->page_base_addr);
598 return PT_ENTRY_PAGE;
605 static pt_access_status_t can_access_pt_entry(gen_pt_t * pt, pf_error_t access_type) {
606 if (pt->present == 0) {
607 return PT_ACCESS_NOT_PRESENT;
608 } else if ((pt->writable == 0) && (access_type.write == 1)) {
609 return PT_ACCESS_WRITE_ERROR;
610 } else if ((pt->user_page == 0) && (access_type.user == 1)) {
612 return PT_ACCESS_USER_ERROR;
621 * 32 bit access checks
/*
 * Per-level access-check wrappers: each indexes the entry covering `addr`
 * in its table and defers to the generic can_access_pt_entry() check
 * (valid because the present/writable/user bits share offsets across all
 * entry formats).
 */
pt_access_status_t inline v3_can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pde[PDE32_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pte[PTE32_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

/* 32 bit PAE access checks */
pt_access_status_t inline v3_can_access_pdpe32pae(pdpe32pae_t * pdpe, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE32PAE_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pde32pae(pde32pae_t * pde, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pde[PDE32PAE_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pte32pae(pte32pae_t * pte, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pte[PTE32PAE_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

/* 64 bit access checks */
pt_access_status_t inline v3_can_access_pml4e64(pml4e64_t * pmle, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pmle[PML4E64_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pdpe64(pdpe64_t * pdpe, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pdpe[PDPE64_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pde64(pde64_t * pde, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pde[PDE64_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);

pt_access_status_t inline v3_can_access_pte64(pte64_t * pte, addr_t addr, pf_error_t access_type) {
    gen_pt_t * entry = (gen_pt_t *)&pte[PTE64_INDEX(addr)];
    return can_access_pt_entry(entry, access_type);
/* We generate a page table to correspond to a given memory layout
 * pulling pages from the mem_list when necessary
 * If there are any gaps in the layout, we add them as unmapped pages
 */
pde32_t * create_passthrough_pts_32(struct guest_info * guest_info) {
    addr_t current_page_addr = 0;

    // One page for the directory; each populated PDE gets its own PT page.
    pde32_t * pde = V3_VAddr(V3_AllocPages(1));

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        pte32_t * pte = V3_VAddr(V3_AllocPages(1));

        for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
            struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);

            // Fully hooked (or unmapped) regions are left not-present so that
            // every access exits to the hook handler.
            (region->host_type == SHDW_REGION_FULL_HOOK)) {
                pte[j].user_page = 0;
                pte[j].write_through = 0;
                pte[j].cache_disable = 0;
                pte[j].global_page = 0;
                pte[j].page_base_addr = 0;

                // Write-hooked pages are mapped read-only so writes fault out.
                if (region->host_type == SHDW_REGION_WRITE_HOOK) {
                    PrintDebug("Marking Write hook host_addr %p as RO\n", (void *)current_page_addr);

                pte[j].user_page = 1;
                pte[j].write_through = 0;
                pte[j].cache_disable = 0;
                pte[j].global_page = 0;

                if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {

                // NOTE(review): magic ">> 12" — the 64-bit variant uses
                // PAGE_BASE_ADDR(host_addr) for the same conversion; prefer
                // the macro for consistency.
                pte[j].page_base_addr = host_addr >> 12;

            current_page_addr += PAGE_SIZE;

        // Nothing mapped in this 4MB slot: release the unused PT and leave
        // the PDE not-present.
        if (pte_present == 0) {
            V3_FreePage(V3_PAddr(pte));

            pde[i].user_page = 0;
            pde[i].write_through = 0;
            pde[i].cache_disable = 0;
            pde[i].large_page = 0;
            pde[i].global_page = 0;
            pde[i].pt_base_addr = 0;

            pde[i].user_page = 1;
            pde[i].write_through = 0;
            pde[i].cache_disable = 0;
            pde[i].large_page = 0;
            pde[i].global_page = 0;
            pde[i].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
/* We generate a page table to correspond to a given memory layout
 * pulling pages from the mem_list when necessary
 * If there are any gaps in the layout, we add them as unmapped pages
 */
pdpe32pae_t * create_passthrough_pts_32PAE(struct guest_info * guest_info) {
    addr_t current_page_addr = 0;

    // Top-level PDP table; zeroed so unpopulated entries read not-present.
    pdpe32pae_t * pdpe = V3_VAddr(V3_AllocPages(1));
    memset(pdpe, 0, PAGE_SIZE);

    for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
        pde32pae_t * pde = V3_VAddr(V3_AllocPages(1));

        for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
            pte32pae_t * pte = V3_VAddr(V3_AllocPages(1));

            for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
                struct v3_shadow_region * region = v3_get_shadow_region(guest_info, current_page_addr);

                // Fully hooked (or unmapped) pages stay not-present.
                (region->host_type == SHDW_REGION_FULL_HOOK)) {
                    pte[k].user_page = 0;
                    pte[k].write_through = 0;
                    pte[k].cache_disable = 0;
                    pte[k].global_page = 0;
                    pte[k].page_base_addr = 0;

                    // Write-hooked pages are mapped read-only.
                    if (region->host_type == SHDW_REGION_WRITE_HOOK) {

                    pte[k].user_page = 1;
                    pte[k].write_through = 0;
                    pte[k].cache_disable = 0;
                    pte[k].global_page = 0;

                    if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {

                    // NOTE(review): magic ">> 12" — prefer PAGE_BASE_ADDR()
                    // as used by the 64-bit variant.
                    pte[k].page_base_addr = host_addr >> 12;

                current_page_addr += PAGE_SIZE;

            // Empty 2MB slot: free the unused PT, leave PDE not-present.
            if (pte_present == 0) {
                V3_FreePage(V3_PAddr(pte));

                pde[j].user_page = 0;
                pde[j].write_through = 0;
                pde[j].cache_disable = 0;
                pde[j].large_page = 0;
                pde[j].global_page = 0;
                pde[j].pt_base_addr = 0;

                pde[j].user_page = 1;
                pde[j].write_through = 0;
                pde[j].cache_disable = 0;
                pde[j].large_page = 0;
                pde[j].global_page = 0;
                pde[j].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));

        // Empty 1GB slot: free the unused PD, leave PDPE not-present.
        if (pde_present == 0) {
            V3_FreePage(V3_PAddr(pde));

            pdpe[i].write_through = 0;
            pdpe[i].cache_disable = 0;
            pdpe[i].accessed = 0;
            pdpe[i].vmm_info = 0;
            pdpe[i].pd_base_addr = 0;

            pdpe[i].write_through = 0;
            pdpe[i].cache_disable = 0;
            pdpe[i].accessed = 0;
            pdpe[i].vmm_info = 0;
            pdpe[i].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
/* Build a 4-level passthrough (identity-style) hierarchy for the guest's
 * memory map, mirroring the 32-bit variants above.
 * NOTE(review): the bounds `i < 1` and `j < 20` populate only the first
 * PML4 entry and 20 PDP entries — i.e. at most 20GB of guest physical
 * address space. Confirm this matches the intended guest memory limit. */
pml4e64_t * create_passthrough_pts_64(struct guest_info * info) {
    addr_t current_page_addr = 0;

    pml4e64_t * pml = V3_VAddr(V3_AllocPages(1));

    for (i = 0; i < 1; i++) {          // NOTE(review): hard-coded — see above
        int pdpe_present = 0;
        pdpe64_t * pdpe = V3_VAddr(V3_AllocPages(1));

        for (j = 0; j < 20; j++) {     // NOTE(review): hard-coded — see above
            pde64_t * pde = V3_VAddr(V3_AllocPages(1));

            for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
                pte64_t * pte = V3_VAddr(V3_AllocPages(1));

                for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
                    struct v3_shadow_region * region = v3_get_shadow_region(info, current_page_addr);

                    // Fully hooked (or unmapped) pages stay not-present.
                    (region->host_type == SHDW_REGION_FULL_HOOK)) {
                        pte[m].user_page = 0;
                        pte[m].write_through = 0;
                        pte[m].cache_disable = 0;
                        pte[m].global_page = 0;
                        pte[m].page_base_addr = 0;

                        // Write-hooked pages are mapped read-only.
                        if (region->host_type == SHDW_REGION_WRITE_HOOK) {

                        pte[m].user_page = 1;
                        pte[m].write_through = 0;
                        pte[m].cache_disable = 0;
                        pte[m].global_page = 0;

                        if (guest_pa_to_host_pa(info, current_page_addr, &host_addr) == -1) {

                        pte[m].page_base_addr = PAGE_BASE_ADDR(host_addr);

                        //PrintPTE64(current_page_addr, &(pte[m]));

                    current_page_addr += PAGE_SIZE;

                // Empty 2MB slot: free the unused PT, leave PDE not-present.
                if (pte_present == 0) {
                    V3_FreePage(V3_PAddr(pte));

                    pde[k].writable = 0;
                    pde[k].user_page = 0;
                    pde[k].write_through = 0;
                    pde[k].cache_disable = 0;
                    pde[k].accessed = 0;
                    pde[k].large_page = 0;
                    //pde[k].global_page = 0;
                    pde[k].vmm_info = 0;
                    pde[k].pt_base_addr = 0;

                    pde[k].writable = 1;
                    pde[k].user_page = 1;
                    pde[k].write_through = 0;
                    pde[k].cache_disable = 0;
                    pde[k].accessed = 0;
                    pde[k].large_page = 0;
                    //pde[k].global_page = 0;
                    pde[k].vmm_info = 0;
                    pde[k].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));

            // Empty 1GB slot: free the unused PD, leave PDPE not-present.
            if (pde_present == 0) {
                V3_FreePage(V3_PAddr(pde));

                pdpe[j].present = 0;
                pdpe[j].writable = 0;
                pdpe[j].user_page = 0;
                pdpe[j].write_through = 0;
                pdpe[j].cache_disable = 0;
                pdpe[j].accessed = 0;
                pdpe[j].large_page = 0;
                //pdpe[j].global_page = 0;
                pdpe[j].vmm_info = 0;
                pdpe[j].pd_base_addr = 0;

                pdpe[j].present = 1;
                pdpe[j].writable = 1;
                pdpe[j].user_page = 1;
                pdpe[j].write_through = 0;
                pdpe[j].cache_disable = 0;
                pdpe[j].accessed = 0;
                pdpe[j].large_page = 0;
                //pdpe[j].global_page = 0;
                pdpe[j].vmm_info = 0;
                pdpe[j].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));

        PrintDebug("PML index=%d\n", i);

        // Empty 512GB slot: free the unused PDP, leave PML4E not-present.
        if (pdpe_present == 0) {
            V3_FreePage(V3_PAddr(pdpe));

            pml[i].writable = 0;
            pml[i].user_page = 0;
            pml[i].write_through = 0;
            pml[i].cache_disable = 0;
            pml[i].accessed = 0;
            pml[i].reserved = 0;
            //pml[i].large_page = 0;
            //pml[i].global_page = 0;
            pml[i].vmm_info = 0;
            pml[i].pdp_base_addr = 0;

            pml[i].writable = 1;
            pml[i].user_page = 1;
            pml[i].write_through = 0;
            pml[i].cache_disable = 0;
            pml[i].accessed = 0;
            pml[i].reserved = 0;
            //pml[i].large_page = 0;
            //pml[i].global_page = 0;
            pml[i].vmm_info = 0;
            pml[i].pdp_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pdpe));
1101 int v3_drill_host_pt_32(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
1102 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1103 void * private_data) {
1104 pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
1105 addr_t host_pde_pa = CR3_TO_PDE32_PA(host_cr3);
1106 addr_t host_pte_pa = 0;
1110 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, host_pde_pa, private_data)) != 0) {
1111 return (ret == -1) ? -1 : PAGE_PD32;
1114 switch (pde32_lookup(host_pde, vaddr, &host_pte_pa)) {
1115 case PT_ENTRY_NOT_PRESENT:
1117 case PT_ENTRY_LARGE_PAGE:
1118 if ((ret == callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
1119 return (ret == -1) ? -1 : PAGE_4MB;
1123 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
1124 return (ret == -1) ? -1 : PAGE_PT32;
1127 if (pte32_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1130 if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
1131 return (ret == -1) ? -1 : PAGE_4KB;
/* Walk the host (shadow) 32-bit PAE hierarchy rooted at host_cr3 for
 * `vaddr`, invoking `callback` at each level (PDP, PD, then PT/2MB page,
 * then 4KB page). A -1 from the callback propagates as -1; other nonzero
 * values become the page level reached. */
int v3_drill_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
                           int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
                           void * private_data) {
    pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
    addr_t host_pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);
    addr_t host_pde_pa = 0;
    addr_t host_pte_pa = 0;

    if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, host_pdpe_pa, private_data)) != 0) {
        return (ret == -1) ? -1 : PAGE_PDP32PAE;
    }

    switch (pdpe32pae_lookup(host_pdpe, vaddr, &host_pde_pa)) {
        case PT_ENTRY_NOT_PRESENT:

            // BUG(review): misplaced paren — `ret = callback(...) != 0`
            // stores the comparison in ret, so -1 errors surface as
            // PAGE_PD32PAE. Should be `(ret = callback(...)) != 0`.
            if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
                return (ret == -1) ? -1 : PAGE_PD32PAE;
            }

            switch (pde32pae_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
                case PT_ENTRY_NOT_PRESENT:
                case PT_ENTRY_LARGE_PAGE:
                    // BUG(review): `ret ==` should be `ret =` — the callback
                    // result is discarded and uninitialized ret is compared.
                    if ((ret == callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
                        return (ret == -1) ? -1 : PAGE_2MB;
                    }

                    // BUG(review): same misplaced paren as PAGE_PD32PAE above.
                    if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
                        return (ret == -1) ? -1 : PAGE_PT32PAE;
                    }

                    if (pte32pae_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {

                    if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
                        return (ret == -1) ? -1 : PAGE_4KB;
                    }

    // should never get here
/* Walk the host (shadow) 4-level long-mode hierarchy rooted at host_cr3 for
 * `vaddr`, invoking `callback` at each level (PML4, PDP, then PD/1GB page,
 * then PT/2MB page, then 4KB page). A -1 from the callback propagates as
 * -1; other nonzero values become the page level reached. */
int v3_drill_host_pt_64(struct guest_info * info, v3_reg_t host_cr3, addr_t vaddr,
                        int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
                        void * private_data) {
    pml4e64_t * host_pmle = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
    addr_t host_pmle_pa = CR3_TO_PML4E64_PA(host_cr3);
    addr_t host_pdpe_pa = 0;
    addr_t host_pde_pa = 0;
    addr_t host_pte_pa = 0;

    if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pmle, host_pmle_pa, private_data)) != 0) {
        return (ret == -1) ? -1 : PAGE_PML464;
    }

    switch(pml4e64_lookup(host_pmle, vaddr, &host_pdpe_pa)) {
        case PT_ENTRY_NOT_PRESENT:

            if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)V3_VAddr((void *)host_pdpe_pa), host_pdpe_pa, private_data)) != 0) {
                return (ret == -1) ? -1 : PAGE_PDP64;
            }

            switch(pdpe64_lookup(V3_VAddr((void *)host_pdpe_pa), vaddr, &host_pde_pa)) {
                case PT_ENTRY_NOT_PRESENT:
                case PT_ENTRY_LARGE_PAGE:
                    // BUG(review): `ret ==` should be `ret =` — callback
                    // result discarded, uninitialized ret compared.
                    if ((ret == callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
                        return (ret == -1) ? -1 : PAGE_1GB;
                    }
                    PrintError("1 Gigabyte Pages not supported\n");

                    // BUG(review): misplaced paren — `ret = callback(...) != 0`
                    // stores the comparison, so -1 surfaces as PAGE_PD64.
                    if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data) != 0)) {
                        return (ret == -1) ? -1 : PAGE_PD64;
                    }

                    switch (pde64_lookup(V3_VAddr((void *)host_pde_pa), vaddr, &host_pte_pa)) {
                        case PT_ENTRY_NOT_PRESENT:
                        case PT_ENTRY_LARGE_PAGE:
                            // BUG(review): `ret ==` should be `ret =` (as above).
                            if ((ret == callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
                                return (ret == -1) ? -1 : PAGE_2MB;
                            }

                            // BUG(review): misplaced paren (as above).
                            if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data) != 0)) {
                                return (ret == -1) ? -1 : PAGE_PT64;
                            }

                            if (pte64_lookup(V3_VAddr((void *)host_pte_pa), vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {

                            if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)PAGE_BASE_ADDR(page_pa)), page_pa, private_data)) != 0) {
                                return (ret == -1) ? -1 : PAGE_4KB;
                            }

    // should never get here
1271 int v3_drill_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1272 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1273 void * private_data) {
1274 addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
1275 pde32_t * guest_pde = NULL;
1276 addr_t guest_pte_pa = 0;
1280 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1281 PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
1282 (void *)guest_pde_pa);
1286 if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1287 return (ret == -1) ? -1 : PAGE_PD32;
1290 switch (pde32_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1291 case PT_ENTRY_NOT_PRESENT:
1293 case PT_ENTRY_LARGE_PAGE:
1295 addr_t large_page_pa = (addr_t)guest_pte_pa;
1296 addr_t large_page_va = 0;
1298 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1299 PrintError("Could not get virtual address of Guest Page 4MB (PA=%p)\n",
1300 (void *)large_page_va);
1305 if ((ret == callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1306 return (ret == -1) ? -1 : PAGE_4MB;
1312 pte32_t * guest_pte = NULL;
1315 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t*)&guest_pte) == -1) {
1316 PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",
1317 (void *)guest_pte_pa);
1321 if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1322 return (ret == -1) ? -1 : PAGE_PT32;
1325 if (pte32_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1330 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1331 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1336 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1337 return (ret == -1) ? -1 : PAGE_4KB;
1344 // should never get here
1345 PrintError("End of drill function (guest 32)... Should never have gotten here...\n");
1351 int v3_drill_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1352 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1353 void * private_data) {
1354 addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
1355 pdpe32pae_t * guest_pdpe = 0;
1356 addr_t guest_pde_pa = 0;
1359 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t*)&guest_pdpe) == -1) {
1360 PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
1361 (void *)guest_pdpe_pa);
1365 if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {
1366 return (ret == -1) ? -1 : PAGE_PDP32PAE;
1369 switch (pdpe32pae_lookup(guest_pdpe, vaddr, &guest_pde_pa))
1371 case PT_ENTRY_NOT_PRESENT:
1375 pde32pae_t * guest_pde = NULL;
1376 addr_t guest_pte_pa = 0;
1378 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1379 PrintError("Could not get virtual Address of Guest PDE32PAE (PA=%p)\n",
1380 (void *)guest_pde_pa);
1384 if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1385 return (ret == -1) ? -1 : PAGE_PD32PAE;
1388 switch (pde32pae_lookup(guest_pde, vaddr, &guest_pte_pa))
1390 case PT_ENTRY_NOT_PRESENT:
1392 case PT_ENTRY_LARGE_PAGE:
1394 addr_t large_page_pa = (addr_t)guest_pte_pa;
1395 addr_t large_page_va = 0;
1397 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1398 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1399 (void *)large_page_va);
1403 if ((ret == callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1404 return (ret == -1) ? -1 : PAGE_2MB;
1410 pte32pae_t * guest_pte = NULL;
1413 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1414 PrintError("Could not get virtual Address of Guest PTE32PAE (PA=%p)\n",
1415 (void *)guest_pte_pa);
1419 if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1420 return (ret == -1) ? -1 : PAGE_PT32PAE;
1423 if (pte32pae_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1428 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1429 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1434 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1435 return (ret == -1) ? -1 : PAGE_4KB;
1443 PrintError("Invalid page type for PD32PAE\n");
1447 // should never get here
1448 PrintError("End of drill function (guest 32pae)... Should never have gotten here...\n");
1452 int v3_drill_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3, addr_t vaddr,
1453 int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
1454 void * private_data) {
1455 addr_t guest_pml4_pa = CR3_TO_PML4E64_PA(guest_cr3);
1456 pml4e64_t * guest_pmle = 0;
1457 addr_t guest_pdpe_pa = 0;
1460 if (guest_pa_to_host_va(info, guest_pml4_pa, (addr_t*)&guest_pmle) == -1) {
1461 PrintError("Could not get virtual address of Guest PML4E64 (PA=%p)\n",
1462 (void *)guest_pml4_pa);
1466 if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pmle, guest_pml4_pa, private_data)) != 0) {
1467 return (ret == -1) ? -1 : PAGE_PML464;
1470 switch (pml4e64_lookup(guest_pmle, vaddr, &guest_pdpe_pa)) {
1471 case PT_ENTRY_NOT_PRESENT:
1475 pdpe64_t * guest_pdp = NULL;
1476 addr_t guest_pde_pa = 0;
1478 if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdp) == -1) {
1479 PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",
1480 (void *)guest_pdpe_pa);
1484 if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)guest_pdp, guest_pdpe_pa, private_data)) != 0) {
1485 return (ret == -1) ? -1 : PAGE_PDP64;
1488 switch (pdpe64_lookup(guest_pdp, vaddr, &guest_pde_pa)) {
1489 case PT_ENTRY_NOT_PRESENT:
1491 case PT_ENTRY_LARGE_PAGE:
1493 addr_t large_page_pa = (addr_t)guest_pde_pa;
1494 addr_t large_page_va = 0;
1496 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1497 PrintDebug("Could not get virtual address of Guest Page 1GB (PA=%p)\n",
1498 (void *)large_page_va);
1502 if ((ret == callback(info, PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1503 return (ret == -1) ? -1 : PAGE_1GB;
1505 PrintError("1 Gigabyte Pages not supported\n");
1510 pde64_t * guest_pde = NULL;
1511 addr_t guest_pte_pa = 0;
1513 if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
1514 PrintError("Could not get virtual address of guest PDE64 (PA=%p)\n",
1515 (void *)guest_pde_pa);
1519 if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {
1520 return (ret == -1) ? -1 : PAGE_PD64;
1523 switch (pde64_lookup(guest_pde, vaddr, &guest_pte_pa)) {
1524 case PT_ENTRY_NOT_PRESENT:
1526 case PT_ENTRY_LARGE_PAGE:
1528 addr_t large_page_pa = (addr_t)guest_pte_pa;
1529 addr_t large_page_va = 0;
1531 if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
1532 PrintDebug("Could not get virtual address of Guest Page 2MB (PA=%p)\n",
1533 (void *)large_page_va);
1537 if ((ret == callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
1538 return (ret == -1) ? -1 : PAGE_2MB;
1544 pte64_t * guest_pte = NULL;
1547 if (guest_pa_to_host_va(info, guest_pte_pa, (addr_t *)&guest_pte) == -1) {
1548 PrintError("Could not get virtual address of guest PTE64 (PA=%p)\n",
1549 (void *)guest_pte_pa);
1553 if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)guest_pte, guest_pte_pa, private_data) != 0)) {
1554 return (ret == -1) ? -1 : PAGE_PT64;
1557 if (pte64_lookup(guest_pte, vaddr, &page_pa) == PT_ENTRY_NOT_PRESENT) {
1562 if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
1563 PrintError("Could not get virtual address of Guest Page 4KB (PA=%p)\n",
1568 if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {
1569 return (ret == -1) ? -1 : PAGE_4KB;
1583 // should never get here
1584 PrintError("End of drill function (guest 64)... Should never have gotten here...\n");
/*
 * Walk the guest's entire 32-bit (non-PAE) page-table hierarchy rooted at
 * 'guest_cr3', invoking 'callback' for the PD itself, for every present PT,
 * and for every present 4MB / 4KB page.  Guest tables and pages are
 * translated to host-virtual pointers via guest_pa_to_host_va() before being
 * passed to the callback; a failed data-page translation is deliberately
 * tolerated (page may be unmapped or hooked) and the walk continues.
 * NOTE(review): vaddr/ret/i/j declarations and the returns after failing
 * callbacks are on elided lines — verify against the full file.
 */
int v3_walk_guest_pt_32(struct guest_info * info, v3_reg_t guest_cr3,
                        int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
                        void * private_data) {
    addr_t guest_pde_pa = CR3_TO_PDE32_PA(guest_cr3);
    pde32_t * guest_pde = NULL;

    // A walk with no callback is meaningless; reject it.
    PrintError("Call back was not specified\n");

    if (guest_pa_to_host_va(info, guest_pde_pa, (addr_t *)&guest_pde) == -1) {
        PrintError("Could not get virtual address of Guest PDE32 (PA=%p)\n",
                   (void *)guest_pde_pa);

    if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)guest_pde, guest_pde_pa, private_data)) != 0) {

    // One PDE maps 4MB of virtual address space (either directly as a large
    // page or via a 1024-entry PT of 4KB pages).
    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        if (guest_pde[i].present) {
            if (guest_pde[i].large_page) {
                pde32_4MB_t * large_pde = (pde32_4MB_t *)&(guest_pde[i]);
                addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);
                addr_t large_page_va = 0;

                if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
                    PrintDebug("Could not get virtual address of Guest 4MB Page (PA=%p)\n",
                               (void *)large_page_pa);
                    // We'll let it through for data pages because they may be unmapped or hooked

                if ((ret = callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {

                vaddr += PAGE_SIZE_4MB;

                addr_t pte_pa = BASE_TO_PAGE_ADDR(guest_pde[i].pt_base_addr);
                pte32_t * tmp_pte = NULL;

                if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
                    PrintError("Could not get virtual address of Guest PTE32 (PA=%p)\n",

                if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {

                for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
                    if (tmp_pte[j].present) {
                        addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);

                        if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
                            PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
                            // We'll let it through for data pages because they may be unmapped or hooked

                        if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {

                    // Advance one 4KB page per PTE slot, present or not.
                    vaddr += PAGE_SIZE_4KB;

            // Non-present PDE: skip the whole 4MB region it would map.
            vaddr += PAGE_SIZE_4MB;
/*
 * Walk the guest's entire 32-bit PAE page-table hierarchy rooted at
 * 'guest_cr3', invoking 'callback' for the PDP, every present PD and PT,
 * and every present 2MB / 4KB page.  Guest structures are translated to
 * host-virtual pointers with guest_pa_to_host_va() first; a failed
 * data-page translation is tolerated (page may be unmapped or hooked).
 * NOTE(review): vaddr/ret/i/j/k declarations and the post-callback returns
 * are on elided lines — verify against the full file.
 */
int v3_walk_guest_pt_32pae(struct guest_info * info, v3_reg_t guest_cr3,
                           int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
                           void * private_data) {
    addr_t guest_pdpe_pa = CR3_TO_PDPE32PAE_PA(guest_cr3);
    pdpe32pae_t * guest_pdpe = NULL;

    // A walk with no callback is meaningless; reject it.
    PrintError("Call back was not specified\n");

    if (guest_pa_to_host_va(info, guest_pdpe_pa, (addr_t *)&guest_pdpe) == -1) {
        PrintError("Could not get virtual address of Guest PDPE32PAE (PA=%p)\n",
                   (void *)guest_pdpe_pa);

    if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)guest_pdpe, guest_pdpe_pa, private_data)) != 0) {

    // 4 PDP entries, each mapping 1GB via a 512-entry PD.
    for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
        if (guest_pdpe[i].present) {
            addr_t pde_pa = BASE_TO_PAGE_ADDR(guest_pdpe[i].pd_base_addr);
            pde32pae_t * tmp_pde = NULL;

            if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
                PrintError("Could not get virtual address of Guest PDE32PAE (PA=%p)\n",

            if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {

            // Each PDE maps 2MB: either a large page or a PT of 4KB pages.
            for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
                if (tmp_pde[j].present) {
                    if (tmp_pde[j].large_page) {
                        pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
                        addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
                        addr_t large_page_va = 0;

                        if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
                            PrintDebug("Could not get virtual address of Guest 2MB Page (PA=%p)\n",
                                       (void *)large_page_pa);
                            // We'll let it through for data pages because they may be unmapped or hooked

                        if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {

                        vaddr += PAGE_SIZE_2MB;

                        addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
                        pte32pae_t * tmp_pte = NULL;

                        if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
                            PrintError("Could not get virtual address of Guest PTE32PAE (PA=%p)\n",

                        if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {

                        for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
                            if (tmp_pte[k].present) {
                                addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);

                                if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
                                    PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
                                    // We'll let it through for data pages because they may be unmapped or hooked

                                if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {

                            // One 4KB step per PTE slot, present or not.
                            vaddr += PAGE_SIZE_4KB;

                    // Non-present PDE: skip its 2MB region.
                    vaddr += PAGE_SIZE_2MB;

            // Non-present PDP entry: skip the whole 1GB it would map.
            vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
/*
 * Walk the guest's entire 64-bit (long mode) page-table hierarchy rooted at
 * 'guest_cr3', invoking 'callback' for the PML4, every present PDP/PD/PT,
 * and every present 1GB / 2MB / 4KB page.  Guest structures are translated
 * to host-virtual pointers with guest_pa_to_host_va() first; a failed
 * data-page translation is tolerated (page may be unmapped or hooked).
 * NOTE(review): vaddr/ret/i/j/k/m declarations and the post-callback
 * returns are on elided lines — verify against the full file.
 */
int v3_walk_guest_pt_64(struct guest_info * info, v3_reg_t guest_cr3,
                        int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
                        void * private_data) {
    addr_t guest_pml_pa = CR3_TO_PML4E64_PA(guest_cr3);
    pml4e64_t * guest_pml = NULL;

    // A walk with no callback is meaningless; reject it.
    PrintError("Call back was not specified\n");

    if (guest_pa_to_host_va(info, guest_pml_pa, (addr_t *)&guest_pml) == -1) {
        PrintError("Could not get virtual address of Guest PML464 (PA=%p)\n",

    if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)guest_pml, guest_pml_pa, private_data)) != 0) {

    for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
        if (guest_pml[i].present) {
            addr_t pdpe_pa = BASE_TO_PAGE_ADDR(guest_pml[i].pdp_base_addr);
            pdpe64_t * tmp_pdpe = NULL;

            if (guest_pa_to_host_va(info, pdpe_pa, (addr_t *)&tmp_pdpe) == -1) {
                PrintError("Could not get virtual address of Guest PDPE64 (PA=%p)\n",

            if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {

            // Each PDP entry maps 1GB: either a 1GB large page or a PD.
            for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
                if (tmp_pdpe[j].present) {
                    if (tmp_pdpe[j].large_page) {
                        pdpe64_1GB_t * large_pdpe = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
                        addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdpe->page_base_addr);
                        addr_t large_page_va = 0;

                        if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
                            PrintDebug("Could not get virtual address of Guest 1GB page (PA=%p)\n",
                                       (void *)large_page_pa);
                            // We'll let it through for data pages because they may be unmapped or hooked

                        if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)large_page_va, large_page_pa, private_data)) != 0) {

                        vaddr += PAGE_SIZE_1GB;

                        addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
                        pde64_t * tmp_pde = NULL;

                        if (guest_pa_to_host_va(info, pde_pa, (addr_t *)&tmp_pde) == -1) {
                            PrintError("Could not get virtual address of Guest PDE64 (PA=%p)\n",

                        if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {

                        // Each PDE maps 2MB: either a large page or a PT.
                        for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
                            if (tmp_pde[k].present) {
                                if (tmp_pde[k].large_page) {
                                    pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
                                    addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);
                                    addr_t large_page_va = 0;

                                    if (guest_pa_to_host_va(info, large_page_pa, &large_page_va) == -1) {
                                        PrintDebug("Could not get virtual address of Guest 2MB page (PA=%p)\n",
                                                   (void *)large_page_pa);
                                        // We'll let it through for data pages because they may be unmapped or hooked

                                    if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {

                                    vaddr += PAGE_SIZE_2MB;

                                    addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
                                    pte64_t * tmp_pte = NULL;

                                    if (guest_pa_to_host_va(info, pte_pa, (addr_t *)&tmp_pte) == -1) {
                                        PrintError("Could not get virtual address of Guest PTE64 (PA=%p)\n",

                                    if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {

                                    for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
                                        if (tmp_pte[m].present) {
                                            addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);

                                            if (guest_pa_to_host_va(info, page_pa, &page_va) == -1) {
                                                PrintDebug("Could not get virtual address of Guest 4KB Page (PA=%p)\n",
                                                // We'll let it through for data pages because they may be unmapped or hooked

                                            if ((ret = callback(info, PAGE_4KB, vaddr, page_va, page_pa, private_data)) != 0) {

                                        // One 4KB step per PTE slot, present or not.
                                        vaddr += PAGE_SIZE_4KB;

                                // Non-present PDE: skip its 2MB region.
                                vaddr += PAGE_SIZE_2MB;

                        // Non-present PDP entry: skip its 1GB region.
                        vaddr += PAGE_SIZE_1GB;

            // Non-present PML4 entry: skip the full 512GB it would map.
            // (Cast to ullong_t so the multiply doesn't overflow 32 bits.)
            vaddr += ((ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES);
/*
 * Walk a HOST 32-bit (non-PAE) page-table hierarchy rooted at 'host_cr3',
 * invoking 'callback' for the PD, every present PT, and every present
 * 4MB / 4KB page.  Unlike the guest walks above, host physical addresses
 * are converted with V3_VAddr() directly — no guest_pa_to_host_va lookup
 * and no translation-failure path.
 * NOTE(review): vaddr/ret/i/j declarations and the post-callback returns
 * are on elided lines — verify against the full file.
 */
int v3_walk_host_pt_32(struct guest_info * info, v3_reg_t host_cr3,
                       int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
                       void * private_data) {
    pde32_t * host_pde = (pde32_t *)CR3_TO_PDE32_VA(host_cr3);
    addr_t pde_pa = CR3_TO_PDE32_PA(host_cr3);

    // A walk with no callback is meaningless; reject it.
    PrintError("Call back was not specified\n");

    if ((ret = callback(info, PAGE_PD32, vaddr, (addr_t)host_pde, pde_pa, private_data)) != 0) {

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        if (host_pde[i].present) {
            if (host_pde[i].large_page) {
                pde32_4MB_t * large_pde = (pde32_4MB_t *)&(host_pde[i]);
                addr_t large_page_pa = BASE_TO_PAGE_ADDR_4MB(large_pde->page_base_addr);

                if ((ret = callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {

                vaddr += PAGE_SIZE_4MB;

                addr_t pte_pa = BASE_TO_PAGE_ADDR(host_pde[i].pt_base_addr);
                pte32_t * tmp_pte = (pte32_t *)V3_VAddr((void *)pte_pa);

                if ((ret = callback(info, PAGE_PT32, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {

                for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
                    if (tmp_pte[j].present) {
                        addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[j].page_base_addr);
                        if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {

                    // One 4KB step per PTE slot, present or not.
                    vaddr += PAGE_SIZE_4KB;

            // Non-present PDE: skip its 4MB region.
            vaddr += PAGE_SIZE_4MB;
/*
 * Walk a HOST 32-bit PAE page-table hierarchy rooted at 'host_cr3',
 * invoking 'callback' for the PDP, every present PD and PT, and every
 * present 2MB / 4KB page.  Host physical addresses are converted with
 * V3_VAddr() directly (no guest translation, no failure path).
 * NOTE(review): vaddr/ret/i/j/k declarations and the post-callback returns
 * are on elided lines — verify against the full file.
 */
int v3_walk_host_pt_32pae(struct guest_info * info, v3_reg_t host_cr3,
                          int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
                          void * private_data) {
    pdpe32pae_t * host_pdpe = (pdpe32pae_t *)CR3_TO_PDPE32PAE_VA(host_cr3);
    addr_t pdpe_pa = CR3_TO_PDPE32PAE_PA(host_cr3);

    // A walk with no callback is meaningless; reject it.
    PrintError("Callback was not specified\n");

    if ((ret = callback(info, PAGE_PDP32PAE, vaddr, (addr_t)host_pdpe, pdpe_pa, private_data)) != 0) {

    for (i = 0; i < MAX_PDPE32PAE_ENTRIES; i++) {
        if (host_pdpe[i].present) {
            addr_t pde_pa = BASE_TO_PAGE_ADDR(host_pdpe[i].pd_base_addr);
            pde32pae_t * tmp_pde = (pde32pae_t *)V3_VAddr((void *)pde_pa);

            if ((ret = callback(info, PAGE_PD32PAE, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {

            // Each PDE maps 2MB: either a large page or a PT of 4KB pages.
            for (j = 0; j < MAX_PDE32PAE_ENTRIES; j++) {
                if (tmp_pde[j].present) {

                    if (tmp_pde[j].large_page) {
                        pde32pae_2MB_t * large_pde = (pde32pae_2MB_t *)&(tmp_pde[j]);
                        addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);

                        if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {

                        vaddr += PAGE_SIZE_2MB;

                        addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[j].pt_base_addr);
                        pte32pae_t * tmp_pte = (pte32pae_t *)V3_VAddr((void *)pte_pa);

                        if ((ret = callback(info, PAGE_PT32PAE, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {

                        for (k = 0; k < MAX_PTE32PAE_ENTRIES; k++) {
                            if (tmp_pte[k].present) {
                                addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[k].page_base_addr);
                                if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {

                            // One 4KB step per PTE slot, present or not.
                            vaddr += PAGE_SIZE_4KB;

                    // Non-present PDE: skip its 2MB region.
                    vaddr += PAGE_SIZE_2MB;

            // Non-present PDP entry: skip the whole 1GB it would map.
            vaddr += PAGE_SIZE_2MB * MAX_PDE32PAE_ENTRIES;
/*
 * Walk a HOST 64-bit (long mode) page-table hierarchy rooted at 'host_cr3',
 * invoking 'callback' for the PML4, every present PDP/PD/PT, and every
 * present 1GB / 2MB / 4KB page.  Host physical addresses are converted with
 * V3_VAddr() directly (no guest translation, no failure path).
 * NOTE(review): vaddr/ret/i/j/k/m declarations and the post-callback
 * returns are on elided lines — verify against the full file.
 */
int v3_walk_host_pt_64(struct guest_info * info, v3_reg_t host_cr3,
                       int (*callback)(struct guest_info * info, page_type_t type, addr_t vaddr, addr_t page_ptr, addr_t page_pa, void * private_data),
                       void * private_data) {
    pml4e64_t * host_pml = (pml4e64_t *)CR3_TO_PML4E64_VA(host_cr3);
    addr_t pml_pa = CR3_TO_PML4E64_PA(host_cr3);

    // A walk with no callback is meaningless; reject it.
    PrintError("Callback was not specified\n");

    if ((ret = callback(info, PAGE_PML464, vaddr, (addr_t)host_pml, pml_pa, private_data)) != 0) {

    for (i = 0; i < MAX_PML4E64_ENTRIES; i++) {
        if (host_pml[i].present) {
            addr_t pdpe_pa = BASE_TO_PAGE_ADDR(host_pml[i].pdp_base_addr);
            pdpe64_t * tmp_pdpe = (pdpe64_t *)V3_VAddr((void *)pdpe_pa);

            if ((ret = callback(info, PAGE_PDP64, vaddr, (addr_t)tmp_pdpe, pdpe_pa, private_data)) != 0) {

            // Each PDP entry maps 1GB: either a 1GB large page or a PD.
            for (j = 0; j < MAX_PDPE64_ENTRIES; j++) {
                if (tmp_pdpe[j].present) {
                    if (tmp_pdpe[j].large_page) {
                        pdpe64_1GB_t * large_pdp = (pdpe64_1GB_t *)&(tmp_pdpe[j]);
                        addr_t large_page_pa = BASE_TO_PAGE_ADDR_1GB(large_pdp->page_base_addr);

                        if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {

                        vaddr += PAGE_SIZE_1GB;

                        addr_t pde_pa = BASE_TO_PAGE_ADDR(tmp_pdpe[j].pd_base_addr);
                        pde64_t * tmp_pde = (pde64_t *)V3_VAddr((void *)pde_pa);

                        if ((ret = callback(info, PAGE_PD64, vaddr, (addr_t)tmp_pde, pde_pa, private_data)) != 0) {

                        // Each PDE maps 2MB: either a large page or a PT.
                        for (k = 0; k < MAX_PDE64_ENTRIES; k++) {
                            if (tmp_pde[k].present) {
                                if (tmp_pde[k].large_page) {
                                    pde64_2MB_t * large_pde = (pde64_2MB_t *)&(tmp_pde[k]);
                                    addr_t large_page_pa = BASE_TO_PAGE_ADDR_2MB(large_pde->page_base_addr);

                                    if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)large_page_pa), large_page_pa, private_data)) != 0) {

                                    vaddr += PAGE_SIZE_2MB;

                                    addr_t pte_pa = BASE_TO_PAGE_ADDR(tmp_pde[k].pt_base_addr);
                                    pte64_t * tmp_pte = (pte64_t *)V3_VAddr((void *)pte_pa);

                                    if ((ret = callback(info, PAGE_PT64, vaddr, (addr_t)tmp_pte, pte_pa, private_data)) != 0) {

                                    for (m = 0; m < MAX_PTE64_ENTRIES; m++) {
                                        if (tmp_pte[m].present) {
                                            addr_t page_pa = BASE_TO_PAGE_ADDR(tmp_pte[m].page_base_addr);
                                            if ((ret = callback(info, PAGE_4KB, vaddr, (addr_t)V3_VAddr((void *)page_pa), page_pa, private_data)) != 0) {

                                        // One 4KB step per PTE slot, present or not.
                                        vaddr += PAGE_SIZE_4KB;

                                // Non-present PDE: skip its 2MB region.
                                vaddr += PAGE_SIZE_2MB;

                        // Non-present PDP entry: skip its 1GB region.
                        vaddr += PAGE_SIZE_1GB;

            // Non-present PML4 entry: skip the full 512GB it would map
            // (widened to ullong_t so the multiply doesn't overflow).
            vaddr += (ullong_t)PAGE_SIZE_1GB * (ullong_t)MAX_PDPE64_ENTRIES;
/* Human-readable names for each page_type_t value; returned (as shared,
 * read-only storage) by v3_page_type_to_str() below. */
static const uchar_t PAGE_4KB_STR[] = "4KB_PAGE";
static const uchar_t PAGE_2MB_STR[] = "2MB_PAGE";
static const uchar_t PAGE_4MB_STR[] = "4MB_PAGE";
static const uchar_t PAGE_1GB_STR[] = "1GB_PAGE";
static const uchar_t PAGE_PT32_STR[] = "32 Bit PT";
static const uchar_t PAGE_PD32_STR[] = "32 Bit PD";
static const uchar_t PAGE_PDP32PAE_STR[] = "32 Bit PAE PDP";
static const uchar_t PAGE_PD32PAE_STR[] = "32 Bit PAE PD";
static const uchar_t PAGE_PT32PAE_STR[] = "32 Bit PAE PT";
static const uchar_t PAGE_PML464_STR[] = "64 Bit PML4";
static const uchar_t PAGE_PDP64_STR[] = "64 Bit PDP";
static const uchar_t PAGE_PD64_STR[] = "64 Bit PD";
static const uchar_t PAGE_PT64_STR[] = "64 Bit PT";
2163 const uchar_t * v3_page_type_to_str(page_type_t type) {
2166 return PAGE_4KB_STR;
2168 return PAGE_2MB_STR;
2170 return PAGE_4MB_STR;
2172 return PAGE_1GB_STR;
2174 return PAGE_PT32_STR;
2176 return PAGE_PD32_STR;
2178 return PAGE_PDP32PAE_STR;
2180 return PAGE_PD32PAE_STR;
2182 return PAGE_PT32PAE_STR;
2184 return PAGE_PML464_STR;
2186 return PAGE_PDP64_STR;
2188 return PAGE_PD64_STR;
2190 return PAGE_PT64_STR;