/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_shadow_paging.h>

#include <palacios/vmm_hashtable.h>

#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_ctrl_regs.h>

#ifndef DEBUG_SHADOW_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

DEFINE_HASHTABLE_INSERT(add_cr3_to_cache, addr_t, struct hashtable *);
DEFINE_HASHTABLE_SEARCH(find_cr3_in_cache, addr_t, struct hashtable *);
DEFINE_HASHTABLE_REMOVE(del_cr3_from_cache, addr_t, struct hashtable *, 0);

DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);

static uint_t pte_hash_fn(addr_t key) {
    return hash_long(key, 32);
}

static int pte_equals(addr_t key1, addr_t key2) {
    return (key1 == key2);
}

static uint_t cr3_hash_fn(addr_t key) {
    return hash_long(key, 32);
}

static int cr3_equals(addr_t key1, addr_t key2) {
    return (key1 == key2);
}

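/* Sketch (not in the original source): how the two hashtable layers fit
 * together. The outer cr3_cache maps a guest CR3 value to an inner
 * hashtable, which in turn maps the guest physical address of each page
 * table page to its host virtual address. A lookup using the wrappers
 * defined above goes roughly:
 *
 *   struct hashtable * ptes =
 *       (struct hashtable *)find_cr3_in_cache(state->cr3_cache, guest_cr3);
 *
 *   if ((ptes != NULL) && (find_pte_map(ptes, guest_pt_page_pa) != NULL)) {
 *       // guest_pt_page_pa is a known guest page table page
 *   }
 */
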
static int activate_shadow_pt_32(struct guest_info * info);
static int activate_shadow_pt_32pae(struct guest_info * info);
static int activate_shadow_pt_64(struct guest_info * info);

static int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
static int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
static int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);

static int cache_page_tables_32(struct guest_info * info, addr_t pde);

int v3_init_shadow_page_state(struct guest_info * info) {
    struct shadow_page_state * state = &(info->shdw_pg_state);

    state->cr3_cache = create_hashtable(0, &cr3_hash_fn, &cr3_equals);

    state->cached_cr3 = 0;
    state->cached_ptes = NULL;

    return 0;
}

/*
 For now we'll do something a little more lightweight:

int cache_page_tables32(struct guest_info * info, addr_t pde) {
    struct shadow_page_state * state = &(info->shdw_pg_state);
    addr_t pde_host_addr;
    pde32_t * tmp_pde;
    struct hashtable * pte_cache = NULL;
    int i = 0;

    pte_cache = (struct hashtable *)find_cr3_in_cache(state->cr3_cache, pde);
    if (pte_cache != NULL) {
        PrintError("CR3 already present in cache\n");
        state->current_ptes = pte_cache;
        return 1;
    } else {
        PrintError("Creating new CR3 cache entry\n");
        pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
        state->current_ptes = pte_cache;
        add_cr3_to_cache(state->cr3_cache, pde, pte_cache);
    }

    if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
        PrintError("Could not lookup host address of guest PDE\n");
        return -1;
    }

    tmp_pde = (pde32_t *)pde_host_addr;
    add_pte_map(pte_cache, pde, pde_host_addr);

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
            addr_t pte_host_addr;

            if (guest_pa_to_host_va(info, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), &pte_host_addr) == -1) {
                PrintError("Could not lookup host address of guest PTE\n");
                return -1;
            }

            add_pte_map(pte_cache, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), pte_host_addr);
        }
    }

    return 0;
}
*/

int v3_cache_page_tables(struct guest_info * info, addr_t cr3) {
    switch (v3_get_cpu_mode(info)) {
        case PROTECTED:
            return cache_page_tables_32(info, CR3_TO_PDE32_PA(cr3));
        default:
            return -1;
    }
}

static int cache_page_tables_32(struct guest_info * info, addr_t pde) {
    struct shadow_page_state * state = &(info->shdw_pg_state);
    addr_t pde_host_addr;
    pde32_t * tmp_pde;
    struct hashtable * pte_cache = NULL;
    int i = 0;

    if (pde == state->cached_cr3) {
        return 1;
    }

    if (state->cached_ptes != NULL) {
        hashtable_destroy(state->cached_ptes, 0, 0);
        state->cached_ptes = NULL;
    }
    state->cached_cr3 = pde;

    pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
    state->cached_ptes = pte_cache;

    if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
        PrintError("Could not lookup host address of guest PDE\n");
        return -1;
    }
    tmp_pde = (pde32_t *)pde_host_addr;
    add_pte_map(pte_cache, pde, pde_host_addr);

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
            addr_t pte_host_addr;

            if (guest_pa_to_host_va(info, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), &pte_host_addr) == -1) {
                PrintError("Could not lookup host address of guest PTE\n");
                return -1;
            }
            add_pte_map(pte_cache, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), pte_host_addr);
        }
    }

    return 0;
}

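/* Sketch (not in the original source): what the cache above is consumed for.
 * The PTE fault handlers below probe cached_ptes to decide whether the
 * faulting page is one of the guest's own page table pages; if so, the
 * shadow entry is tagged and mapped read-only so that guest page table
 * writes trap into the VMM:
 *
 *   if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
 *       shadow_pte->vmm_info = PT32_GUEST_PT;  // tag as guest page table
 *       shadow_pte->writable = 0;              // force writes to fault
 *   }
 *
 * A later write fault on such a page clears state->cached_cr3, invalidating
 * the cache (see the fault handlers below).
 */
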
static int cache_page_tables_64(struct guest_info * info, addr_t pde) {
    return -1;
}

int v3_replace_shdw_page32(struct guest_info * info, addr_t location, pte32_t * new_page, pte32_t * old_page) {
    pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(location)]);

    if (shadow_pde->large_page == 0) {
        pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr);
        pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(location)]);

        //if (shadow_pte->present == 1) {
        *(uint_t *)old_page = *(uint_t *)shadow_pte;
        //}

        *(uint_t *)shadow_pte = *(uint_t *)new_page;
    } else {
        // currently unhandled
        PrintError("Replacing large shadow pages not implemented\n");
        return -1;
    }

    return 0;
}

// We assume that shdw_pg_state.guest_cr3 is pointing to the page tables we want to activate.
// We also assume that the CPU mode has not changed during this page table transition.
static int activate_shadow_pt_32(struct guest_info * info) {
    struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
    struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
    int cached = 0;

    // Check if shadow page tables are in the cache
    cached = cache_page_tables_32(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));

    if (cached == -1) {
        PrintError("CR3 Cache failed\n");
        return -1;
    } else if (cached == 0) {
        addr_t shadow_pt;

        PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3);
        delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t *)shadow_cr3));

        shadow_pt = v3_create_new_shadow_pt();

        shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
        PrintDebug("Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr);
    } else {
        PrintDebug("Reusing cached shadow page table\n");
    }

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}

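/* Usage note (an assumption, not stated in this file): this path runs when
 * the guest writes CR3, which is intercepted by the control register
 * handlers. The expected sequence is roughly:
 *
 *   info->shdw_pg_state.guest_cr3 = new_guest_cr3;  // saved at the CR3 exit
 *   if (v3_activate_shadow_pt(info) == -1) {
 *       return -1;  // propagate the error
 *   }
 *   // info->ctrl_regs.cr3 now points at the (possibly rebuilt) shadow tables
 */
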
static int activate_shadow_pt_32pae(struct guest_info * info) {
    PrintError("Activating 32 bit PAE page tables not implemented\n");
    return -1;
}

static int activate_shadow_pt_64(struct guest_info * info) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
    int cached = 0;

    // Check if shadow page tables are in the cache
    // (NOTE: still uses the 32 bit CR3 macros and delete helper; 64 bit support is unfinished)
    cached = cache_page_tables_64(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));

    if (cached == -1) {
        PrintError("CR3 Cache failed\n");
        return -1;
    } else if (cached == 0) {
        addr_t shadow_pt;

        PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3);
        delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t *)shadow_cr3));

        shadow_pt = v3_create_new_shadow_pt();

        shadow_cr3->pml4t_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
        PrintDebug("Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pml4t_base_addr);
    } else {
        PrintDebug("Reusing cached shadow page table\n");
    }

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;
    return 0;
}

// Reads the guest CR3 register,
// creates new shadow page tables,
// and updates the shadow CR3 register to point to the new page tables.
int v3_activate_shadow_pt(struct guest_info * info) {
    switch (info->cpu_mode) {
        case PROTECTED:
            return activate_shadow_pt_32(info);
        case PROTECTED_PAE:
            return activate_shadow_pt_32pae(info);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            return activate_shadow_pt_64(info);
        default:
            PrintError("Invalid CPU mode: %d\n", info->cpu_mode);
            return -1;
    }
}

int v3_activate_passthrough_pt(struct guest_info * info) {
    // For now... but we need to change this:
    // as soon as shadow paging becomes active the passthrough tables are hosed,
    // so this will cause chaos if it is called at that time.

    info->ctrl_regs.cr3 = *(addr_t *)&(info->direct_map_pt);
    return 0;
}

int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {

    if (info->mem_mode == PHYSICAL_MEM) {
        // If paging is not turned on we need to handle the special cases
#ifdef DEBUG_SHADOW_PAGING
        PrintPageTree(info->cpu_mode, fault_addr, info->ctrl_regs.cr3);
#endif
        return handle_special_page_fault(info, fault_addr, fault_addr, error_code);
    } else if (info->mem_mode == VIRTUAL_MEM) {
        switch (info->cpu_mode) {
            case PROTECTED:
                return handle_shadow_pagefault_32(info, fault_addr, error_code);
            case PROTECTED_PAE:
                return handle_shadow_pagefault_32pae(info, fault_addr, error_code);
            case LONG:
                return handle_shadow_pagefault_64(info, fault_addr, error_code);
            default:
                PrintError("Unhandled CPU Mode\n");
                return -1;
        }
    } else {
        PrintError("Invalid Memory mode\n");
        return -1;
    }
}

addr_t v3_create_new_shadow_pt() {
    void * host_pde = NULL;

    host_pde = V3_VAddr(V3_AllocPages(1));
    memset(host_pde, 0, PAGE_SIZE);

    return (addr_t)host_pde;
}

static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
}

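/* Sketch (not in the original source): the *(uint_t *)&error_code pun above
 * assumes pf_error_t mirrors the x86 page fault error code layout:
 * bit 0 = present, bit 1 = write, bit 2 = user, bit 3 = reserved-bit
 * violation, bit 4 = instruction fetch. For example, a user-mode write to a
 * not-present page would be injected as:
 *
 *   pf_error_t err = { .present = 0, .write = 1, .user = 1 };
 *   inject_guest_pf(info, fault_addr, err);
 */
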
static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access) {
    /* Basically, the reasoning is that there can be multiple reasons for a page fault:
     * If there is a permissions failure for a page present in the guest _BUT_
     * the reason for the fault was that the page is not present in the shadow,
     * _THEN_ we have to map the shadow page in and re-execute. This will generate
     * a permissions fault, which is _THEN_ valid to send to the guest,
     * _UNLESS_ both the guest and shadow have marked the page as not present,
     * in which case the guest gets to handle its fault first.
     */
    if (guest_access != PT_ACCESS_OK) {
        // Guest Access Error

        if ((shadow_access != PT_ACCESS_NOT_PRESENT) &&
            (guest_access != PT_ACCESS_NOT_PRESENT)) {
            // aka (guest permission error)
            return 1;
        }

        if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
            (guest_access == PT_ACCESS_NOT_PRESENT)) {
            // Page tables completely blank, handle guest first
            return 1;
        }

        // Otherwise we'll handle the guest fault later...?
    }

    return 0;
}

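/* Summary (not in the original source): the decision above, tabulated.
 * "forward" means the fault is injected into the guest.
 *
 *   guest_access        shadow_access      decision
 *   ------------        -------------      --------
 *   OK                  (any)              shadow-only fault, VMM handles it
 *   permission error    present            forward (real guest error)
 *   NOT_PRESENT         NOT_PRESENT        forward (guest must map it first)
 *   permission error    NOT_PRESENT        map shadow page, then re-execute
 *   NOT_PRESENT         present            VMM handles it (falls through)
 */
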
/* 
 * * 
 * * 64 bit Page table fault handlers
 * * 
 * */

static int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pt_access_status_t guest_access;
    pt_access_status_t shadow_access;
    int ret;

    PrintDebug("64 bit shadow page fault\n");

    // NOTE: these still use the 32 bit table walkers; 64 bit support is unfinished
    ret = v3_check_guest_pt_32(info, info->shdw_pg_state.guest_cr3, fault_addr, error_code, &guest_access);
    PrintDebug("Guest Access Check: %d (access=%d)\n", ret, guest_access);

    ret = v3_check_host_pt_32(info->ctrl_regs.cr3, fault_addr, error_code, &shadow_access);
    PrintDebug("Shadow Access Check: %d (access=%d)\n", ret, shadow_access);

    PrintError("64 bit shadow paging not implemented\n");
    return -1;
}

/* 
 * * 
 * * 32 bit PAE Page table fault handlers
 * * 
 * */

static int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    PrintError("32 bit PAE shadow paging not implemented\n");
    return -1;
}

/* 
 * * 
 * * 32 bit Page table fault handlers
 * * 
 * */

static int handle_large_pagefault_32(struct guest_info * info,
                                     addr_t fault_addr, pf_error_t error_code,
                                     pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);

static int handle_shadow_pte32_fault(struct guest_info * info,
                                     addr_t fault_addr,
                                     pf_error_t error_code,
                                     pte32_t * shadow_pt,
                                     pte32_t * guest_pt);

static int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pde32_t * guest_pd = NULL;
    pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde32_t * guest_pde = NULL;
    pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);

    PrintDebug("Shadow page fault handler: %p\n", (void *)fault_addr);

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde32(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        pte32_t * shadow_pt = (pte32_t *)v3_create_new_shadow_pt();

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;
        //shadow_pde->large_page = guest_pde->large_page;
        shadow_pde->large_page = 0;

        // VMM Specific options
        shadow_pde->write_through = 0;
        shadow_pde->cache_disable = 0;
        shadow_pde->global_page = 0;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(shadow_pt));

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
        } else {
            // ?? What if the guest PDE is dirty at this point?
            ((pde32_4MB_t *)guest_pde)->dirty = 0;
            shadow_pde->writable = 0;
        }
    } else if (shadow_pde_access == PT_ACCESS_OK) {
        // The shadow PDE is fine, so the fault must come from the PTE level
        pte32_t * shadow_pt = (pte32_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));

        if (guest_pde->large_page == 0) {
            pte32_t * guest_pt = NULL;

            if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
                // Machine check the guest
                PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
                v3_raise_exception(info, MC_EXCEPTION);
                return 0;
            }

            if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
                PrintError("Error handling Page fault caused by PTE\n");
                return -1;
            }
        } else if (guest_pde->large_page == 1) {
            if (handle_large_pagefault_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
                PrintError("Error handling large pagefault\n");
                return -1;
            }
        }
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1) &&
               (((pde32_4MB_t *)guest_pde)->dirty == 0)) {

        // Page Directory Entry marked read-only.
        // It's a large page, so we need to update the dirty bit in the guest.

        PrintDebug("Large page write error... Setting dirty bit and returning\n");
        ((pde32_4MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;
        return 0;

    } else if (shadow_pde_access == PT_ACCESS_USER_ERROR) {

        // Page Directory Entry marked non-user

        PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;

    } else {
        // Inject page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug("Manual says to inject page fault into guest\n");
#ifdef DEBUG_SHADOW_PAGING
        PrintDebug("Guest PDE: (access=%d)\n\t", guest_pde_access);
        PrintPDE32(fault_addr, guest_pde);
        PrintDebug("Shadow PDE: (access=%d)\n\t", shadow_pde_access);
        PrintPDE32(fault_addr, shadow_pde);
#endif
        return 0;
    }

    PrintDebug("Returning end of PDE function (rip=%p)\n", (void *)(addr_t)(info->rip));
    return 0;
}

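/* Worked example (not in the original source): the first guest access to a
 * 4KB page with empty shadow tables typically takes two faults:
 *
 *   1. PDE fault: shadow_pde_access == PT_ACCESS_NOT_PRESENT, so a fresh
 *      shadow page table is allocated and hooked in above; re-execute.
 *   2. PTE fault: shadow_pde_access is now PT_ACCESS_OK, so the fault drops
 *      into handle_shadow_pte32_fault(), which fills the shadow PTE from
 *      the guest PTE and the VMM's memory map.
 */
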
/* The guest status checks have already been done,
 * only special case shadow checks remain.
 */
static int handle_large_pagefault_32(struct guest_info * info,
                                     addr_t fault_addr, pf_error_t error_code,
                                     pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault
        addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);
        host_region_type_t host_page_type = get_shadow_addr_type(info, guest_fault_pa);

        if (host_page_type == HOST_REGION_INVALID) {
            // Inject a machine check in the guest
            PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }

        if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
            struct shadow_page_state * state = &(info->shdw_pg_state);
            addr_t shadow_pa = get_shadow_addr(info, guest_fault_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the shadow PDE will mirror the guest PDE settings
             * and we don't have to worry about them here.
             */
            shadow_pte->user_page = 1;

            if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
                // The entry is one of the guest's page table pages
                PrintDebug("Marking page as Guest Page Table (large page)\n");
                shadow_pte->vmm_info = PT32_GUEST_PT;
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // set according to VMM policy
            shadow_pte->write_through = 0;
            shadow_pte->cache_disable = 0;
            shadow_pte->global_page = 0;
        } else {
            // Handle hooked pages as well as other special pages
            if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if ((shadow_pte_access == PT_ACCESS_WRITE_ERROR) &&
               (shadow_pte->vmm_info == PT32_GUEST_PT)) {

        struct shadow_page_state * state = &(info->shdw_pg_state);
        PrintDebug("Write operation on Guest Page Table Page (large page)\n");
        state->cached_cr3 = 0;
        shadow_pte->writable = 1;

    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    PrintDebug("Returning from large page fault handler\n");
    return 0;
}

/* 
 * We assume that the guest pte pointer has already been translated to a host virtual address.
 */
static int handle_shadow_pte32_fault(struct guest_info * info,
                                     addr_t fault_addr,
                                     pf_error_t error_code,
                                     pte32_t * shadow_pt,
                                     pte32_t * guest_pt) {

    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte32(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);

#ifdef DEBUG_SHADOW_PAGING
    PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
    PrintPTE32(fault_addr, guest_pte);
    PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
    PrintPTE32(fault_addr, shadow_pte);
#endif

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
        PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush page tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {

        addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);

        // Page Table Entry Not Present
        PrintDebug("guest_pa=%p\n", (void *)guest_pa);

        host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

        if (host_page_type == HOST_REGION_INVALID) {
            // Inject a machine check in the guest
            PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }

        if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
            struct shadow_page_state * state = &(info->shdw_pg_state);
            addr_t shadow_pa = get_shadow_addr(info, guest_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // set according to VMM policy
            shadow_pte->write_through = 0;
            shadow_pte->cache_disable = 0;
            shadow_pte->global_page = 0;

            guest_pte->accessed = 1;

            if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
                // The entry is one of the guest's page table pages
                PrintDebug("Marking page as Guest Page Table %d\n", shadow_pte->writable);
                shadow_pte->vmm_info = PT32_GUEST_PT;
            }

            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;

                if (shadow_pte->vmm_info == PT32_GUEST_PT) {
                    // Well that was quick...
                    PrintDebug("Immediate Write operation on Guest Page Table Page\n");
                    state->cached_cr3 = 0;
                }

            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) { // was "=" (assignment) in an earlier revision
                shadow_pte->writable = 0;
            }

        } else {
            // Page fault handled by hook functions
            if (handle_special_page_fault(info, fault_addr, guest_pa, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }

    } else if ((shadow_pte_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pte->dirty == 0)) {

        PrintDebug("Shadow PTE Write Error\n");
        guest_pte->dirty = 1;
        shadow_pte->writable = guest_pte->writable;

        if (shadow_pte->vmm_info == PT32_GUEST_PT) {
            struct shadow_page_state * state = &(info->shdw_pg_state);
            PrintDebug("Write operation on Guest Page Table Page\n");
            state->cached_cr3 = 0;
        }

        return 0;

    } else {
        // Inject page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError("Manual says to inject page fault into guest\n");
        return -1;
    }

    PrintDebug("Returning end of function\n");
    return 0;
}

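/* Worked example (not in the original source): dirty bit emulation. A read
 * of a clean guest page maps the shadow PTE read-only (writable = 0), even
 * when the guest PTE is writable. A later guest write then faults with
 * shadow_pte_access == PT_ACCESS_WRITE_ERROR, and the branch above sets
 * guest_pte->dirty = 1 and restores shadow_pte->writable, so the guest OS
 * sees the dirty bit exactly as hardware would have set it.
 */
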
/* Currently does not work with segmentation!!! */
int v3_handle_shadow_invlpg(struct guest_info * info)
{
    if (info->mem_mode != VIRTUAL_MEM) {
        // Paging must be turned on...
        // should handle with some sort of fault I think
        PrintError("ERROR: INVLPG called in non-paged mode\n");
        return -1;
    }

    if (info->cpu_mode != PROTECTED) {
        PrintError("Unsupported CPU mode (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
        return -1;
    }

    uchar_t instr[15];
    int index = 0;

    int ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    if (ret != 15) {
        PrintError("Could not read instruction 0x%p (ret=%d)\n", (void *)(addr_t)(info->rip), ret);
        return -1;
    }

    /* Can INVLPG work with segments?? */
    while (is_prefix_byte(instr[index])) {
        index++;
    }

    if ((instr[index + 0] != (uchar_t)0x0f) ||
        (instr[index + 1] != (uchar_t)0x01)) {
        PrintError("Invalid Instruction Opcode\n");
        PrintTraceMemDump(instr, 15);
        return -1;
    }

    addr_t first_operand;
    addr_t second_operand;
    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);

    pde32_t * guest_pd = NULL;

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    index += 2;

    v3_operand_type_t addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);

    if (addr_type != MEM_OPERAND) {
        PrintError("Invalid Operand type\n");
        return -1;
    }

    pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];
    pde32_t * guest_pde = NULL;

    //PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
    //PrintDebug("FirstOperand = %x\n", first_operand);
    PrintDebug("Invalidating page for %p\n", (void *)first_operand);

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(first_operand)]);

    if (guest_pde->large_page == 1) {
        shadow_pde->present = 0;
        PrintDebug("Invalidating Large Page\n");
    } else if (shadow_pde->present == 1) {
        pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr);
        pte32_t * shadow_pte = (pte32_t *)V3_VAddr((void *)&shadow_pt[PTE32_INDEX(first_operand)]);

#ifdef DEBUG_SHADOW_PAGING
        PrintDebug("Setting not present\n");
        PrintPTE32(first_operand, shadow_pte);
#endif
        shadow_pte->present = 0;
    }

    info->rip += index;
    return 0;
}

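/* Note (not from this file): INVLPG m is encoded as 0F 01 /7, i.e. the
 * ModRM reg field must be 7. The opcode check above only validates the
 * 0F 01 escape bytes, which other instructions (e.g. LGDT, 0F 01 /2)
 * share; a stricter decoder would also check the reg field:
 *
 *   if (((instr[index + 2] >> 3) & 0x7) != 7) {
 *       // not INVLPG
 *   }
 */
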
static int create_pd32_nonaligned_4MB_page(struct guest_info * info, pte32_t * pt, addr_t guest_addr, pde32_4MB_t * large_shadow_pde) {
    uint_t i = 0;
    pte32_t * pte_cursor;
    addr_t guest_pa = 0;

    for (i = 0; i < 1024; i++) {
        guest_pa = guest_addr + (PAGE_SIZE * i);
        host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

        pte_cursor = &(pt[i]);

        if (host_page_type == HOST_REGION_INVALID) {
            // Currently we don't support this, but in theory we could
            PrintError("Invalid Host Memory Type\n");
            return -1;
        } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
            addr_t shadow_pa = get_shadow_addr(info, guest_pa);

            pte_cursor->page_base_addr = PT32_BASE_ADDR(shadow_pa);
            pte_cursor->present = 1;
            pte_cursor->writable = large_shadow_pde->writable;
            pte_cursor->user_page = large_shadow_pde->user_page;
            pte_cursor->write_through = 0;
            pte_cursor->cache_disable = 0;
            pte_cursor->global_page = 0;
        } else {
            PrintError("Unsupported Host Memory Type\n");
            return -1;
        }
    }

    return 0;
}

static int handle_large_pagefault32(struct guest_info * info,
                                    pde32_t * guest_pde, pde32_t * shadow_pde,
                                    addr_t fault_addr, pf_error_t error_code) {
    struct shadow_region * mem_reg;
    pde32_4MB_t * large_guest_pde = (pde32_4MB_t *)guest_pde;
    pde32_4MB_t * large_shadow_pde = (pde32_4MB_t *)shadow_pde;
    host_region_type_t host_page_type;
    addr_t guest_start_addr = PDE32_4MB_T_ADDR(*large_guest_pde);
    //addr_t guest_end_addr = guest_start_addr + PAGE_SIZE_4MB; // start address + 4MB

    // Check that the Guest PDE entry points to valid memory,
    // else machine check the guest
    PrintDebug("Large Page: Page Base Addr=%x\n", guest_start_addr);

    host_page_type = get_shadow_addr_type(info, guest_start_addr);

    if (host_page_type == HOST_REGION_INVALID) {
        PrintError("Invalid guest address in large page (0x%x)\n", guest_start_addr);
        v3_raise_exception(info, MC_EXCEPTION);
        return -1;
    }

    if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
        addr_t host_start_addr = 0;
        addr_t region_end_addr = 0;

        // Check for a large enough region in host memory
        mem_reg = get_shadow_region_by_addr(&(info->mem_map), guest_start_addr);
        PrintDebug("Host region: host_addr=%x (guest_start=%x, end=%x)\n",
                   mem_reg->host_addr, mem_reg->guest_start, mem_reg->guest_end);
        host_start_addr = mem_reg->host_addr + (guest_start_addr - mem_reg->guest_start);
        region_end_addr = mem_reg->host_addr + (mem_reg->guest_end - mem_reg->guest_start);

        PrintDebug("Host Start Addr=%x; Region End Addr=%x\n", host_start_addr, region_end_addr);

        // Dirty-bit emulation for the large guest page
        if (large_guest_pde->dirty == 1) { // dirty
            large_shadow_pde->writable = guest_pde->writable;
        } else if (error_code.write == 1) { // not dirty, access is write
            large_shadow_pde->writable = guest_pde->writable;
            large_guest_pde->dirty = 1;
        } else { // not dirty, access is read
            large_shadow_pde->writable = 0;
        }

        // Check if the region is at least an additional 4MB:
        // if the host address is 4MB aligned and the region covers the
        // whole large page, map it with a single large shadow page
        if ((PD32_4MB_PAGE_OFFSET(host_start_addr) == 0) &&
            (region_end_addr >= host_start_addr + PAGE_SIZE_4MB)) { // if 4MB boundary
            large_shadow_pde->page_base_addr = PD32_4MB_BASE_ADDR(host_start_addr);
        } else { // else generate 4k pages
            pte32_t * shadow_pt = NULL;
            PrintDebug("Handling non-aligned large page\n");

            shadow_pde->large_page = 0;

            shadow_pt = create_new_shadow_pt32();

            if (create_pd32_nonaligned_4MB_page(info, shadow_pt, guest_start_addr, large_shadow_pde) == -1) {
                PrintError("Non-aligned Large Page Error\n");
                return -1;
            }

#ifdef DEBUG_SHADOW_PAGING
            PrintDebug("non-aligned Shadow PT\n");
            PrintPT32(PT32_PAGE_ADDR(fault_addr), shadow_pt);
#endif
            shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
        }
    } else {
        // Handle hooked pages as well as other special pages
        if (handle_special_page_fault(info, fault_addr, guest_start_addr, error_code) == -1) {
            PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);