/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm_shadow_paging.h>

#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>
#ifndef DEBUG_SHADOW_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
DEFINE_HASHTABLE_INSERT(add_cr3_to_cache, addr_t, struct hashtable *);
DEFINE_HASHTABLE_SEARCH(find_cr3_in_cache, addr_t, struct hashtable *);
DEFINE_HASHTABLE_REMOVE(del_cr3_from_cache, addr_t, struct hashtable *, 0);

DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
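/* The macros above presumably expand to type-safe wrappers around the
 * generic hashtable interface, used later in this file as, for example:
 *
 *     add_cr3_to_cache(state->cr3_cache, pde, pte_cache);
 *     find_pte_map(state->cached_ptes, PT32_PAGE_ADDR(guest_pa));
 *
 * cr3_cache maps a guest CR3 value to a hashtable describing that address
 * space, while the pte maps record which guest physical pages hold page
 * tables (keyed by guest physical address).
 */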
static uint_t pte_hash_fn(addr_t key) {
    return hash_long(key, 32);
}

static int pte_equals(addr_t key1, addr_t key2) {
    return (key1 == key2);
}

static uint_t cr3_hash_fn(addr_t key) {
    return hash_long(key, 32);
}

static int cr3_equals(addr_t key1, addr_t key2) {
    return (key1 == key2);
}
static int handle_shadow_pte32_fault(struct guest_info * info,
				     addr_t fault_addr,
				     pf_error_t error_code,
				     pte32_t * shadow_pt,
				     pte32_t * guest_pt);

static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
int v3_init_shadow_page_state(struct guest_info * info) {
    struct shadow_page_state * state = &(info->shdw_pg_state);

    state->guest_mode = PDE32;
    state->shadow_mode = PDE32;

    state->shadow_cr3 = 0;
    state->guest_cr3 = 0;

    state->cr3_cache = create_hashtable(0, &cr3_hash_fn, &cr3_equals);

    state->cached_cr3 = 0;
    state->cached_ptes = NULL;

    return 0;
}
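/* Note: only 32-bit non-PAE paging (PDE32) is supported here; the top level
 * fault handler below only dispatches for PROTECTED mode guests.
 */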
/* For now we'll do something a little more lightweight */
int cache_page_tables32(struct guest_info * info, addr_t pde) {
    struct shadow_page_state * state = &(info->shdw_pg_state);
    addr_t pde_host_addr;
    pde32_t * tmp_pde;
    struct hashtable * pte_cache = NULL;
    int i = 0;

    pte_cache = (struct hashtable *)find_cr3_in_cache(state->cr3_cache, pde);
    if (pte_cache != NULL) {
	PrintDebug("CR3 already present in cache\n");
	state->current_ptes = pte_cache;
	return 1;
    } else {
	PrintDebug("Creating new CR3 cache entry\n");
	pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
	state->current_ptes = pte_cache;
	add_cr3_to_cache(state->cr3_cache, pde, pte_cache);
    }

    if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
	PrintError("Could not lookup host address of guest PDE\n");
	return -1;
    }

    tmp_pde = (pde32_t *)pde_host_addr;

    add_pte_map(pte_cache, pde, pde_host_addr);

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
	if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
	    addr_t pte_host_addr;

	    if (guest_pa_to_host_va(info, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), &pte_host_addr) == -1) {
		PrintError("Could not lookup host address of guest PTE page\n");
		return -1;
	    }

	    add_pte_map(pte_cache, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), pte_host_addr);
	}
    }

    return 0;
}
int v3_cache_page_tables32(struct guest_info * info, addr_t pde) {
    struct shadow_page_state * state = &(info->shdw_pg_state);
    addr_t pde_host_addr;
    pde32_t * tmp_pde;
    struct hashtable * pte_cache = NULL;
    int i = 0;

    if (pde == state->cached_cr3) {
	// Already cached, nothing to do
	return 1;
    }

    if (state->cached_ptes != NULL) {
	hashtable_destroy(state->cached_ptes, 0, 0);
	state->cached_ptes = NULL;
    }

    state->cached_cr3 = pde;

    pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
    state->cached_ptes = pte_cache;

    if (guest_pa_to_host_pa(info, pde, &pde_host_addr) == -1) {
	PrintError("Could not lookup host address of guest PDE\n");
	return -1;
    }

    // NOTE: dereferencing tmp_pde below relies on the host identity-mapping
    // its physical memory, since pde_host_addr is a host physical address
    tmp_pde = (pde32_t *)pde_host_addr;

    add_pte_map(pte_cache, pde, pde_host_addr);

    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
	if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
	    addr_t pte_host_addr;

	    if (guest_pa_to_host_pa(info, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), &pte_host_addr) == -1) {
		PrintError("Could not lookup host address of guest PTE page\n");
		return -1;
	    }

	    add_pte_map(pte_cache, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), pte_host_addr);
	}
    }

    return 0;
}
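/* The guest physical addresses cached above are consulted by the fault
 * handlers below: when a page found in cached_ptes is mapped through the
 * shadow tables it is tagged PT32_GUEST_PT and write-protected, so guest
 * writes to its own page tables fault into the VMM and invalidate
 * cached_cr3.
 */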
int v3_replace_shdw_page32(struct guest_info * info, addr_t location, pte32_t * new_page, pte32_t * old_page) {
    pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
    pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(location)]);

    if (shadow_pde->large_page == 0) {
	pte32_t * shadow_pt = (pte32_t *)(addr_t)PDE32_T_ADDR((*shadow_pde));
	pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(location)]);

	// Save the old entry, then swap in the new one
	*(uint_t *)old_page = *(uint_t *)shadow_pte;
	*(uint_t *)shadow_pte = *(uint_t *)new_page;
    } else {
	// currently unhandled
	PrintError("Replacing large shadow pages is not implemented\n");
	return -1;
    }

    return 0;
}
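/* The entries are copied as raw 32-bit words; an aligned 32-bit store is
 * atomic on x86, so the swap never exposes a half-written PTE.
 */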
int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {

    if (info->mem_mode == PHYSICAL_MEM) {
	// If paging is not turned on we need to handle the special cases
	return handle_special_page_fault(info, fault_addr, fault_addr, error_code);
    } else if (info->mem_mode == VIRTUAL_MEM) {
	switch (info->cpu_mode) {
	    case PROTECTED:
		return handle_shadow_pagefault32(info, fault_addr, error_code);
	    default:
		PrintError("Unhandled CPU Mode\n");
		return -1;
	}
    } else {
	PrintError("Invalid Memory mode\n");
	return -1;
    }
}
addr_t v3_create_new_shadow_pt32() {
    void * host_pde = NULL;

    host_pde = V3_AllocPages(1);
    memset(host_pde, 0, PAGE_SIZE);

    return (addr_t)host_pde;
}
static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
}
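/* This mirrors the hardware's fault semantics: CR2 receives the faulting
 * linear address and the page fault error code is delivered with the
 * exception, so the guest's #PF handler sees what it would see on bare
 * metal.
 */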
static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access) {
    /* basically the reasoning is that there can be multiple reasons for a page fault:
       If there is a permissions failure for a page present in the guest _BUT_
       the reason for the fault was that the page is not present in the shadow,
       _THEN_ we have to map the shadow page in and reexecute; this will generate
       a permissions fault which is _THEN_ valid to send to the guest,
       _UNLESS_ both the guest and shadow have marked the page as not present
    */
    if (guest_access != PT_ACCESS_OK) {
	// Guest Access Error

	if ((shadow_access != PT_ENTRY_NOT_PRESENT) &&
	    (guest_access != PT_ENTRY_NOT_PRESENT)) {
	    // aka (guest permission error)
	    return 1;
	}

	if ((shadow_access == PT_ENTRY_NOT_PRESENT) &&
	    (guest_access == PT_ENTRY_NOT_PRESENT)) {
	    // Page tables completely blank, handle guest first
	    return 1;
	}

	// Otherwise we'll handle the guest fault later...?
    }

    return 0;
}
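/* As a decision table:
 *
 *   guest access  | shadow access | is_guest_pf
 *   --------------+---------------+------------
 *   OK            | any           | 0
 *   perm error    | present       | 1  (inject into guest)
 *   not present   | not present   | 1  (inject into guest)
 *   perm error    | not present   | 0  (map shadow page, re-execute)
 *   not present   | present       | 0
 */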
/* The guest status checks have already been done,
 * only special case shadow checks remain
 */
static int handle_large_pagefault32(struct guest_info * info,
				    addr_t fault_addr, pf_error_t error_code,
				    pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);

    if (shadow_pte_access == PT_ACCESS_OK) {
	// Inconsistent state...
	// Guest Re-Entry will flush tables and everything should now work
	PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
	return 0;
    }

    if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
	// Get the guest physical address of the fault
	addr_t guest_fault_pa = PDE32_4MB_T_ADDR(*large_guest_pde) + PD32_4MB_PAGE_OFFSET(fault_addr);
	host_region_type_t host_page_type = get_shadow_addr_type(info, guest_fault_pa);

	if (host_page_type == HOST_REGION_INVALID) {
	    // Inject a machine check in the guest
	    PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_fault_pa);
	    v3_raise_exception(info, MC_EXCEPTION);
	    return 0;
	}

	if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
	    struct shadow_page_state * state = &(info->shdw_pg_state);
	    addr_t shadow_pa = get_shadow_addr(info, guest_fault_pa);

	    shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);

	    shadow_pte->present = 1;

	    /* We are assuming that the PDE entry has precedence,
	     * so the Shadow PDE will mirror the guest PDE settings,
	     * and we don't have to worry about them here
	     */
	    shadow_pte->user_page = 1;

	    if (find_pte_map(state->cached_ptes, PT32_PAGE_ADDR(guest_fault_pa)) != NULL) {
		// The page holds one of the guest's page tables...
		PrintDebug("Marking page as Guest Page Table (large page)\n");
		shadow_pte->vmm_info = PT32_GUEST_PT;
		shadow_pte->writable = 0;
	    } else {
		shadow_pte->writable = 1;
	    }

	    // set according to VMM policy
	    shadow_pte->write_through = 0;
	    shadow_pte->cache_disable = 0;
	    shadow_pte->global_page = 0;
	} else {
	    // Handle hooked pages as well as other special pages
	    if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
		PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);
		return -1;
	    }
	}
    } else if ((shadow_pte_access == PT_WRITE_ERROR) &&
	       (shadow_pte->vmm_info == PT32_GUEST_PT)) {
	// Write to a write-protected guest page table page:
	// invalidate the cached CR3 and let the write proceed
	struct shadow_page_state * state = &(info->shdw_pg_state);
	PrintDebug("Write operation on Guest Page Table Page (large page)\n");
	state->cached_cr3 = 0;
	shadow_pte->writable = 1;
    } else {
	PrintError("Error in large page fault handler...\n");
	PrintError("This case should have been handled at the top level handler\n");
	return -1;
    }

    PrintDebug("Returning from large page fault handler\n");
    return 0;
}
static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pde32_t * guest_pd = NULL;
    pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
    addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde32_t * guest_pde = NULL;
    pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);

    PrintDebug("Shadow page fault handler\n");

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
	PrintError("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
	return -1;
    }

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);

    // Check the guest page permissions
    guest_pde_access = can_access_pde32(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = can_access_pde32(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
	PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
		   guest_pde_access, *(uint_t *)&error_code);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    }

    if (shadow_pde_access == PT_ENTRY_NOT_PRESENT) {
	pte32_t * shadow_pt = (pte32_t *)v3_create_new_shadow_pt32();

	shadow_pde->present = 1;
	shadow_pde->user_page = guest_pde->user_page;
	// shadow_pde->large_page = guest_pde->large_page;
	shadow_pde->large_page = 0;

	// VMM Specific options
	shadow_pde->write_through = 0;
	shadow_pde->cache_disable = 0;
	shadow_pde->global_page = 0;

	guest_pde->accessed = 1;

	shadow_pde->pt_base_addr = PD32_BASE_ADDR((addr_t)shadow_pt);

	if (guest_pde->large_page == 0) {
	    shadow_pde->writable = guest_pde->writable;
	} else {
	    // Clear the dirty bit and write protect the PDE, so the first
	    // guest write faults and we can set the dirty bit then
	    ((pde32_4MB_t *)guest_pde)->dirty = 0;
	    shadow_pde->writable = 0;
	}
    } else if (shadow_pde_access == PT_ACCESS_OK) {
	// The PDE is fine; the fault was caused at the PTE level
	pte32_t * shadow_pt = (pte32_t *)(addr_t)PDE32_T_ADDR((*shadow_pde));

	if (guest_pde->large_page == 0) {
	    pte32_t * guest_pt = NULL;

	    if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde)), (addr_t *)&guest_pt) == -1) {
		// Machine check the guest
		PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde)));
		v3_raise_exception(info, MC_EXCEPTION);
		return 0;
	    }

	    if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
		PrintError("Error handling Page fault caused by PTE\n");
		return -1;
	    }
	} else if (guest_pde->large_page == 1) {
	    if (handle_large_pagefault32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
		PrintError("Error handling large pagefault\n");
		return -1;
	    }
	}
    } else if ((shadow_pde_access == PT_WRITE_ERROR) &&
	       (guest_pde->large_page == 1) &&
	       (((pde32_4MB_t *)guest_pde)->dirty == 0)) {
	// Page Directory Entry marked read-only
	// It's a large page and we need to update the dirty bit in the guest
	PrintDebug("Large page write error... Setting dirty bit and returning\n");
	((pde32_4MB_t *)guest_pde)->dirty = 1;
	shadow_pde->writable = guest_pde->writable;
	return 0;
    } else if (shadow_pde_access == PT_USER_ERROR) {
	// Page Directory Entry marked non-user
	PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
		   shadow_pde_access, guest_pde_access);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    } else {
	// Inject the page fault into the guest
	inject_guest_pf(info, fault_addr, error_code);
	PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
	PrintDebug("Manual Says to inject page fault into guest\n");
#ifdef DEBUG_SHADOW_PAGING
	PrintDebug("Guest PDE: (access=%d)\n\t", guest_pde_access);
	PrintPDE32(fault_addr, guest_pde);
	PrintDebug("Shadow PDE: (access=%d)\n\t", shadow_pde_access);
	PrintPDE32(fault_addr, shadow_pde);
#endif
	return 0;
    }

    PrintDebug("Returning end of PDE function (rip=%x)\n", (uint_t)info->rip);
    return 0;
}
/*
 * We assume that the guest pte pointer has already been translated to a host virtual address
 */
static int handle_shadow_pte32_fault(struct guest_info * info,
				     addr_t fault_addr,
				     pf_error_t error_code,
				     pte32_t * shadow_pt,
				     pte32_t * guest_pt) {

    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);

    // Check the guest page permissions
    guest_pte_access = can_access_pte32(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);

#ifdef DEBUG_SHADOW_PAGING
    PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
    PrintPTE32(fault_addr, guest_pte);
    PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
    PrintPTE32(fault_addr, shadow_pte);
#endif

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
	PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
		   guest_pte_access, *(uint_t *)&error_code);
	inject_guest_pf(info, fault_addr, error_code);
	return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
	// Inconsistent state...
	// Guest Re-Entry will flush page tables and everything should now work
	PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
	return 0;
    }

    if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
	// Page Table Entry Not Present
	addr_t guest_pa = PTE32_T_ADDR((*guest_pte)) + PT32_PAGE_OFFSET(fault_addr);

	host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

	if (host_page_type == HOST_REGION_INVALID) {
	    // Inject a machine check in the guest
	    PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_pa);
	    v3_raise_exception(info, MC_EXCEPTION);
	    return 0;
	}

	if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
	    struct shadow_page_state * state = &(info->shdw_pg_state);
	    addr_t shadow_pa = get_shadow_addr(info, guest_pa);

	    shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);

	    shadow_pte->present = guest_pte->present;
	    shadow_pte->user_page = guest_pte->user_page;

	    // set according to VMM policy
	    shadow_pte->write_through = 0;
	    shadow_pte->cache_disable = 0;
	    shadow_pte->global_page = 0;

	    guest_pte->accessed = 1;

	    if (find_pte_map(state->cached_ptes, PT32_PAGE_ADDR(guest_pa)) != NULL) {
		// The page holds one of the guest's page tables...
		PrintDebug("Marking page as Guest Page Table\n");
		shadow_pte->vmm_info = PT32_GUEST_PT;
	    }

	    if (guest_pte->dirty == 1) {
		shadow_pte->writable = guest_pte->writable;
	    } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
		shadow_pte->writable = guest_pte->writable;
		guest_pte->dirty = 1;

		if (shadow_pte->vmm_info == PT32_GUEST_PT) {
		    // Well that was quick...
		    PrintDebug("Immediate Write operation on Guest Page Table Page\n");
		    state->cached_cr3 = 0;
		}
	    } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
		shadow_pte->writable = 0;
	    }
	} else {
	    // Page fault handled by hook functions
	    if (handle_special_page_fault(info, fault_addr, guest_pa, error_code) == -1) {
		PrintError("Special Page fault handler returned error for address: %x\n", fault_addr);
		return -1;
	    }
	}
    } else if ((shadow_pte_access == PT_WRITE_ERROR) &&
	       (guest_pte->dirty == 0)) {

	PrintDebug("Shadow PTE Write Error\n");
	guest_pte->dirty = 1;
	shadow_pte->writable = guest_pte->writable;

	if (shadow_pte->vmm_info == PT32_GUEST_PT) {
	    struct shadow_page_state * state = &(info->shdw_pg_state);
	    PrintDebug("Write operation on Guest Page Table Page\n");
	    state->cached_cr3 = 0;
	}

	return 0;
    } else {
	// Inject the page fault into the guest
	inject_guest_pf(info, fault_addr, error_code);
	PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
	PrintError("Manual Says to inject page fault into guest\n");
	return -1;
    }

    PrintDebug("Returning end of function\n");
    return 0;
}
/* Currently Does not work with Segmentation!!! */
int v3_handle_shadow_invlpg(struct guest_info * info) {
    if (info->mem_mode != VIRTUAL_MEM) {
	// Paging must be turned on...
	// should handle with some sort of fault I think
	PrintError("ERROR: INVLPG called in non paged mode\n");
	return -1;
    }

    if (info->cpu_mode == PROTECTED) {
	uchar_t instr[15];
	int index = 0;
	int ret;

	ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
	if (ret != 15) {
	    PrintError("Could not read instruction 0x%x (ret=%d)\n", (uint_t)info->rip, ret);
	    return -1;
	}

	/* Can INVLPG work with Segments?? */
	while (is_prefix_byte(instr[index])) {
	    index++;
	}

	// NOTE: 0F 01 is only the two-byte escape; INVLPG proper is 0F 01 /7,
	// but the ModRM reg field is not checked here
	if ((instr[index] == (uchar_t)0x0f) &&
	    (instr[index + 1] == (uchar_t)0x01)) {

	    addr_t first_operand;
	    addr_t second_operand;
	    v3_operand_type_t addr_type;
	    addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);

	    pde32_t * guest_pd = NULL;

	    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
		PrintError("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
		return -1;
	    }

	    index += 2;

	    addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);

	    if (addr_type == MEM_OPERAND) {
		pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
		pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];
		pde32_t * guest_pde = NULL;

		//PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
		//PrintDebug("FirstOperand = %x\n", first_operand);

		PrintDebug("Invalidating page for %x\n", first_operand);

		guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(first_operand)]);

		if (guest_pde->large_page == 1) {
		    shadow_pde->present = 0;
		    PrintDebug("Invalidating Large Page\n");
		} else if (shadow_pde->present == 1) {
		    pte32_t * shadow_pt = (pte32_t *)(addr_t)PDE32_T_ADDR((*shadow_pde));
		    pte32_t * shadow_pte = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];

#ifdef DEBUG_SHADOW_PAGING
		    PrintDebug("Setting not present\n");
		    PrintPTE32(first_operand, shadow_pte);
#endif

		    shadow_pte->present = 0;
		}

		info->rip += index;
	    } else {
		PrintError("Invalid Operand type\n");
		return -1;
	    }
	} else {
	    PrintError("Invalid Instruction Opcode\n");
	    PrintTraceMemDump(instr, 15);
	    return -1;
	}
    }

    return 0;
}
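/* Example encoding: "invlpg [eax]" is 0F 01 38, where ModRM 0x38 encodes
 * mod=00, reg=7 (the /7 that selects INVLPG within the 0F 01 group), and
 * rm=0 (eax). The guest virtual address decoded into first_operand above
 * selects which shadow entry to drop.
 */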
/* ------------------------------------------------------------------------
 * Older versions of the large page handling code follow. They are
 * superseded by the implementations above (note the second definition of
 * handle_large_pagefault32 below) and would need to be compiled out for
 * this file to build.
 * ------------------------------------------------------------------------ */

static int create_pd32_nonaligned_4MB_page(struct guest_info * info, pte32_t * pt, addr_t guest_addr, pde32_4MB_t * large_shadow_pde) {
    uint_t i = 0;
    pte32_t * pte_cursor;
    addr_t guest_pa = 0;

    for (i = 0; i < 1024; i++) {
	guest_pa = guest_addr + (PAGE_SIZE * i);
	host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

	pte_cursor = &(pt[i]);

	if (host_page_type == HOST_REGION_INVALID) {
	    // Currently we don't support this, but in theory we could
	    PrintError("Invalid Host Memory Type\n");
	    return -1;
	} else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
	    addr_t shadow_pa = get_shadow_addr(info, guest_pa);

	    pte_cursor->page_base_addr = PT32_BASE_ADDR(shadow_pa);
	    pte_cursor->present = 1;
	    pte_cursor->writable = large_shadow_pde->writable;
	    pte_cursor->user_page = large_shadow_pde->user_page;
	    pte_cursor->write_through = 0;
	    pte_cursor->cache_disable = 0;
	    pte_cursor->global_page = 0;
	} else {
	    PrintError("Unsupported Host Memory Type\n");
	    return -1;
	}
    }

    return 0;
}
static int handle_large_pagefault32(struct guest_info * info,
				    pde32_t * guest_pde, pde32_t * shadow_pde,
				    addr_t fault_addr, pf_error_t error_code) {
    struct shadow_region * mem_reg;
    pde32_4MB_t * large_guest_pde = (pde32_4MB_t *)guest_pde;
    pde32_4MB_t * large_shadow_pde = (pde32_4MB_t *)shadow_pde;
    host_region_type_t host_page_type;
    addr_t guest_start_addr = PDE32_4MB_T_ADDR(*large_guest_pde);
    // addr_t guest_end_addr = guest_start_addr + PAGE_SIZE_4MB; // start address + 4MB

    // Check that the Guest PDE entry points to valid memory,
    // else Machine Check the guest
    PrintDebug("Large Page: Page Base Addr=%x\n", guest_start_addr);

    host_page_type = get_shadow_addr_type(info, guest_start_addr);

    if (host_page_type == HOST_REGION_INVALID) {
	PrintError("Invalid guest address in large page (0x%x)\n", guest_start_addr);
	v3_raise_exception(info, MC_EXCEPTION);
	return 0;
    }

    if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
	addr_t host_start_addr = 0;
	addr_t region_end_addr = 0;

	// Check for a large enough region in host memory
	mem_reg = get_shadow_region_by_addr(&(info->mem_map), guest_start_addr);
	PrintDebug("Host region: host_addr=%x (guest_start=%x, end=%x)\n",
		   mem_reg->host_addr, mem_reg->guest_start, mem_reg->guest_end);
	host_start_addr = mem_reg->host_addr + (guest_start_addr - mem_reg->guest_start);
	region_end_addr = mem_reg->host_addr + (mem_reg->guest_end - mem_reg->guest_start);

	PrintDebug("Host Start Addr=%x; Region End Addr=%x\n", host_start_addr, region_end_addr);

	// Dirty-bit emulation for the large page
	if (large_guest_pde->dirty == 1) { // dirty
	    large_shadow_pde->writable = guest_pde->writable;
	} else if (error_code.write == 1) { // not dirty, access is write
	    large_shadow_pde->writable = guest_pde->writable;
	    large_guest_pde->dirty = 1;
	} else { // not dirty, access is read
	    large_shadow_pde->writable = 0;
	}

	// Check if the region is at least an additional 4MB
	if ((PD32_4MB_PAGE_OFFSET(host_start_addr) == 0) &&
	    (region_end_addr >= host_start_addr + PAGE_SIZE_4MB)) { // if 4MB boundary
	    large_shadow_pde->page_base_addr = PD32_4MB_BASE_ADDR(host_start_addr);
	} else { // else generate 4k pages
	    pte32_t * shadow_pt = NULL;
	    PrintDebug("Handling non aligned large page\n");

	    shadow_pde->large_page = 0;

	    shadow_pt = (pte32_t *)v3_create_new_shadow_pt32();

	    if (create_pd32_nonaligned_4MB_page(info, shadow_pt, guest_start_addr, large_shadow_pde) == -1) {
		PrintError("Non Aligned Large Page Error\n");
		return -1;
	    }

#ifdef DEBUG_SHADOW_PAGING
	    PrintDebug("non-aligned Shadow PT\n");
	    PrintPT32(PT32_PAGE_ADDR(fault_addr), shadow_pt);
#endif

	    shadow_pde->pt_base_addr = PD32_BASE_ADDR((addr_t)shadow_pt);
	}
    } else {
	// Handle hooked pages as well as other special pages
	if (handle_special_page_fault(info, fault_addr, guest_start_addr, error_code) == -1) {
	    PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);
	    return -1;
	}
    }

    return 0;
}