#include <palacios/vmm_shadow_paging.h>

#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>

#ifndef DEBUG_SHADOW_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
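
/* Shadow paging in a nutshell: the guest maintains its own page tables, but
 * the hardware only ever walks the shadow page tables managed by the VMM.
 * Every guest page fault therefore traps into the handlers below, which
 * decide whether the fault belongs to the guest (inject it) or to a missing
 * or stale shadow entry (fix up the shadow tables and resume).
 */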
static int handle_shadow_pte32_fault(struct guest_info * info,
                                     addr_t fault_addr,
                                     pf_error_t error_code,
                                     pte32_t * shadow_pt,
                                     pte32_t * guest_pt);

static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
int init_shadow_page_state(struct guest_info * info) {
  struct shadow_page_state * state = &(info->shdw_pg_state);

  state->guest_mode = PDE32;
  state->shadow_mode = PDE32;

  state->guest_cr3 = 0;
  state->shadow_cr3 = 0;

  return 0;
}
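
/* Swap the shadow PTE mapping 'location' with 'new_page', returning the
 * previous entry through 'old_page'. Only the shadow tables are touched;
 * the guest's own page tables are left unmodified.
 */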
int v3_replace_shdw_page32(struct guest_info * info, addr_t location, pte32_t * new_page, pte32_t * old_page) {
  pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
  pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(location)]);

  if (shadow_pde->large_page == 0) {
    pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(location)]);

    //if (shadow_pte->present == 1) {
    *(uint_t *)old_page = *(uint_t *)shadow_pte;
    //}

    *(uint_t *)shadow_pte = *(uint_t *)new_page;
  } else {
    // Large page replacement is currently unhandled
    return -1;
  }

  return 0;
}
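
/* Top-level page fault dispatch. Before the guest turns on paging, faults
 * can only come from hooked/special physical pages; once paging is enabled,
 * hand off to the handler for the active paging mode.
 */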
int handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {

  if (info->mem_mode == PHYSICAL_MEM) {
    // If paging is not turned on we need to handle the special cases
    return handle_special_page_fault(info, fault_addr, fault_addr, error_code);
  } else if (info->mem_mode == VIRTUAL_MEM) {

    switch (info->cpu_mode) {
    case PROTECTED:
      return handle_shadow_pagefault32(info, fault_addr, error_code);
    default:
      PrintError("Unhandled CPU Mode\n");
      return -1;
    }
  } else {
    PrintError("Invalid Memory mode\n");
    return -1;
  }
}
addr_t create_new_shadow_pt32() {
  void * host_pde = 0;

  host_pde = V3_AllocPages(1);
  memset(host_pde, 0, PAGE_SIZE);

  return (addr_t)host_pde;
}
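
/* Reflect a page fault back into the guest: load the faulting address into
 * the guest's CR2 and raise #PF with the original hardware error code.
 */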
static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
  info->ctrl_regs.cr2 = fault_addr;
  v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
}
static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access) {
  /* There can be multiple reasons for a page fault:
     If there is a permissions failure for a page present in the guest _BUT_
     the reason for the fault was that the page is not present in the shadow,
     _THEN_ we have to map the shadow page in and re-execute. This will generate
     a permissions fault, which is _THEN_ valid to send to the guest,
     _UNLESS_ both the guest and shadow have marked the page as not present,
     in which case the guest fault takes priority.
  */
  if (guest_access != PT_ACCESS_OK) {
    // Guest Access Error

    if ((shadow_access != PT_ENTRY_NOT_PRESENT) &&
        (guest_access != PT_ENTRY_NOT_PRESENT)) {
      // aka (guest permission error)
      return 1;
    }

    if ((shadow_access == PT_ENTRY_NOT_PRESENT) &&
        (guest_access == PT_ENTRY_NOT_PRESENT)) {
      // Page tables completely blank, handle guest first
      return 1;
    }

    // Otherwise we'll handle the guest fault later...?
  }

  return 0;
}
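
/* Decision summary for is_guest_pf() (a sketch derived from the logic above):
 *
 *   guest access      shadow access        result
 *   ------------      -------------        ------
 *   OK                (any)                0: shadow-only fault
 *   perm error        not present          0: fill shadow entry, re-execute
 *   perm error        present/perm error   1: inject #PF into the guest
 *   not present       not present          1: inject #PF into the guest
 *   not present       present/perm error   0: fix shadow first; the guest
 *                                             fault resurfaces on re-execution
 */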
/* The guest status checks have already been done;
 * only the special-case shadow checks remain.
 */
static int handle_large_pagefault32(struct guest_info * info,
                                    addr_t fault_addr, pf_error_t error_code,
                                    pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
{
  pt_access_status_t shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);
  pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
  if (shadow_pte_access == PT_ACCESS_OK) {
    // Inconsistent state...
    // Guest re-entry will flush the tables, and everything should then work
    PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
    return 0;
  }
  if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
    // Get the guest physical address of the fault
    addr_t guest_fault_pa = PDE32_4MB_T_ADDR(*large_guest_pde) + PD32_4MB_PAGE_OFFSET(fault_addr);
    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_fault_pa);

    if (host_page_type == HOST_REGION_INVALID) {
      // Inject a machine check in the guest
      PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_fault_pa);
      v3_raise_exception(info, MC_EXCEPTION);
      return 0;
    }
    if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
      addr_t shadow_pa = get_shadow_addr(info, guest_fault_pa);

      shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);

      shadow_pte->present = 1;

      /* We are assuming that the PDE entry has precedence,
       * so the shadow PDE will mirror the guest PDE settings
       * and we don't have to worry about them here.
       */
      shadow_pte->user_page = 1;
      shadow_pte->writable = 1;

      // set according to VMM policy
      shadow_pte->write_through = 0;
      shadow_pte->cache_disable = 0;
      shadow_pte->global_page = 0;
    } else {
      // Handle hooked pages as well as other special pages
      if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
        PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);
        return -1;
      }
    }
  } else {
    PrintError("Error in large page fault handler...\n");
    PrintError("This case should have been handled at the top level handler\n");
    return -1;
  }

  PrintDebug("Returning from large page fault handler\n");
  return 0;
}
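
/* PDE-level fault handler for 32-bit paging: walks the guest and shadow page
 * directories in parallel, decides whether the fault belongs to the guest,
 * and otherwise builds or repairs the shadow PDE before descending to the
 * PTE level.
 */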
static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
  pde32_t * guest_pd = NULL;
  pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
  addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
  pt_access_status_t guest_pde_access;
  pt_access_status_t shadow_pde_access;
  pde32_t * guest_pde = NULL;
  pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);

  PrintDebug("Shadow page fault handler\n");

  if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
    PrintError("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
    return -1;
  }

  guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
  // Check the guest page permissions
  guest_pde_access = can_access_pde32(guest_pd, fault_addr, error_code);

  // Check the shadow page permissions
  shadow_pde_access = can_access_pde32(shadow_pd, fault_addr, error_code);

  /* Was the page fault caused by the guest's page tables? */
  if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
    PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
               guest_pde_access, *(uint_t *)&error_code);
    inject_guest_pf(info, fault_addr, error_code);
    return 0;
  }
  if (shadow_pde_access == PT_ENTRY_NOT_PRESENT)
    {
      pte32_t * shadow_pt = (pte32_t *)create_new_shadow_pt32();

      shadow_pde->present = 1;
      shadow_pde->user_page = guest_pde->user_page;
      // shadow_pde->large_page = guest_pde->large_page;
      shadow_pde->large_page = 0;

      // VMM Specific options
      shadow_pde->write_through = 0;
      shadow_pde->cache_disable = 0;
      shadow_pde->global_page = 0;

      guest_pde->accessed = 1;

      shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
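
      /* Dirty-bit tracking at the PDE level: a large guest page starts out
       * read-only in the shadow, so the first guest write faults and lets us
       * set the guest PDE's dirty bit (see the PT_WRITE_ERROR case below).
       */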
      if (guest_pde->large_page == 0) {
        shadow_pde->writable = guest_pde->writable;
      } else {
        ((pde32_4MB_t *)guest_pde)->dirty = 0;
        shadow_pde->writable = 0;
      }
    }
  else if (shadow_pde_access == PT_ACCESS_OK)
    {
      //
      // The PDE is fine, so the fault must come from the PTE level
      //
      pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));

      if (guest_pde->large_page == 0) {
        pte32_t * guest_pt = NULL;
        if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde)), (addr_t*)&guest_pt) == -1) {
          // Machine check the guest
          PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde)));
          v3_raise_exception(info, MC_EXCEPTION);
          return 0;
        }

        if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
          PrintError("Error handling Page fault caused by PTE\n");
          return -1;
        }
      } else if (guest_pde->large_page == 1) {
        if (handle_large_pagefault32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
          PrintError("Error handling large pagefault\n");
          return -1;
        }
      }
    }
  else if ((shadow_pde_access == PT_WRITE_ERROR) &&
           (guest_pde->large_page == 1) &&
           (((pde32_4MB_t *)guest_pde)->dirty == 0))
    {
      //
      // Page Directory Entry marked read-only
      // It's a large page, and we need to update the dirty bit in the guest
      //
      PrintDebug("Large page write error... Setting dirty bit and returning\n");
      ((pde32_4MB_t *)guest_pde)->dirty = 1;
      shadow_pde->writable = guest_pde->writable;
      return 0;
    }
  else if (shadow_pde_access == PT_USER_ERROR)
    {
      //
      // Page Directory Entry marked non-user
      //
      PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                 shadow_pde_access, guest_pde_access);
      inject_guest_pf(info, fault_addr, error_code);
      return 0;
    }
  else
    {
      // inject page fault into the guest
      inject_guest_pf(info, fault_addr, error_code);
      PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
      PrintDebug("Manual says to inject page fault into guest\n");
#ifdef DEBUG_SHADOW_PAGING
      PrintDebug("Guest PDE: (access=%d)\n\t", guest_pde_access);
      PrintPDE32(fault_addr, guest_pde);
      PrintDebug("Shadow PDE: (access=%d)\n\t", shadow_pde_access);
      PrintPDE32(fault_addr, shadow_pde);
#endif
      return 0;
    }

  PrintDebug("Returning end of PDE function (rip=%x)\n", info->rip);
  return 0;
}
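
/* PTE-level fault handler: invoked once the PDE level has checked out and
 * the fault lies within a normal 4KB-paged region.
 */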
/*
 * We assume that the guest pte pointer has already been translated
 * to a host virtual address.
 */
static int handle_shadow_pte32_fault(struct guest_info * info,
                                     addr_t fault_addr,
                                     pf_error_t error_code,
                                     pte32_t * shadow_pt,
                                     pte32_t * guest_pt) {
  pt_access_status_t guest_pte_access;
  pt_access_status_t shadow_pte_access;
  pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
  pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
  // Check the guest page permissions
  guest_pte_access = can_access_pte32(guest_pt, fault_addr, error_code);

  // Check the shadow page permissions
  shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);

#ifdef DEBUG_SHADOW_PAGING
  PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
  PrintPTE32(fault_addr, guest_pte);
  PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
  PrintPTE32(fault_addr, shadow_pte);
#endif
  /* Was the page fault caused by the guest's page tables? */
  if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
    PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
               guest_pte_access, *(uint_t*)&error_code);
    inject_guest_pf(info, fault_addr, error_code);
    return 0;
  }

  if (shadow_pte_access == PT_ACCESS_OK) {
    // Inconsistent state...
    // Guest re-entry will flush the page tables, and everything should then work
    PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
    return 0;
  }
  if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {

    addr_t guest_pa = PTE32_T_ADDR((*guest_pte)) + PT32_PAGE_OFFSET(fault_addr);

    // Page Table Entry Not Present

    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

    if (host_page_type == HOST_REGION_INVALID) {
      // Inject a machine check in the guest
      PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_pa);
      v3_raise_exception(info, MC_EXCEPTION);
      return 0;
    }

    if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
      addr_t shadow_pa = get_shadow_addr(info, guest_pa);

      shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);

      shadow_pte->present = guest_pte->present;
      shadow_pte->user_page = guest_pte->user_page;

      // set according to VMM policy
      shadow_pte->write_through = 0;
      shadow_pte->cache_disable = 0;
      shadow_pte->global_page = 0;

      guest_pte->accessed = 1;
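
      /* Classic shadow-paging dirty-bit emulation: while the guest PTE is
       * clean, keep the shadow PTE read-only even if the guest allows writes,
       * so the first write traps here and we can set the guest dirty bit
       * before granting write access.
       */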
      if (guest_pte->dirty == 1) {
        shadow_pte->writable = guest_pte->writable;
      } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
        shadow_pte->writable = guest_pte->writable;
        guest_pte->dirty = 1;
      } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
        shadow_pte->writable = 0;
      }
    } else {
      // Page fault handled by hook functions
      if (handle_special_page_fault(info, fault_addr, guest_pa, error_code) == -1) {
        PrintError("Special Page fault handler returned error for address: %x\n", fault_addr);
        return -1;
      }
    }
  } else if ((shadow_pte_access == PT_WRITE_ERROR) &&
             (guest_pte->dirty == 0)) {

    PrintDebug("Shadow PTE Write Error\n");
    guest_pte->dirty = 1;
    shadow_pte->writable = guest_pte->writable;
    return 0;
  } else {
    // Inject page fault into the guest
    inject_guest_pf(info, fault_addr, error_code);
    PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
    PrintError("Manual says to inject page fault into guest\n");
    return -1;
  }

  PrintDebug("Returning end of function\n");
  return 0;
}
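
/* INVLPG must be intercepted because the hardware TLB caches *shadow*
 * translations: when the guest invalidates one of its own mappings, the
 * corresponding shadow entry must be knocked out as well, or the stale
 * translation would survive the guest's flush.
 */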
/* Currently does not work with segmentation!!! */
int handle_shadow_invlpg(struct guest_info * info) {
  if (info->mem_mode != VIRTUAL_MEM) {
    // Paging must be turned on...
    // should probably be handled with some sort of fault
    PrintError("ERROR: INVLPG called in non paged mode\n");
    return -1;
  }

  if (info->cpu_mode == PROTECTED) {
    uchar_t instr[15];
    int index = 0;
    int ret;

    ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    if (ret != 15) {
      PrintError("Could not read instruction 0x%x (ret=%d)\n", info->rip, ret);
      return -1;
    }
    /* Can INVLPG work with Segments?? */
    while (is_prefix_byte(instr[index])) {
      index++;
    }

    if ((instr[index] == (uchar_t)0x0f) &&
        (instr[index + 1] == (uchar_t)0x01)) {

      addr_t first_operand;
      addr_t second_operand;
      operand_type_t addr_type;
      addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);

      pde32_t * guest_pd = NULL;
      pde32_t * guest_pde = NULL;

      if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
        return -1;
      }

      index += 2;

      addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);
      if (addr_type == MEM_OPERAND) {
        pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
        pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];

        //PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
        //PrintDebug("FirstOperand = %x\n", first_operand);

        PrintDebug("Invalidating page for %x\n", first_operand);
        guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(first_operand)]);

        if (guest_pde->large_page == 1) {
          shadow_pde->present = 0;
          PrintDebug("Invalidating Large Page\n");
        } else {
          if (shadow_pde->present == 1) {
            pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
            pte32_t * shadow_pte = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];

#ifdef DEBUG_SHADOW_PAGING
            PrintDebug("Setting not present\n");
            PrintPTE32(first_operand, shadow_pte);
#endif

            shadow_pte->present = 0;
          }
        }
      } else {
        PrintError("Invalid Operand type\n");
        return -1;
      }
    } else {
      PrintError("invalid Instruction Opcode\n");
      PrintTraceMemDump(instr, 15);
      return -1;
    }
  }

  return 0;
}
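
/* Back a guest 4MB page with 1024 individual 4KB shadow PTEs. This is needed
 * when the host memory backing the guest page is not a single aligned,
 * contiguous 4MB region, so no single large shadow mapping can cover it.
 */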
static int create_pd32_nonaligned_4MB_page(struct guest_info * info, pte32_t * pt, addr_t guest_addr, pde32_4MB_t * large_shadow_pde) {
  addr_t guest_pa = 0;
  pte32_t * pte_cursor;
  int i = 0;

  for (i = 0; i < 1024; i++) {
    guest_pa = guest_addr + (PAGE_SIZE * i);
    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

    pte_cursor = &(pt[i]);
    if (host_page_type == HOST_REGION_INVALID) {
      // Currently we don't support this, but in theory we could
      PrintError("Invalid Host Memory Type\n");
      return -1;
    } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
      addr_t shadow_pa = get_shadow_addr(info, guest_pa);

      pte_cursor->page_base_addr = PT32_BASE_ADDR(shadow_pa);
      pte_cursor->present = 1;
      pte_cursor->writable = large_shadow_pde->writable;
      pte_cursor->user_page = large_shadow_pde->user_page;
      pte_cursor->write_through = 0;
      pte_cursor->cache_disable = 0;
      pte_cursor->global_page = 0;
    } else {
      PrintError("Unsupported Host Memory Type\n");
      return -1;
    }
  }

  return 0;
}
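
/* What follows appears to be an alternate revision of the large-page fault
 * handler; it is kept under a distinct name so it does not collide with
 * handle_large_pagefault32() above. Its notable difference is falling back to
 * create_pd32_nonaligned_4MB_page() when the host backing for a guest 4MB
 * page is not 4MB-aligned and contiguous.
 */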
static int handle_large_pagefault32_alt(struct guest_info * info,
                                        pde32_t * guest_pde, pde32_t * shadow_pde,
                                        addr_t fault_addr, pf_error_t error_code) {
  struct shadow_region * mem_reg;
  pde32_4MB_t * large_guest_pde = (pde32_4MB_t *)guest_pde;
  pde32_4MB_t * large_shadow_pde = (pde32_4MB_t *)shadow_pde;
  host_region_type_t host_page_type;
  addr_t guest_start_addr = PDE32_4MB_T_ADDR(*large_guest_pde);
  // addr_t guest_end_addr = guest_start_addr + PAGE_SIZE_4MB; // start address + 4MB

  // Check that the Guest PDE entry points to valid memory,
  // else Machine Check the guest
  PrintDebug("Large Page: Page Base Addr=%x\n", guest_start_addr);
  host_page_type = get_shadow_addr_type(info, guest_start_addr);

  if (host_page_type == HOST_REGION_INVALID) {
    PrintError("Invalid guest address in large page (0x%x)\n", guest_start_addr);
    v3_raise_exception(info, MC_EXCEPTION);
    return 0;
  }

  if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {

    addr_t host_start_addr = 0;
    addr_t region_end_addr = 0;

    // Check for a large enough region in host memory
    mem_reg = get_shadow_region_by_addr(&(info->mem_map), guest_start_addr);
    PrintDebug("Host region: host_addr=%x (guest_start=%x, end=%x)\n",
               mem_reg->host_addr, mem_reg->guest_start, mem_reg->guest_end);
    host_start_addr = mem_reg->host_addr + (guest_start_addr - mem_reg->guest_start);
    region_end_addr = mem_reg->host_addr + (mem_reg->guest_end - mem_reg->guest_start);

    PrintDebug("Host Start Addr=%x; Region End Addr=%x\n", host_start_addr, region_end_addr);
    if (large_guest_pde->dirty == 1) { // dirty
      large_shadow_pde->writable = guest_pde->writable;
    } else if (error_code.write == 1) { // not dirty, access is a write
      large_shadow_pde->writable = guest_pde->writable;
      large_guest_pde->dirty = 1;
    } else { // not dirty, access is a read
      large_shadow_pde->writable = 0;
    }
    // Check that the host backing is 4MB-aligned and covers the full 4MB page
    if ((PD32_4MB_PAGE_OFFSET(host_start_addr) == 0) &&
        (region_end_addr >= host_start_addr + PAGE_SIZE_4MB)) { // on a 4MB boundary
      large_shadow_pde->page_base_addr = PD32_4MB_BASE_ADDR(host_start_addr);
    } else { // else generate 4k pages
      pte32_t * shadow_pt = NULL;
      PrintDebug("Handling non aligned large page\n");
      shadow_pde->large_page = 0;

      shadow_pt = (pte32_t *)create_new_shadow_pt32();

      if (create_pd32_nonaligned_4MB_page(info, shadow_pt, guest_start_addr, large_shadow_pde) == -1) {
        PrintError("Non Aligned Large Page Error\n");
        return -1;
      }

#ifdef DEBUG_SHADOW_PAGING
      PrintDebug("non-aligned Shadow PT\n");
      PrintPT32(PT32_PAGE_ADDR(fault_addr), shadow_pt);
#endif

      shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
    }
  } else {
    // Handle hooked pages as well as other special pages
    if (handle_special_page_fault(info, fault_addr, guest_start_addr, error_code) == -1) {
      PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);
      return -1;
    }
  }

  return 0;
}