#include <palacios/vmm_shadow_paging.h>

#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>

#ifndef DEBUG_SHADOW_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
static int handle_shadow_pte32_fault(struct guest_info * info, addr_t fault_addr,
                                     pf_error_t error_code,
                                     pte32_t * shadow_pt, pte32_t * guest_pt);

static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
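
/* Set up the initial shadow paging state for a guest: both the guest and the
 * shadow tables start out as 32-bit (PDE32) and no shadow CR3 exists yet. */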
int init_shadow_page_state(struct guest_info * info) {
  struct shadow_page_state * state = &(info->shdw_pg_state);

  state->guest_mode = PDE32;
  state->shadow_mode = PDE32;

  state->shadow_cr3 = 0;

  return 0;
}
int handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
  if (info->mem_mode == PHYSICAL_MEM) {
    // If paging is not turned on we need to handle the special cases
    return handle_special_page_fault(info, fault_addr, error_code);
  } else if (info->mem_mode == VIRTUAL_MEM) {
    switch (info->cpu_mode) {
    case PROTECTED:
      return handle_shadow_pagefault32(info, fault_addr, error_code);
    default:
      PrintError("Unhandled CPU Mode\n");
      return -1;
    }
  } else {
    PrintError("Invalid Memory mode\n");
    return -1;
  }
}
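
/* Allocate and zero a single host page to serve as a fresh (empty) shadow
 * page table; the caller installs it into a shadow PDE. */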
addr_t create_new_shadow_pt32(struct guest_info * info) {
  void * host_pde = NULL;

  host_pde = V3_AllocPages(1);
  memset(host_pde, 0, PAGE_SIZE);

  return (addr_t)host_pde;
}
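
/* Back a guest 4MB page with 1024 individual 4KB shadow PTEs. This is used
 * when the host memory backing the guest large page is not itself a single
 * aligned 4MB region, so each 4KB piece is mapped separately. */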
static int handle_pd32_nonaligned_4MB_page(struct guest_info * info, pte32_t * pt, addr_t guest_addr, pde32_4MB_t * large_shadow_pde) {
  uint_t i = 0;
  pte32_t * pte_cursor = NULL;
  addr_t guest_pa = 0;

  for (i = 0; i < 1024; i++) {
    guest_pa = guest_addr + (PAGE_SIZE * i);
    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

    pte_cursor = &(pt[i]);

    if (host_page_type == HOST_REGION_INVALID) {
      // Currently we don't support this, but in theory we could
      PrintError("Invalid Host Memory Type\n");
      return -1;
    } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
      addr_t shadow_pa = get_shadow_addr(info, guest_pa);

      pte_cursor->page_base_addr = PT32_BASE_ADDR(shadow_pa);
      pte_cursor->present = 1;
      pte_cursor->writable = large_shadow_pde->writable;
      pte_cursor->user_page = large_shadow_pde->user_page;
      pte_cursor->write_through = 0;
      pte_cursor->cache_disable = 0;
      pte_cursor->global_page = 0;
    } else {
      PrintError("Unsupported Host Memory Type\n");
      return -1;
    }
  }

  return 0;
}
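
/* Top-level handler for a shadow page fault under 32-bit paging. It compares
 * the guest's view of the faulting PDE with the shadow PDE and either injects
 * the fault into the guest, fills in the missing shadow PDE, or descends to
 * the PTE-level handler. */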
static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
  pde32_t * guest_pd = NULL;
  pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
  addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
  pt_access_status_t guest_pde_access;
  pt_access_status_t shadow_pde_access;
  pde32_t * guest_pde = NULL;
  pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);

  if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
    PrintError("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
    return -1;
  }

  guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
  // Check the guest page permissions
  guest_pde_access = can_access_pde32(guest_pd, fault_addr, error_code);

  // Check the shadow page permissions
  shadow_pde_access = can_access_pde32(shadow_pd, fault_addr, error_code);
  /* This should be redone, but the basic reasoning is that there can be
   * multiple reasons for a page fault. If there is a permissions failure for a
   * page that is present in the guest _BUT_ the fault occurred because the page
   * is not present in the shadow, _THEN_ we have to map the shadow page in and
   * re-execute. That re-execution will generate a permissions fault, which is
   * _THEN_ valid to send to the guest, _UNLESS_ both the guest and the shadow
   * have marked the page as not present, in which case the fault goes directly
   * to the guest.
   */
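
  /*
   * A rough reading of the check below (summarized, not authoritative):
   *   - guest entry not present AND shadow entry not present -> inject #PF into the guest
   *   - guest permission error AND shadow entry present      -> inject #PF into the guest
   *   - otherwise (e.g. the shadow entry is simply missing)  -> fall through, build the
   *     shadow entry, and let the re-executed access fault again if it must
   */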
  if ((guest_pde_access != PT_ACCESS_OK) &&
      (((shadow_pde_access != PT_ENTRY_NOT_PRESENT) &&
        (guest_pde_access != PT_ENTRY_NOT_PRESENT)) // aka (guest permission error)
       ||
       ((shadow_pde_access == PT_ENTRY_NOT_PRESENT) &&
        (guest_pde_access == PT_ENTRY_NOT_PRESENT)))) {
    // inject page fault to the guest (Guest PDE fault)
    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n", guest_pde_access, *(uint_t *)&error_code);
#ifdef DEBUG_SHADOW_PAGING
  PrintDebug("Guest CR3=%x\n", guest_cr3);
  PrintDebug("Guest PD\n");
  PrintPD32(guest_pd);
  PrintDebug("Shadow PD\n");
  PrintPD32(shadow_pd);
#endif

  //shadow_pde_access = can_access_pde32(shadow_pd, fault_addr, error_code);
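
  // The shadow PDE is not present: construct it from the guest PDE. A 4KB
  // guest page directory entry gets a freshly allocated shadow page table,
  // while a 4MB guest entry is either mapped directly or broken into 4KB pages.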
  if (shadow_pde_access == PT_ENTRY_NOT_PRESENT) {

    shadow_pde->present = 1;
    shadow_pde->user_page = guest_pde->user_page;
    shadow_pde->large_page = guest_pde->large_page;

    // VMM Specific options
    shadow_pde->write_through = 0;
    shadow_pde->cache_disable = 0;
    shadow_pde->global_page = 0;

    guest_pde->accessed = 1;
    if (guest_pde->large_page == 0) {
      pte32_t * shadow_pt = NULL;

      shadow_pt = V3_AllocPages(1);
      memset(shadow_pt, 0, PAGE_SIZE);

      shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);

      shadow_pde->writable = guest_pde->writable;
    } else {
      struct shadow_region * mem_reg;
      pde32_4MB_t * large_guest_pde = (pde32_4MB_t *)guest_pde;
      pde32_4MB_t * large_shadow_pde = (pde32_4MB_t *)shadow_pde;
      host_region_type_t host_page_type;
      addr_t guest_start_addr = PDE32_4MB_T_ADDR(*large_guest_pde);
      // addr_t guest_end_addr = guest_start_addr + PAGE_SIZE_4MB; // start address + 4MB
      // Check that the Guest PDE entry points to valid memory,
      // else Machine Check the guest
      PrintDebug("Large Page: Page Base Addr=%x\n", guest_start_addr);

      host_page_type = get_shadow_addr_type(info, guest_start_addr);

      if (host_page_type == HOST_REGION_INVALID) {
        v3_raise_exception(info, MC_EXCEPTION);
        PrintError("Invalid guest address in large page (0x%x)\n", guest_start_addr);
      } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
        addr_t host_start_addr = 0;
        addr_t region_end_addr = 0;

        // Check for a large enough region in host memory
        mem_reg = get_shadow_region_by_addr(&(info->mem_map), guest_start_addr);
        PrintDebug("Host region: host_addr=%x (guest_start=%x, end=%x)\n",
                   mem_reg->host_addr, mem_reg->guest_start, mem_reg->guest_end);
        host_start_addr = mem_reg->host_addr + (guest_start_addr - mem_reg->guest_start);
        region_end_addr = mem_reg->host_addr + (mem_reg->guest_end - mem_reg->guest_start);

        PrintDebug("Host Start Addr=%x; Region End Addr=%x\n", host_start_addr, region_end_addr);
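
        // Emulate the guest dirty bit for the large page: the shadow mapping is
        // only made writable once the guest page is already dirty or this fault
        // is itself a write. A clean page stays read-only so the first write
        // will fault and let us set the guest dirty bit.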
        if (large_guest_pde->dirty == 1) { // dirty
          large_shadow_pde->writable = guest_pde->writable;
        } else if (error_code.write == 1) { // not dirty, access is write
          large_shadow_pde->writable = guest_pde->writable;
          large_guest_pde->dirty = 1;
        } else { // not dirty, access is read
          large_shadow_pde->writable = 0;
        }

        // Check if the region is at least an additional 4MB
        if ((PD32_4MB_PAGE_OFFSET(host_start_addr) == 0) &&
            (region_end_addr >= host_start_addr + PAGE_SIZE_4MB)) { // if 4MB boundary
          large_shadow_pde->page_base_addr = PD32_4MB_BASE_ADDR(host_start_addr);
        } else { // else generate 4k pages
          pte32_t * shadow_pt = NULL;
          PrintDebug("Handling non aligned large page\n");

          shadow_pde->large_page = 0;

          shadow_pt = V3_AllocPages(1);
          memset(shadow_pt, 0, PAGE_SIZE);

          if (handle_pd32_nonaligned_4MB_page(info, shadow_pt, guest_start_addr, large_shadow_pde) == -1) {
            PrintError("Non Aligned Large Page Error\n");
            return -1;
          }

#ifdef DEBUG_SHADOW_PAGING
          PrintDebug("non-aligned Shadow PT\n");
          PrintPT32(PT32_PAGE_ADDR(fault_addr), shadow_pt);
#endif
          shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
        }
      } else {
        // Handle hooked pages as well as other special pages
        if (handle_special_page_fault(info, fault_addr, error_code) == -1) {
          PrintError("Special Page Fault handler returned error for address: %x\n", fault_addr);
          return -1;
        }
  } else if ((shadow_pde_access == PT_WRITE_ERROR) &&
             (guest_pde->large_page == 1) &&
             (((pde32_4MB_t *)guest_pde)->dirty == 0)) {

    // Page Directory Entry marked read-only

    ((pde32_4MB_t *)guest_pde)->dirty = 1;
    shadow_pde->writable = guest_pde->writable;
  } else if (shadow_pde_access == PT_USER_ERROR) {

    // Page Directory Entry marked non-user

    PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x) - injecting into guest\n", shadow_pde_access, guest_pde_access);
    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
  } else if (shadow_pde_access == PT_ACCESS_OK) {
    pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
    pte32_t * guest_pt = NULL;

    // Page Table Entry fault

    if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde)), (addr_t*)&guest_pt) == -1) {
      PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde)));
      // Machine check the guest
      v3_raise_exception(info, MC_EXCEPTION);
      return 0;
    }

    if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
      PrintError("Error handling Page fault caused by PTE\n");
      return -1;
    }
  } else {
    // Unknown error: raise a page fault in the guest
    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    // For debugging we will return an error here for the time being,
    // this probably shouldn't ever happen
    PrintDebug("Unknown Error occurred\n");
    PrintDebug("Manual Says to inject page fault into guest\n");
    return -1;
  }

  //PrintDebugPageTables(shadow_pd);
  PrintDebug("Returning end of PDE function (rip=%x)\n", info->rip);
  return 0;
}
/* We assume that the guest pte pointer has already been translated to a host virtual address. */
static int handle_shadow_pte32_fault(struct guest_info * info,
                                     addr_t fault_addr,
                                     pf_error_t error_code,
                                     pte32_t * shadow_pt,
                                     pte32_t * guest_pt) {

  pt_access_status_t guest_pte_access;
  pt_access_status_t shadow_pte_access;
  pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
  pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
  // Check the guest page permissions
  guest_pte_access = can_access_pte32(guest_pt, fault_addr, error_code);

  // Check the shadow page permissions
  shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);
#ifdef DEBUG_SHADOW_PAGING
  PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
  PrintPTE32(fault_addr, guest_pte);
  PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
  PrintPTE32(fault_addr, shadow_pte);
#endif
  /* This should be redone, but the basic reasoning is that there can be
   * multiple reasons for a page fault. If there is a permissions failure for a
   * page that is present in the guest _BUT_ the fault occurred because the page
   * is not present in the shadow, _THEN_ we have to map the shadow page in and
   * re-execute. That re-execution will generate a permissions fault, which is
   * _THEN_ valid to send to the guest, _UNLESS_ both the guest and the shadow
   * have marked the page as not present, in which case the fault goes directly
   * to the guest.
   */
  if ((guest_pte_access != PT_ACCESS_OK) &&
      (((shadow_pte_access != PT_ENTRY_NOT_PRESENT) &&
        (guest_pte_access != PT_ENTRY_NOT_PRESENT)) // aka (guest permission error)
       ||
       ((shadow_pte_access == PT_ENTRY_NOT_PRESENT) &&
        (guest_pte_access == PT_ENTRY_NOT_PRESENT)))) {
    // Inject page fault into the guest
    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n", guest_pte_access, *(uint_t*)&error_code);
  if (shadow_pte_access == PT_ACCESS_OK) {
    // Inconsistent state...
    // Guest Re-Entry will flush page tables and everything should now work
    PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
    return 0;
  } else if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
    addr_t guest_pa = PTE32_T_ADDR((*guest_pte));

    // Page Table Entry Not Present
    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

    if (host_page_type == HOST_REGION_INVALID) {
      // Inject a machine check into the guest
      v3_raise_exception(info, MC_EXCEPTION);

#ifdef DEBUG_SHADOW_PAGING
      PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_pa);
      PrintDebug("fault_addr=0x%x next are guest and shadow ptes\n", fault_addr);
      PrintPTE32(fault_addr, guest_pte);
      PrintPTE32(fault_addr, shadow_pte);
      PrintDebug("Done.\n");
#endif
    } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
      addr_t shadow_pa = get_shadow_addr(info, guest_pa);

      shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);

      shadow_pte->present = guest_pte->present;
      shadow_pte->user_page = guest_pte->user_page;

      // set according to VMM policy
      shadow_pte->write_through = 0;
      shadow_pte->cache_disable = 0;
      shadow_pte->global_page = 0;
      guest_pte->accessed = 1;
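
      // Same dirty-bit emulation at the 4KB level: keep the shadow PTE read-only
      // until the guest PTE is dirty (or this write makes it dirty), so the first
      // write traps and lets us update the guest dirty bit.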
      if (guest_pte->dirty == 1) {
        shadow_pte->writable = guest_pte->writable;
      } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
        shadow_pte->writable = guest_pte->writable;
        guest_pte->dirty = 1;
      } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
        shadow_pte->writable = 0;
      }
    } else {
      // Page fault handled by hook functions
      if (handle_special_page_fault(info, fault_addr, error_code) == -1) {
        PrintError("Special Page fault handler returned error for address: %x\n", fault_addr);
        return -1;
      }
  } else if ((shadow_pte_access == PT_WRITE_ERROR) &&
             (guest_pte->dirty == 0)) {
    guest_pte->dirty = 1;
    shadow_pte->writable = guest_pte->writable;

    PrintDebug("Shadow PTE Write Error\n");
  } else {
    // Inject page fault into the guest

    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
    PrintError("Manual Says to inject page fault into guest\n");
    return -1;
  }
  PrintDebug("Returning end of function\n");
  return 0;
}
/* Currently Does not work with Segmentation!!! */
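/* Emulate INVLPG: manually decode the instruction at the guest RIP and mark the
 * corresponding shadow PDE/PTE not present so it is rebuilt on the next fault. */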
int handle_shadow_invlpg(struct guest_info * info) {
  if (info->mem_mode != VIRTUAL_MEM) {
    // Paging must be turned on...
    // should handle with some sort of fault I think
    PrintError("ERROR: INVLPG called in non paged mode\n");
    return -1;
  }

  if (info->cpu_mode == PROTECTED) {
    uchar_t instr[15];
    int index = 0;
    int ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);

    if (ret != 15) {
      PrintError("Could not read instruction 0x%x (ret=%d)\n", info->rip, ret);
      return -1;
    }
    /* Can INVLPG work with Segments?? */
    while (is_prefix_byte(instr[index])) {
      index++;
    }

    if ((instr[index] == (uchar_t)0x0f) &&
        (instr[index + 1] == (uchar_t)0x01)) {
      addr_t first_operand;
      addr_t second_operand;
      operand_type_t addr_type;
      addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);

      pde32_t * guest_pd = NULL;
      pde32_t * guest_pde = NULL;

      if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
        return -1;
      }
      addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);

      if (addr_type == MEM_OPERAND) {
        pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
        pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];

        //PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
        //PrintDebug("FirstOperand = %x\n", first_operand);

        PrintDebug("Invalidating page for %x\n", first_operand);

        guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(first_operand)]);
        if (guest_pde->large_page == 1) {
          shadow_pde->present = 0;
          PrintDebug("Invalidating Large Page\n");
        } else {
          if (shadow_pde->present == 1) {
            pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
            pte32_t * shadow_pte = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];

#ifdef DEBUG_SHADOW_PAGING
            PrintDebug("Setting not present\n");
            PrintPTE32(first_operand, shadow_pte);
#endif
            shadow_pte->present = 0;
      } else {
        PrintError("Invalid Operand type\n");
      }
    } else {
      PrintError("Invalid Instruction Opcode\n");
      PrintTraceMemDump(instr, 15);