#include <palacios/vmm_shadow_paging.h>

#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>
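
/* Shadow paging support.  The VMM maintains "shadow" page tables, which the
 * hardware actually walks, mirroring the guest's own page tables but mapping
 * guest addresses to host physical memory.  The handlers below keep the two
 * sets of tables synchronized on page faults and INVLPG.
 */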
int init_shadow_page_state(struct shadow_page_state * state) {
  state->guest_mode = PDE32;
  state->shadow_mode = PDE32;

  state->shadow_cr3 = 0;

  return 0;
}
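
/* Top-level page fault dispatch.  With paging off, only special-cased
 * (hooked) pages can fault; with paging on, the fault is routed to the
 * handler for the guest's paging mode (only 32-bit non-PAE is handled).
 */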
int handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
  if (info->mem_mode == PHYSICAL_MEM) {
    // If paging is not turned on we need to handle the special cases
    return handle_special_page_fault(info, fault_addr, error_code);
  } else if (info->mem_mode == VIRTUAL_MEM) {
    switch (info->cpu_mode) {
    case PROTECTED:
      return handle_shadow_pagefault32(info, fault_addr, error_code);
    default:
      // currently not handled
      return -1;
    }
  } else {
    PrintDebug("Invalid Memory mode\n");
    return -1;
  }
}
addr_t create_new_shadow_pt32(struct guest_info * info) {
  void * host_pde = 0;

  V3_AllocPages(host_pde, 1);
  memset(host_pde, 0, PAGE_SIZE);

  return (addr_t)host_pde;
}
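
/* A guest 4MB page can only be backed by a host 4MB page if the backing
 * memory is 4MB aligned and contiguous.  Otherwise this routine fills a
 * 1024-entry shadow page table with 4KB mappings covering the same 4MB
 * guest region.
 */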
static int handle_pd32_nonaligned_4MB_page(struct guest_info * info, pte32_t * pt, addr_t guest_addr, pde32_4MB_t * large_shadow_pde) {
  pte32_t * pte_cursor;
  addr_t guest_pa = 0;
  uint_t i;

  for (i = 0; i < 1024; i++) {
    guest_pa = guest_addr + (PAGE_SIZE * i);
    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

    pte_cursor = &(pt[i]);

    if (host_page_type == HOST_REGION_INVALID) {
      // Currently we don't support this, but in theory we could
      PrintDebug("Invalid Host Memory Type\n");
      return -1;
    } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
      addr_t shadow_pa = get_shadow_addr(info, guest_pa);

      pte_cursor->page_base_addr = PT32_BASE_ADDR(shadow_pa);
      pte_cursor->present = 1;
      pte_cursor->writable = large_shadow_pde->writable;
      pte_cursor->user_page = large_shadow_pde->user_page;
      pte_cursor->write_through = 0;
      pte_cursor->cache_disable = 0;
      pte_cursor->global_page = 0;
    } else {
      PrintDebug("Unsupported Host Memory Type\n");
      return -1;
    }
  }

  return 0;
}
int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
  pde32_t * guest_pd = NULL;
  pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
  addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
  pt_access_status_t guest_pde_access;
  pt_access_status_t shadow_pde_access;
  pde32_t * guest_pde = NULL;
  pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);

  if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
    PrintDebug("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
    return -1;
  }

  guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);

  // Check the guest page permissions
  guest_pde_access = can_access_pde32(guest_pd, fault_addr, error_code);

  // Check the shadow page permissions
  shadow_pde_access = can_access_pde32(shadow_pd, fault_addr, error_code);
  /* This should be redone,
     but basically the reasoning is that there can be multiple reasons for a page fault:
     If there is a permissions failure for a page present in the guest _BUT_
     the reason for the fault was that the page is not present in the shadow,
     _THEN_ we have to map the shadow page in and reexecute, this will generate
     a permissions fault which is _THEN_ valid to send to the guest
  */
  if ((guest_pde_access != PT_ACCESS_OK) &&
      ((shadow_pde_access != PT_ENTRY_NOT_PRESENT) &&
       (guest_pde_access != PT_ENTRY_NOT_PRESENT))) { // aka (guest permission error)
    // inject page fault to the guest (Guest PDE fault)
    info->ctrl_regs.cr2 = fault_addr;
    raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n", guest_pde_access, *(uint_t *)&error_code);

    PrintDebug("Guest CR3=%x\n", guest_cr3);
    PrintDebug("Guest PD\n");
    PrintPD32(guest_pd);
    PrintDebug("Shadow PD\n");
    PrintPD32(shadow_pd);

    return 0;
  }

  //shadow_pde_access = can_access_pde32(shadow_pd, fault_addr, error_code);
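
  /* Shadow PDE not present: copy the guest PDE's attributes into the shadow
   * PDE, then populate the backing page table (4KB case) or map the large
   * page (4MB case).
   */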
  if (shadow_pde_access == PT_ENTRY_NOT_PRESENT) {
    shadow_pde->present = 1;
    shadow_pde->user_page = guest_pde->user_page;
    shadow_pde->large_page = guest_pde->large_page;

    // VMM Specific options
    shadow_pde->write_through = 0;
    shadow_pde->cache_disable = 0;
    shadow_pde->global_page = 0;

    guest_pde->accessed = 1;

    if (guest_pde->large_page == 0) {
      pte32_t * shadow_pt = NULL;

      V3_AllocPages(shadow_pt, 1);
      memset(shadow_pt, 0, PAGE_SIZE);

      shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);

      shadow_pde->writable = guest_pde->writable;
    } else {
      struct shadow_region * mem_reg;
      pde32_4MB_t * large_guest_pde = (pde32_4MB_t *)guest_pde;
      pde32_4MB_t * large_shadow_pde = (pde32_4MB_t *)shadow_pde;
      host_region_type_t host_page_type;
      addr_t guest_start_addr = PDE32_4MB_T_ADDR(*large_guest_pde);
      // addr_t guest_end_addr = guest_start_addr + PAGE_SIZE_4MB; // start address + 4MB

      // Check that the Guest PDE entry points to valid memory
      // else Machine Check the guest
      PrintDebug("Large Page: Page Base Addr=%x\n", guest_start_addr);

      host_page_type = get_shadow_addr_type(info, guest_start_addr);

      if (host_page_type == HOST_REGION_INVALID) {
        // Inject a machine check into the guest
        raise_exception(info, MC_EXCEPTION);
        PrintDebug("Invalid guest address in large page (0x%x)\n", guest_start_addr);
        return 0;
      } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
        addr_t host_start_addr = 0;
        addr_t region_end_addr = 0;

        // Check for a large enough region in host memory
        mem_reg = get_shadow_region_by_addr(&(info->mem_map), guest_start_addr);
        PrintDebug("Host region: host_addr=%x (guest_start=%x, end=%x)\n",
                   mem_reg->host_addr, mem_reg->guest_start, mem_reg->guest_end);
        host_start_addr = mem_reg->host_addr + (guest_start_addr - mem_reg->guest_start);
        region_end_addr = mem_reg->host_addr + (mem_reg->guest_end - mem_reg->guest_start);

        PrintDebug("Host Start Addr=%x; Region End Addr=%x\n", host_start_addr, region_end_addr);
        if (large_guest_pde->dirty == 1) { // dirty
          large_shadow_pde->writable = guest_pde->writable;
        } else if (error_code.write == 1) { // not dirty, access is write
          large_shadow_pde->writable = guest_pde->writable;
          large_guest_pde->dirty = 1;
        } else { // not dirty, access is read
          large_shadow_pde->writable = 0;
        }

        // Check that the host backing is 4MB aligned and covers the full 4MB region
        if ((PD32_4MB_PAGE_OFFSET(host_start_addr) == 0) &&
            (region_end_addr >= host_start_addr + PAGE_SIZE_4MB)) { // if 4MB boundary
          large_shadow_pde->page_base_addr = PD32_4MB_BASE_ADDR(host_start_addr);
        } else { // else generate 4k pages
          pte32_t * shadow_pt = NULL;
          PrintDebug("Handling non aligned large page\n");

          shadow_pde->large_page = 0;

          V3_AllocPages(shadow_pt, 1);
          memset(shadow_pt, 0, PAGE_SIZE);

          if (handle_pd32_nonaligned_4MB_page(info, shadow_pt, guest_start_addr, large_shadow_pde) == -1) {
            PrintDebug("Non Aligned Large Page Error\n");
            return -1;
          }

          PrintDebug("non-aligned Shadow PT\n");
          PrintPT32(PT32_PAGE_ADDR(fault_addr), shadow_pt);

          shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
        }
      } else {
        // Handle hooked pages as well as other special pages
        if (handle_special_page_fault(info, fault_addr, error_code) == -1) {
          PrintDebug("Special Page Fault handler returned error for address: %x\n", fault_addr);
          return -1;
        }
      }
    }
  } else if ((shadow_pde_access == PT_WRITE_ERROR) &&
             (guest_pde->large_page == 1) &&
             (((pde32_4MB_t *)guest_pde)->dirty == 0)) {

    // Page Directory Entry marked read-only
    ((pde32_4MB_t *)guest_pde)->dirty = 1;
    shadow_pde->writable = guest_pde->writable;
    return 0;

  } else if (shadow_pde_access == PT_USER_ERROR) {

    // Page Directory Entry marked non-user
    PrintDebug("Shadow Paging User access error\n");
    return -1;

  } else if (shadow_pde_access == PT_ACCESS_OK) {
    pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
    pte32_t * guest_pt = NULL;

    // Page Table Entry fault
    if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde)), (addr_t*)&guest_pt) == -1) {
      PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde)));
      // Machine check the guest
      raise_exception(info, MC_EXCEPTION);
      return 0;
    }

    if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
      PrintDebug("Error handling Page fault caused by PTE\n");
      return -1;
    }
  } else {
    // Unknown error raise page fault in guest
    info->ctrl_regs.cr2 = fault_addr;
    raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    // For debugging we will return an error here for the time being,
    // this probably shouldn't ever happen
    PrintDebug("Unknown Error occurred\n");
    PrintDebug("Manual Says to inject page fault into guest\n");
    return -1;
  }

  //PrintDebugPageTables(shadow_pd);
  PrintDebug("Returning end of PDE function (rip=%x)\n", info->rip);
  return 0;
}
/*
 * We assume that the guest pte pointer has already been translated to a host virtual address.
 */
int handle_shadow_pte32_fault(struct guest_info * info,
                              addr_t fault_addr,
                              pf_error_t error_code,
                              pte32_t * shadow_pt,
                              pte32_t * guest_pt) {

  pt_access_status_t guest_pte_access;
  pt_access_status_t shadow_pte_access;
  pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
  pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
  // Check the guest page permissions
  guest_pte_access = can_access_pte32(guest_pt, fault_addr, error_code);

  // Check the shadow page permissions
  shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);

  /* This should be redone,
     but basically the reasoning is that there can be multiple reasons for a page fault:
     If there is a permissions failure for a page present in the guest _BUT_
     the reason for the fault was that the page is not present in the shadow,
     _THEN_ we have to map the shadow page in and reexecute, this will generate
     a permissions fault which is _THEN_ valid to send to the guest
  */
  if ((guest_pte_access != PT_ACCESS_OK) &&
      ((shadow_pte_access != PT_ENTRY_NOT_PRESENT) &&
       (guest_pte_access != PT_ENTRY_NOT_PRESENT))) { // aka (guest permission error)
    // Inject page fault into the guest
    info->ctrl_regs.cr2 = fault_addr;
    raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n", guest_pte_access, *(uint_t *)&error_code);
    return 0;
  }
  if (shadow_pte_access == PT_ACCESS_OK) {
    // Inconsistent state...
    // Guest Re-Entry will flush page tables and everything should now work
    PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
    return 0;
  } else if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {
    addr_t guest_pa = PTE32_T_ADDR((*guest_pte));

    // Page Table Entry Not Present
    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

    if (host_page_type == HOST_REGION_INVALID) {
      // Inject a machine check into the guest
      raise_exception(info, MC_EXCEPTION);

      PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_pa);
      PrintDebug("fault_addr=0x%x next are guest and shadow ptes\n", fault_addr);
      PrintPTE32(fault_addr, guest_pte);
      PrintPTE32(fault_addr, shadow_pte);
      PrintDebug("Done.\n");
      return 0;
    } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
      addr_t shadow_pa = get_shadow_addr(info, guest_pa);

      shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);

      shadow_pte->present = guest_pte->present;
      shadow_pte->user_page = guest_pte->user_page;

      // set according to VMM policy
      shadow_pte->write_through = 0;
      shadow_pte->cache_disable = 0;
      shadow_pte->global_page = 0;
      guest_pte->accessed = 1;
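
      /* Dirty-bit emulation for 4KB pages: mirror the guest writable bit
       * only once the guest PTE is dirty, so the first write to a clean
       * page faults and the dirty bit can be set first.
       */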
      if (guest_pte->dirty == 1) {
        shadow_pte->writable = guest_pte->writable;
      } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
        shadow_pte->writable = guest_pte->writable;
        guest_pte->dirty = 1;
      } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
        shadow_pte->writable = 0;
      }
    } else {
      // Page fault handled by hook functions
      if (handle_special_page_fault(info, fault_addr, error_code) == -1) {
        PrintDebug("Special Page fault handler returned error for address: %x\n", fault_addr);
        return -1;
      }
    }
  } else if ((shadow_pte_access == PT_WRITE_ERROR) &&
             (guest_pte->dirty == 0)) {
    guest_pte->dirty = 1;
    shadow_pte->writable = guest_pte->writable;

    PrintDebug("Shadow PTE Write Error\n");
    return 0;
  } else {
    // Inject page fault into the guest
    info->ctrl_regs.cr2 = fault_addr;
    raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintDebug("PTE Page fault fell through... Not sure if this should ever happen\n");
    PrintDebug("Manual Says to inject page fault into guest\n");
    return -1;
  }

  PrintDebug("Returning end of function\n");
  return 0;
}
/* Currently Does not work with Segmentation!!! */
int handle_shadow_invlpg(struct guest_info * info) {
  if (info->mem_mode != VIRTUAL_MEM) {
    // Paging must be turned on...
    // should handle with some sort of fault I think
    PrintDebug("ERROR: INVLPG called in non paged mode\n");
    return -1;
  }

  if (info->cpu_mode == PROTECTED) {
    char instr[15];
    int index = 0;
    int ret;

    ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    if (ret != 15) {
      PrintDebug("Could not read instruction 0x%x (ret=%d)\n", info->rip, ret);
      return -1;
    }

    /* Can INVLPG work with Segments?? */
    while (is_prefix_byte(instr[index])) {
      index++;
    }

    // INVLPG is a two-byte opcode: 0x0f 0x01
    if ((instr[index] == (uchar_t)0x0f) &&
        (instr[index + 1] == (uchar_t)0x01)) {
      addr_t first_operand;
      addr_t second_operand;
      operand_type_t addr_type;
      addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);

      pde32_t * guest_pd = NULL;

      if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
        PrintDebug("Invalid Guest PDE Address: 0x%x\n", guest_cr3);
        return -1;
      }

      // Skip the two opcode bytes (0x0f 0x01) before decoding the operands
      index += 2;

      addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);
      if (addr_type == MEM_OPERAND) {
        pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
        pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];
        pde32_t * guest_pde;

        //PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
        //PrintDebug("FirstOperand = %x\n", first_operand);

        PrintDebug("Invalidating page for %x\n", first_operand);

        guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(first_operand)]);

        if (guest_pde->large_page == 1) {
          shadow_pde->present = 0;
          PrintDebug("Invalidating Large Page\n");
        } else {
          if (shadow_pde->present == 1) {
            pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
            pte32_t * shadow_pte = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];

            PrintDebug("Setting not present\n");
            PrintPTE32(first_operand, shadow_pte);

            shadow_pte->present = 0;
          }
        }
      } else {
        PrintDebug("Invalid Operand type\n");
        return -1;
      }
    } else {
      PrintDebug("Invalid Instruction Opcode\n");
      PrintTraceMemDump(instr, 15);
      return -1;
    }
  }

  return 0;
}
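
/* Eagerly build a complete shadow page directory from the guest page
 * directory referenced by virt_cr3, constructing a shadow page table for
 * every present guest PDE.  Returns 0 on failure.
 */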
addr_t setup_shadow_pt32(struct guest_info * info, addr_t virt_cr3) {
  addr_t cr3_guest_addr = CR3_TO_PDE32(virt_cr3);
  pde32_t * guest_pde;
  pde32_t * host_pde = NULL;
  int i;

  // Set up guest_pde to point to the PageDir in host addr
  if (guest_pa_to_host_va(info, cr3_guest_addr, (addr_t*)&guest_pde) == -1) {
    return 0;
  }

  V3_AllocPages(host_pde, 1);
  memset(host_pde, 0, PAGE_SIZE);

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    if (guest_pde[i].present == 1) {
      addr_t pt_host_addr;
      addr_t host_pte;

      if (guest_pa_to_host_va(info, PDE32_T_ADDR(guest_pde[i]), &pt_host_addr) == -1) {
        return 0;
      }
      if ((host_pte = setup_shadow_pte32(info, pt_host_addr)) == 0) {
        return 0;
      }

      host_pde[i].present = 1;
      host_pde[i].pt_base_addr = PD32_BASE_ADDR(host_pte);
      // Set Page DIR flags
    }
  }

  PrintDebugPageTables(host_pde);
  return (addr_t)host_pde;
}
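
/* Build one shadow page table from a guest page table that has already been
 * translated to a host virtual address.  Returns 0 on failure.
 */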
addr_t setup_shadow_pte32(struct guest_info * info, addr_t pt_host_addr) {
  pte32_t * guest_pte = (pte32_t *)pt_host_addr;
  pte32_t * host_pte = NULL;
  int i;

  V3_AllocPages(host_pte, 1);
  memset(host_pte, 0, PAGE_SIZE);

  for (i = 0; i < MAX_PTE32_ENTRIES; i++) {
    if (guest_pte[i].present == 1) {
      addr_t guest_pa = PTE32_T_ADDR(guest_pte[i]);
      host_region_type_t page_type;
      addr_t host_pa = 0;

      page_type = get_shadow_addr_type(info, guest_pa);

      if (page_type == HOST_REGION_PHYSICAL_MEMORY) {
        host_pa = get_shadow_addr(info, guest_pa);
      } else {
        // currently unhandled host region type
        return 0;
      }

      // Setup various memory types
      host_pte[i].page_base_addr = PT32_BASE_ADDR(host_pa);
      host_pte[i].present = 1;
    }
  }

  return (addr_t)host_pte;
}