static int cache_page_tables_32(struct guest_info * info, addr_t pde) {
    struct shadow_page_state * state = &(info->shdw_pg_state);
    addr_t pde_host_addr = 0;
    pde32_t * tmp_pde = NULL;
    struct hashtable * pte_cache = NULL;
    int i = 0;

    // The cache already describes this guest page directory
    if (pde == state->cached_cr3) {
        return 1;
    }
    // Discard any stale cache before rebuilding it
    if (state->cached_ptes != NULL) {
        hashtable_destroy(state->cached_ptes, 0, 0);
        state->cached_ptes = NULL;
    }

    state->cached_cr3 = pde;
    pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
    state->cached_ptes = pte_cache;

    if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
        PrintError("Could not lookup host address of guest PDE\n");
        return -1;
    }
    tmp_pde = (pde32_t *)pde_host_addr;
    add_pte_map(pte_cache, pde, pde_host_addr);

    // Record every page table referenced by a present, non-large PDE
    for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
        if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
            addr_t pte_host_addr = 0;
            if (guest_pa_to_host_va(info, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), &pte_host_addr) == -1) {
                PrintError("Could not lookup host address of guest PTE\n");
                return -1;
            }
            add_pte_map(pte_cache, (addr_t)(BASE_TO_PAGE_ADDR(tmp_pde[i].pt_base_addr)), pte_host_addr);
        }
    }
    return 0;
}
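
/* cache_page_tables_32() records, in a hashtable keyed by guest physical address, every
 * page the guest is currently using as a page directory or page table. The fault handlers
 * below consult this map (via find_pte_map()) so they can write-protect those pages by
 * tagging them PT32_GUEST_PT; when the guest later writes to one of its own page tables,
 * the write faults and the handlers invalidate the cache by clearing cached_cr3.
 * Return values: -1 on error, 0 if the cache was rebuilt for a new CR3, 1 on a cache hit.
 */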
// We assume that shdw_pg_state.guest_cr3 is pointing to the page tables we want to activate
// We also assume that the CPU mode has not changed during this page table transition
static inline int activate_shadow_pt_32(struct guest_info * info) {
    struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
    struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
    int cached = 0;

    // Check if the page tables for this CR3 are already in the cache
    cached = cache_page_tables_32(info, CR3_TO_PDE32_PA(*(addr_t *)guest_cr3));

    if (cached == -1) {
        PrintError("CR3 Cache failed\n");
        return -1;
    } else if (cached == 0) {
        addr_t shadow_pt;

        PrintDebug("New CR3 is different - flushing shadow page table %p\n", shadow_cr3);
        delete_page_tables_32(CR3_TO_PDE32_VA(*(uint_t *)shadow_cr3));

        shadow_pt = create_new_shadow_pt();

        shadow_cr3->pdt_base_addr = (addr_t)V3_PAddr((void *)(addr_t)PAGE_BASE_ADDR(shadow_pt));
        PrintDebug("Created new shadow page table %p\n", (void *)(addr_t)shadow_cr3->pdt_base_addr);
    } else {
        PrintDebug("Reusing cached shadow page table\n");
    }

    // Mirror the guest's caching attributes in the shadow CR3
    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}
/*
 * 32 bit Page table fault handlers
 */
static int handle_large_pagefault_32(struct guest_info * info,
                                     addr_t fault_addr, pf_error_t error_code,
                                     pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde);

static int handle_shadow_pte32_fault(struct guest_info * info,
                                     addr_t fault_addr, pf_error_t error_code,
                                     pte32_t * shadow_pt, pte32_t * guest_pt);
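
/* All of the handlers below index the two-level 32-bit (non-PAE) tables with the
 * PDE32_INDEX/PTE32_INDEX macros. As a rough sketch (the real macros are defined in the
 * paging headers, not here), the decomposition of a faulting virtual address is:
 *
 *     pde_index = (fault_addr >> 22) & 0x3ff;   // bits 31..22 select the PDE
 *     pte_index = (fault_addr >> 12) & 0x3ff;   // bits 21..12 select the PTE
 *     offset    = fault_addr & 0xfff;           // bits 11..0 are the offset in a 4KB page
 *
 * For guest 4MB pages the PTE level is skipped and bits 21..0 form the page offset,
 * which is what BASE_TO_PAGE_ADDR_4MB()/PAGE_OFFSET_4MB() express below.
 */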
static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pde32_t * guest_pd = NULL;
    pde32_t * shadow_pd = CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde32_t * guest_pde = NULL;
    pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(fault_addr)]);

    PrintDebug("Shadow page fault handler: %p\n", (void *)fault_addr);

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde32(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        // No shadow page table exists for this region yet, so build one
        pte32_t * shadow_pt = (pte32_t *)create_new_shadow_pt();

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;
        // shadow_pde->large_page = guest_pde->large_page;
        shadow_pde->large_page = 0;

        // VMM Specific options
        shadow_pde->write_through = 0;
        shadow_pde->cache_disable = 0;
        shadow_pde->global_page = 0;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(shadow_pt));

        if (guest_pde->large_page == 0) {
            pte32_t * guest_pt = NULL;
            shadow_pde->writable = guest_pde->writable;

            if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
                // Machine check the guest
                PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
                v3_raise_exception(info, MC_EXCEPTION);
                return 0;
            }

            if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
                PrintError("Error handling Page fault caused by PTE\n");
                return -1;
            }
        } else {
            // ?? What if the guest pde is dirty at this point?
            ((pde32_4MB_t *)guest_pde)->dirty = 0;
            shadow_pde->writable = 0;
        }
    } else if (shadow_pde_access == PT_ACCESS_OK) {
        // The shadow PDE is valid, so the fault must be at the PTE level
        pte32_t * shadow_pt = (pte32_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));

        if (guest_pde->large_page == 0) {
            pte32_t * guest_pt = NULL;

            if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
                // Machine check the guest
                PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
                v3_raise_exception(info, MC_EXCEPTION);
                return 0;
            }

            if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
                PrintError("Error handling Page fault caused by PTE\n");
                return -1;
            }
        } else if (guest_pde->large_page == 1) {
            if (handle_large_pagefault_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) {
                PrintError("Error handling large pagefault\n");
                return -1;
            }
        }
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1) &&
               (((pde32_4MB_t *)guest_pde)->dirty == 0)) {
        // Page Directory Entry marked read-only
        // It's a large page and we need to update the dirty bit in the guest
        PrintDebug("Large page write error... Setting dirty bit and returning\n");
        ((pde32_4MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;
        return 0;
    } else if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
        // Page Directory Entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    } else {
        // inject page fault in guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug("Manual Says to inject page fault into guest\n");
#ifdef DEBUG_SHADOW_PAGING
        PrintDebug("Guest PDE: (access=%d)\n\t", guest_pde_access);
        PrintPTEntry(PAGE_PD32, fault_addr, guest_pde);
        PrintDebug("Shadow PDE: (access=%d)\n\t", shadow_pde_access);
        PrintPTEntry(PAGE_PD32, fault_addr, shadow_pde);
#endif
        return 0;
    }

    PrintDebug("Returning end of PDE function (rip=%p)\n", (void *)(addr_t)(info->rip));
    return 0;
}
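
/* Note on the next handler: this 32-bit code never installs a 4MB shadow mapping. When the
 * guest uses a large page, the shadow PDE is forced to large_page = 0 above, and the handler
 * below fills in one 4KB shadow PTE at a time for whichever address actually faulted.
 * Illustrative arithmetic (made-up numbers): if the guest's 4MB page is based at guest
 * physical 0x04000000 and fault_addr = 0x08621000, then
 *     PAGE_OFFSET_4MB(fault_addr) = 0x08621000 & 0x3fffff = 0x221000
 *     guest_fault_pa              = 0x04000000 + 0x221000 = 0x04221000
 * and that single 4KB frame is what gets backed by a shadow PTE.
 */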
/* The guest status checks have already been done,
 * only special case shadow checks remain
 */
static int handle_large_pagefault_32(struct guest_info * info,
                                     addr_t fault_addr, pf_error_t error_code,
                                     pte32_t * shadow_pt, pde32_4MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);

    if ((shdw_reg == NULL) ||
        (shdw_reg->host_type == SHDW_REGION_INVALID)) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush the tables and everything should then work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            struct shadow_page_state * state = &(info->shdw_pg_state);
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the Shadow PDE will mirror the guest PDE settings
             * and we don't have to worry about them here
             */
            shadow_pte->user_page = 1;

            if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
                // The faulting page is one of the guest's own page tables
                PrintDebug("Marking page as Guest Page Table (large page)\n");
                shadow_pte->vmm_info = PT32_GUEST_PT;
                shadow_pte->writable = 0;
            } else if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // set according to VMM policy
            shadow_pte->write_through = 0;
            shadow_pte->cache_disable = 0;
            shadow_pte->global_page = 0;
        } else {
            // Handle hooked pages as well as other special pages
            // if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
            if (v3_handle_mem_full_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        } else if (shadow_pte->vmm_info == PT32_GUEST_PT) {
            struct shadow_page_state * state = &(info->shdw_pg_state);
            PrintDebug("Write operation on Guest Page Table Page (large page)\n");
            state->cached_cr3 = 0;
            shadow_pte->writable = 1;
        }
    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    PrintDebug("Returning from large page fault handler\n");
    return 0;
}
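
/* Note on the next handler: it also emulates the guest's accessed/dirty bits. A shadow PTE
 * is only made writable once the corresponding guest PTE can legitimately be marked dirty
 * (either it is already dirty, or this very fault is a write). A read-only shadow copy of a
 * clean guest page means the first guest write traps, the handler sets guest_pte->dirty,
 * and then grants write access, so the guest's own dirty tracking stays accurate.
 */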
/*
 * We assume that the guest pte pointer has already been translated to a host virtual address
 */
static int handle_shadow_pte32_fault(struct guest_info * info,
                                     addr_t fault_addr,
                                     pf_error_t error_code,
                                     pte32_t * shadow_pt,
                                     pte32_t * guest_pt) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
    pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);

    if ((shdw_reg == NULL) ||
        (shdw_reg->host_type == SHDW_REGION_INVALID)) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte32(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte32(shadow_pt, fault_addr, error_code);

#ifdef DEBUG_SHADOW_PAGING
    PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
    PrintPTEntry(PAGE_PT32, fault_addr, guest_pte);
    PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
    PrintPTEntry(PAGE_PT32, fault_addr, shadow_pte);
#endif

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
        PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush the page tables and everything should then work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug("guest_pa =%p\n", (void *)guest_pa);

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            struct shadow_page_state * state = &(info->shdw_pg_state);
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // set according to VMM policy
            shadow_pte->write_through = 0;
            shadow_pte->cache_disable = 0;
            shadow_pte->global_page = 0;

            guest_pte->accessed = 1;

            if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) {
                // The faulting page is one of the guest's own page tables
                PrintDebug("Marking page as Guest Page Table %d\n", shadow_pte->writable);
                shadow_pte->vmm_info = PT32_GUEST_PT;
            }

            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;

                if (shadow_pte->vmm_info == PT32_GUEST_PT) {
                    // Well that was quick...
                    struct shadow_page_state * state = &(info->shdw_pg_state);
                    PrintDebug("Immediate Write operation on Guest Page Table Page\n");
                    state->cached_cr3 = 0;
                }
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) { // was =
                shadow_pte->writable = 0;
            }
        } else {
            // Page fault handled by hook functions
            if (v3_handle_mem_full_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        } else {
            PrintDebug("Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        }

        if (shadow_pte->vmm_info == PT32_GUEST_PT) {
            struct shadow_page_state * state = &(info->shdw_pg_state);
            PrintDebug("Write operation on Guest Page Table Page\n");
            state->cached_cr3 = 0;
        }

        return 0;
    } else {
        // Inject page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError("Manual Says to inject page fault into guest\n");
        return -1;
    }

    PrintDebug("Returning end of function\n");
    return 0;
}
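
/* Note on the next handler: INVLPG is emulated by dropping the shadow mapping for the
 * invalidated address rather than touching the guest tables. For a large page the whole
 * shadow PDE is cleared; otherwise only the single shadow PTE is marked not-present, so
 * the next access re-faults and rebuilds it from the (possibly modified) guest entry.
 */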
/* If we start to optimize we should look up the guest pages in the cache... */
static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
    pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32_VA(info->ctrl_regs.cr3);
    pde32_t * shadow_pde = (pde32_t *)&shadow_pd[PDE32_INDEX(vaddr)];

    addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
    pde32_t * guest_pd = NULL;
    pde32_t * guest_pde = NULL;

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pd) == -1) {
        PrintError("Invalid Guest PDE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(vaddr)]);

    if (guest_pde->large_page == 1) {
        // A large page maps straight from the PDE, so just drop the shadow PDE
        shadow_pde->present = 0;
        PrintDebug("Invalidating Large Page\n");
    } else if (shadow_pde->present == 1) {
        pte32_t * shadow_pt = (pte32_t *)(addr_t)BASE_TO_PAGE_ADDR_4KB(shadow_pde->pt_base_addr);
        pte32_t * shadow_pte = (pte32_t *)V3_VAddr((void *)&shadow_pt[PTE32_INDEX(vaddr)]);

        PrintDebug("Setting not present\n");
        shadow_pte->present = 0;
    }

    return 0;
}