Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way, as shown below.
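For any other branch, the same form works with the branch name substituted. As a sketch, using a hypothetical branch name "Release-1.2" (substitute one that actually exists in the repository):

  git checkout --track -b Release-1.2 origin/Release-1.2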


Commit: updated shadow paging to handle cache-disabled and write-through pages
File: palacios/src/palacios/vmm_shadow_paging_64.h
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

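/* Install a fresh top-level shadow page table: allocate a new shadow
 * PML4, point the shadow CR3 at it, and mirror the guest CR3's PWT/PCD
 * (write-through / cache-disable) bits so the shadow hierarchy keeps
 * the guest's caching behavior. */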
static inline int activate_shadow_pt_64(struct guest_info * info) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
    struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
    addr_t shadow_pt_addr = shadow_pt->page_pa;

    // Because this is a new CR3 load, the allocated page becomes the new CR3 value
    shadow_pt->cr3 = shadow_pt->page_pa;

    PrintDebug("Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);

    shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
    PrintDebug("Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}


/*
 * 64 bit page table fault handlers
 */

static int handle_2MB_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde);

static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt);

static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd);

static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp);

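/* Top-level #PF handler for 64 bit guests. Each level of the walk
 * (PML4 -> PDP -> PD -> PT) follows the same pattern: compare the
 * guest's and the shadow's permissions for the faulting address,
 * forward the fault to the guest if its own page tables caused it,
 * allocate and link a shadow page if the shadow entry is not present,
 * and otherwise recurse into the next level. */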
static inline int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pml4e64_t * guest_pml = NULL;
    pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PML4E64_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pml4e_access;
    pt_access_status_t shadow_pml4e_access;
    pml4e64_t * guest_pml4e = NULL;
    pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug("Handling PML4 fault\n");

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pml) == -1) {
        PrintError("Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("Checking Guest %p\n", (void *)guest_pml);
    // Check the guest page permissions
    guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);

    PrintDebug("Checking shadow %p\n", (void *)shadow_pml);
    // Check the shadow page permissions
    shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pml4e_access, shadow_pml4e_access) == 1) {
        PrintDebug("Injecting PML4E pf to guest: (guest access error=%d) (pf error code=%d)\n", 
                   *(uint_t *)&guest_pml4e_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pml4e_access == PT_ACCESS_USER_ERROR) {
        //
        // PML4 entry marked non-user
        //
        PrintDebug("Shadow Paging User access error (shadow_pml4e_access=0x%x, guest_pml4e_access=0x%x)\n", 
                   shadow_pml4e_access, guest_pml4e_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    } else if ((shadow_pml4e_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pml4e_access != PT_ACCESS_OK)) {
        // inject page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pml4e_access=%d)\n", shadow_pml4e_access);
        PrintDebug("Manual says to inject page fault into guest\n");
        return 0;
    }


    pdpe64_t * shadow_pdp = NULL;
    pdpe64_t * guest_pdp = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pml4e->present = 1;
        shadow_pml4e->user_page = guest_pml4e->user_page;
        shadow_pml4e->writable = guest_pml4e->writable;
        shadow_pml4e->cache_disable = guest_pml4e->cache_disable;
        shadow_pml4e->write_through = guest_pml4e->write_through;

        guest_pml4e->accessed = 1;

        shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
    }

    // Continue processing at the next level

    if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
        // Machine check the guest
        PrintDebug("Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pdpe_shadow_pagefault_64(info, fault_addr, error_code, shadow_pdp, guest_pdp) == -1) {
        PrintError("Error handling Page fault caused by PDPE\n");
        return -1;
    }

    return 0;
}


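/* PDPE-level handler. Follows the same check/inject/allocate pattern
 * as the PML4 level; faults at guest virtual address zero additionally
 * dump both the guest and host page trees for debugging. */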
// For now we are not going to handle 1 Gigabyte pages
static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp) {
    pt_access_status_t guest_pdpe_access;
    pt_access_status_t shadow_pdpe_access;
    pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
    pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDP fault\n");

    if (fault_addr == 0) {
        PrintDebug("Guest Page Tree for guest virtual address zero fault\n");
        PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
        PrintDebug("Host Page Tree for guest virtual address zero fault\n");
        PrintHostPageTree(info, fault_addr, (addr_t)(info->ctrl_regs.cr3));
    }

    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
        PrintDebug("Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n", 
                   *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pdpe_access == PT_ACCESS_USER_ERROR) {
        //
        // PDP entry marked non-user
        //
        PrintDebug("Shadow Paging User access error (shadow_pdpe_access=0x%x, guest_pdpe_access=0x%x)\n", 
                   shadow_pdpe_access, guest_pdpe_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pdpe_access != PT_ACCESS_OK)) {
        // inject page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
        PrintDebug("Manual says to inject page fault into guest\n");
        return 0;
    }


    pde64_t * shadow_pd = NULL;
    pde64_t * guest_pd = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pdpe->present = 1;
        shadow_pdpe->user_page = guest_pdpe->user_page;
        shadow_pdpe->writable = guest_pdpe->writable;
        shadow_pdpe->write_through = guest_pdpe->write_through;
        shadow_pdpe->cache_disable = guest_pdpe->cache_disable;

        guest_pdpe->accessed = 1;

        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level

    if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
        // Machine check the guest
        PrintDebug("Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pde_shadow_pagefault_64(info, fault_addr, error_code, shadow_pd, guest_pd) == -1) {
        PrintError("Error handling Page fault caused by PDE\n");
        return -1;
    }

    return 0;
}


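/* PDE-level handler. This is where 4KB and 2MB guest mappings diverge:
 * a write fault on a present 2MB guest page only needs the guest dirty
 * bit set and the shadow made writable, while a not-present shadow PDE
 * gets a fresh shadow PT that either mirrors a guest PT (small pages)
 * or backs a guest 2MB page with 4KB shadow entries. */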
static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
    pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDE fault\n");

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n", 
                   *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
        //
        // PDE marked non-user
        //
        PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n", 
                   shadow_pde_access, guest_pde_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;

    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) && 
               (guest_pde->large_page == 1)) {

        // Write fault on a present 2MB page: set the guest dirty bit
        // and propagate the guest's writable bit to the shadow
        ((pde64_2MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;

        //PrintDebug("Returning due to large page Write Error\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);

        return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pde_access != PT_ACCESS_OK)) {
        // inject page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug("Manual says to inject page fault into guest\n");
        return 0;
    }


    pte64_t * shadow_pt = NULL;
    pte64_t * guest_pt = NULL;

    // Get the next shadow page level, allocate if not present

    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);

        PrintDebug("Creating new shadow PT: %p\n", (void *)shadow_pt);

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
        } else {
            // This large page flag is temporary until we can get a working cache....
            ((pde64_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;

            if (error_code.write) {
                shadow_pde->writable = guest_pde->writable;
                ((pde64_2MB_t *)guest_pde)->dirty = 1;
            } else {
                shadow_pde->writable = 0;
                ((pde64_2MB_t *)guest_pde)->dirty = 0;
            }
        }

        // VMM Specific options
        shadow_pde->write_through = guest_pde->write_through;
        shadow_pde->cache_disable = guest_pde->cache_disable;
        shadow_pde->global_page = guest_pde->global_page;
        //

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    // Continue processing at the next level
    if (guest_pde->large_page == 0) {
        if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintDebug("Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }

        if (handle_pte_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
            PrintError("Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        if (handle_2MB_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde) == -1) {
            PrintError("Error handling large pagefault\n");
            return -1;
        }
    }

    return 0;
}


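/* PTE-level handler. Resolves the guest physical address to a shadow
 * memory region, fills in the shadow PTE on a not-present fault while
 * emulating the accessed/dirty bits in the guest PTE, and defers to
 * the write/full memory hooks for hooked regions. */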
static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
    //  struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling PTE fault\n");

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);

    if ((shdw_reg == NULL) || 
        (shdw_reg->host_type == SHDW_REGION_INVALID)) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
        PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n", 
                   guest_pte_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }


    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush page tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }


    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug("guest_pa=%p\n", (void *)guest_pa);

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // set according to VMM policy
            shadow_pte->write_through = guest_pte->write_through;
            shadow_pte->cache_disable = guest_pte->cache_disable;
            shadow_pte->global_page = guest_pte->global_page;
            //

            guest_pte->accessed = 1;

            // Emulate the dirty bit: the shadow PTE only becomes
            // writable once the guest PTE's dirty bit is set
            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
                shadow_pte->writable = 0;
            }

            // dirty flag has been set, check if its in the cache
            /*      if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) { */
            /*          if (error_code.write == 1) { */
            /*              state->cached_cr3 = 0; */
            /*              shadow_pte->writable = guest_pte->writable; */
            /*          } else { */
            /*              shadow_pte->writable = 0; */
            /*          } */
            /*      } */

            // Write hooks trump all, and are set Read Only
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            }

        } else {
            // Page fault handled by hook functions

            if (v3_handle_mem_full_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        } else {
            PrintDebug("Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        }

        /*      if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) { */
        /*          struct shadow_page_state * state = &(info->shdw_pg_state); */
        /*          PrintDebug("Write operation on Guest Page Table Page\n"); */
        /*          state->cached_cr3 = 0; */
        /*      } */

        return 0;

    } else {
        // Inject page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError("Manual says to inject page fault into guest\n");
        return -1;
    }

    return 0;
}


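/* Back a guest 2MB page with 4KB shadow PTEs. The shadow PDE set up by
 * the PDE-level handler carries the guest's permission bits, so each
 * shadow PTE here can be permissive (user, writable) except for
 * write-hooked regions, which stay read-only. */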
static int handle_2MB_shadow_pagefault_64(struct guest_info * info, 
                                          addr_t fault_addr, pf_error_t error_code, 
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde) 
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    //  struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", (void *)shadow_pt, (void *)large_guest_pde);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);

    if ((shdw_reg == NULL) || 
        (shdw_reg->host_type == SHDW_REGION_INVALID)) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        return 0;
    }


    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) || 
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);

            PrintDebug("Shadow PA=%p, ShadowPTE=%p\n", (void *)shadow_pa, (void *)shadow_pte);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the shadow PDE will mirror the guest PDE settings
             * and we don't have to worry about them here.
             * Allow everything.
             */
            shadow_pte->user_page = 1;

            /* Disabled page-table cache check:
             *      if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) {
             *          // Check if the entry is a page table...
             *          PrintDebug("Marking page as Guest Page Table (large page)\n");
             *          shadow_pte->writable = 0;
             *      } else ...
             */
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // set according to VMM policy
            shadow_pte->write_through = large_guest_pde->write_through;
            shadow_pte->cache_disable = large_guest_pde->cache_disable;
            shadow_pte->global_page = large_guest_pde->global_page;
            //

        } else {
            // Handle hooked pages as well as other special pages
            //      if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {

            if (v3_handle_mem_full_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {

            if (v3_handle_mem_wr_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }

        /*      if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) { */
        /*          struct shadow_page_state * state = &(info->shdw_pg_state); */
        /*          PrintDebug("Write operation on Guest Page Table Page (large page)\n"); */
        /*          state->cached_cr3 = 0; */
        /*          shadow_pte->writable = 1; */
        /*      } */

    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    //  PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug("Returning from large page fault handler\n");
    return 0;
}


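/* Per-level callback used by handle_shadow_invlpg_64 via
 * v3_drill_host_pt_64. Clears the present bit on the shadow leaf entry
 * for vaddr (the PTE, or the PDE for a 2MB mapping). Returns 1 when
 * there is nothing further to do (entry already not present, or leaf
 * cleared), 0 to descend another level, and -1 on error. */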
static int invalidation_cb_64(struct guest_info * info, page_type_t type, 
                              addr_t vaddr, addr_t page_ptr, addr_t page_pa, 
                              void * private_data) {

    switch (type) {
        case PAGE_PML464:
            {
                pml4e64_t * pml = (pml4e64_t *)page_ptr;

                if (pml[PML4E64_INDEX(vaddr)].present == 0) {
                    return 1;
                }
                return 0;
            }
        case PAGE_PDP64:
            {
                pdpe64_t * pdp = (pdpe64_t *)page_ptr;
                pdpe64_t * pdpe = &(pdp[PDPE64_INDEX(vaddr)]);

                if (pdpe->present == 0) {
                    return 1;
                }

                if (pdpe->vmm_info == V3_LARGE_PG) {
                    PrintError("1 Gigabyte pages not supported\n");
                    return -1;

                    /* Unreachable until 1 Gigabyte pages are supported:
                     * pdpe->present = 0;
                     * return 1;
                     */
                }

                return 0;
            }
        case PAGE_PD64:
            {
                pde64_t * pd = (pde64_t *)page_ptr;
                pde64_t * pde = &(pd[PDE64_INDEX(vaddr)]);

                if (pde->present == 0) {
                    return 1;
                }

                if (pde->vmm_info == V3_LARGE_PG) {
                    pde->present = 0;
                    return 1;
                }

                return 0;
            }
        case PAGE_PT64:
            {
                pte64_t * pt = (pte64_t *)page_ptr;

                pt[PTE64_INDEX(vaddr)].present = 0;

                return 1;
            }
        default:
            PrintError("Invalid Page Type\n");
            return -1;

    }

    // should not get here
    PrintError("Should not get here...\n");
    return -1;
}


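/* INVLPG emulation for 64 bit guests: drill down the shadow page table
 * rooted at the shadow CR3 and clear the mapping for vaddr. */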
static inline int handle_shadow_invlpg_64(struct guest_info * info, addr_t vaddr) {
    PrintDebug("INVLPG64 - %p\n", (void *)vaddr);

    int ret = v3_drill_host_pt_64(info, info->ctrl_regs.cr3, vaddr, invalidation_cb_64, NULL);
    if (ret == -1) {
        PrintError("Page table drill returned error\n");
        PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
    }

    return (ret == -1) ? -1 : 0;
}