Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
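For example, to track a release branch (the branch name Release-1.2 below is hypothetical; substitute one that actually exists in the repository), execute

  git checkout --track -b Release-1.2 origin/Release-1.2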


File: palacios/src/palacios/vmm_shadow_paging_64.h (commit 5bcf9c9f0833e83d5936027f0f627c3b6613f0b7)
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

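/* Install a fresh top-level shadow page table in response to a guest CR3
 * load. The newly allocated page becomes the PML4 that the hardware walks;
 * the PWT/PCD caching bits are propagated from the guest's CR3.
 */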
static inline int activate_shadow_pt_64(struct guest_info * info) {
    struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
    struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
    struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
    addr_t shadow_pt_addr = shadow_pt->page_pa;

    // Because this is a new CR3 load, the allocated page is the new CR3 value
    shadow_pt->cr3 = shadow_pt->page_pa;

    PrintDebug("Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);

    shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
    PrintDebug("Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;

    return 0;
}

/*
 * 64 bit page table fault handlers
 */

static int handle_2MB_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde);

static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt);

static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd);

static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp);

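/* Top-level 64 bit shadow page fault handler. Checks the PML4 level in both
 * the guest and shadow page tables: faults caused by the guest's own tables
 * are injected back into the guest, a missing shadow PDP is allocated on
 * demand, and the fault is then passed down to the PDPE-level handler.
 */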
static inline int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    pml4e64_t * guest_pml = NULL;
    pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
    addr_t guest_cr3 = CR3_TO_PML4E64_PA(info->shdw_pg_state.guest_cr3);
    pt_access_status_t guest_pml4e_access;
    pt_access_status_t shadow_pml4e_access;
    pml4e64_t * guest_pml4e = NULL;
    pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
    PrintDebug("Handling PML fault\n");

    if (guest_pa_to_host_va(info, guest_cr3, (addr_t *)&guest_pml) == -1) {
        PrintError("Invalid Guest PML4E Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);

    PrintDebug("Checking Guest %p\n", (void *)guest_pml);
    // Check the guest page permissions
    guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);

    PrintDebug("Checking shadow %p\n", (void *)shadow_pml);
    // Check the shadow page permissions
    shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pml4e_access, shadow_pml4e_access) == 1) {
        PrintDebug("Injecting PML4E pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pml4e_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pml4e_access == PT_ACCESS_USER_ERROR) {
        // PML4 entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pml4e_access=0x%x, guest_pml4e_access=0x%x)\n",
                   shadow_pml4e_access, guest_pml4e_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    } else if ((shadow_pml4e_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pml4e_access != PT_ACCESS_OK)) {
        // Inject a page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pml4e_access=%d)\n", shadow_pml4e_access);
        PrintDebug("Manual says to inject a page fault into the guest\n");
        return 0;
    }

    pdpe64_t * shadow_pdp = NULL;
    pdpe64_t * guest_pdp = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pml4e->present = 1;
        shadow_pml4e->user_page = guest_pml4e->user_page;
        shadow_pml4e->writable = guest_pml4e->writable;
        shadow_pml4e->cache_disable = guest_pml4e->cache_disable;
        shadow_pml4e->write_through = guest_pml4e->write_through;

        guest_pml4e->accessed = 1;

        shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
    }

    // Continue processing at the next level
    if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
        // Machine check the guest
        PrintDebug("Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pdpe_shadow_pagefault_64(info, fault_addr, error_code, shadow_pdp, guest_pdp) == -1) {
        PrintError("Error handling Page fault caused by PDPE\n");
        return -1;
    }

    return 0;
}

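/* PDPE-level handler: mirrors the PML4-level logic one level down. Guest
 * faults are injected back, a missing shadow page directory is allocated on
 * demand, and the fault is passed down to the PDE-level handler.
 */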
// For now we are not going to handle 1 Gigabyte pages
static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                           pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp) {
    pt_access_status_t guest_pdpe_access;
    pt_access_status_t shadow_pdpe_access;
    pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
    pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDP fault\n");

    if (fault_addr == 0) {
        PrintDebug("Guest Page Tree for guest virtual address zero fault\n");
        PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
        PrintDebug("Host Page Tree for guest virtual address zero fault\n");
        PrintHostPageTree(info, fault_addr, (addr_t)(info->ctrl_regs.cr3));
    }

    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
        PrintDebug("Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pdpe_access == PT_ACCESS_USER_ERROR) {
        // PDP entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pdpe_access=0x%x, guest_pdpe_access=0x%x)\n",
                   shadow_pdpe_access, guest_pdpe_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pdpe_access != PT_ACCESS_OK)) {
        // Inject a page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
        PrintDebug("Manual says to inject a page fault into the guest\n");
        return 0;
    }

    pde64_t * shadow_pd = NULL;
    pde64_t * guest_pd = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pdpe->present = 1;
        shadow_pdpe->user_page = guest_pdpe->user_page;
        shadow_pdpe->writable = guest_pdpe->writable;
        shadow_pdpe->write_through = guest_pdpe->write_through;
        shadow_pdpe->cache_disable = guest_pdpe->cache_disable;

        guest_pdpe->accessed = 1;

        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level
    if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
        // Machine check the guest
        PrintDebug("Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pde_shadow_pagefault_64(info, fault_addr, error_code, shadow_pd, guest_pd) == -1) {
        PrintError("Error handling Page fault caused by PDE\n");
        return -1;
    }

    return 0;
}

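/* PDE-level handler. In addition to the usual checks, this level must
 * distinguish 2MB large pages from 4KB mappings: writes to a write-protected
 * large page update the guest dirty bit here, and other large page faults
 * are routed to handle_2MB_shadow_pagefault_64.
 */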
static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pde64_t * shadow_pd, pde64_t * guest_pd) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;
    pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
    pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);

    PrintDebug("Handling PDE fault\n");

    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug("Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
        // PDE entry marked non-user
        PrintDebug("Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;

    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1)) {

        // Write to a large page whose shadow was write-protected for dirty
        // tracking: set the guest dirty bit and restore writability
        ((pde64_2MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;

        //PrintDebug("Returning due to large page Write Error\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);

        return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pde_access != PT_ACCESS_OK)) {
        // Inject a page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintDebug("Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug("Manual says to inject a page fault into the guest\n");
        return 0;
    }

    pte64_t * shadow_pt = NULL;
    pte64_t * guest_pt = NULL;

    // Get the next shadow page level, allocate if not present
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);

        PrintDebug("Creating new shadow PT: %p\n", shadow_pt);

        shadow_pde->present = 1;
        shadow_pde->user_page = guest_pde->user_page;

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
        } else {
            // This large page flag is temporary until we can get a working cache....
            ((pde64_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;

            if (error_code.write) {
                shadow_pde->writable = guest_pde->writable;
                ((pde64_2MB_t *)guest_pde)->dirty = 1;
            } else {
                shadow_pde->writable = 0;
                ((pde64_2MB_t *)guest_pde)->dirty = 0;
            }
        }

        // VMM Specific options
        shadow_pde->write_through = guest_pde->write_through;
        shadow_pde->cache_disable = guest_pde->cache_disable;
        shadow_pde->global_page = guest_pde->global_page;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    // Continue processing at the next level
    if (guest_pde->large_page == 0) {
        if (guest_pa_to_host_va(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintDebug("Invalid Guest PT Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }

        if (handle_pte_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
            PrintError("Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        if (handle_2MB_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde) == -1) {
            PrintError("Error handling large pagefault\n");
            return -1;
        }
    }

    return 0;
}

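/* PTE-level handler: the leaf of the walk. Translates the faulting guest
 * physical address through the VM's shadow memory map, fills in the shadow
 * PTE, emulates accessed/dirty bit updates in the guest PTE, and defers
 * hooked regions to the full/write hook handlers.
 */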
static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pte64_t * guest_pt) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
    //  struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling PTE fault\n");

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the Guest's page tables? */
    if (is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
        PrintDebug("Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);
        inject_guest_pf(info, fault_addr, error_code);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush page tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page Table Entry Not Present
        PrintDebug("guest_pa =%p\n", (void *)guest_pa);

        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_pa);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // Set according to VMM policy
            shadow_pte->write_through = guest_pte->write_through;
            shadow_pte->cache_disable = guest_pte->cache_disable;
            shadow_pte->global_page = guest_pte->global_page;

            guest_pte->accessed = 1;

            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
                shadow_pte->writable = 0;
            }

            // Dirty flag has been set, check if it's in the cache
            /*      if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) { */
            /*          if (error_code.write == 1) { */
            /*              state->cached_cr3 = 0; */
            /*              shadow_pte->writable = guest_pte->writable; */
            /*          } else { */
            /*              shadow_pte->writable = 0; */
            /*          } */
            /*      } */

            // Write hooks trump all, and are set Read Only
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            }

        } else {
            // Page fault handled by hook functions
            if (v3_handle_mem_full_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        } else {
            PrintDebug("Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        }

        /*      if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_pa)) != NULL) { */
        /*          struct shadow_page_state * state = &(info->shdw_pg_state); */
        /*          PrintDebug("Write operation on Guest Page Table Page\n"); */
        /*          state->cached_cr3 = 0; */
        /*      } */

        return 0;

    } else {
        // Inject a page fault into the guest
        inject_guest_pf(info, fault_addr, error_code);
        PrintError("PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError("Manual says to inject a page fault into the guest\n");
        return -1;
    }

    return 0;
}

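/* Large (2MB) page handler. The guest maps a 2MB page, but the shadow tables
 * back it with 4KB pages, so a single guest large PDE fans out into shadow
 * PTEs that are filled in lazily, one fault at a time.
 */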
static int handle_2MB_shadow_pagefault_64(struct guest_info * info,
                                          addr_t fault_addr, pf_error_t error_code,
                                          pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde)
{
    pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
    pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    //  struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush tables and everything should now work
        PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault
        if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
            (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
            addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, guest_fault_pa);

            PrintDebug("Shadow PA=%p, ShadowPTE=%p\n", (void *)shadow_pa, (void *)shadow_pte);

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the Shadow PDE will mirror the guest PDE settings
             * and we don't have to worry about them here.
             * Allow everything.
             */
            shadow_pte->user_page = 1;

            /*      if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) { */
            /*          // Check if the entry is a page table... */
            /*          PrintDebug("Marking page as Guest Page Table (large page)\n"); */
            /*          shadow_pte->writable = 0; */
            /*      } else */
            if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // Set according to VMM policy
            shadow_pte->write_through = large_guest_pde->write_through;
            shadow_pte->cache_disable = large_guest_pde->cache_disable;
            shadow_pte->global_page = large_guest_pde->global_page;

        } else {
            // Handle hooked pages as well as other special pages
            //      if (handle_special_page_fault(info, fault_addr, guest_fault_pa, error_code) == -1) {
            if (v3_handle_mem_full_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {

        if (shdw_reg->host_type == SHDW_REGION_WRITE_HOOK) {
            if (v3_handle_mem_wr_hook(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError("Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }

        /*      if (find_pte_map(state->cached_ptes, PAGE_ADDR(guest_fault_pa)) != NULL) { */
        /*          struct shadow_page_state * state = &(info->shdw_pg_state); */
        /*          PrintDebug("Write operation on Guest Page Table Page (large page)\n"); */
        /*          state->cached_cr3 = 0; */
        /*          shadow_pte->writable = 1; */
        /*      } */

    } else {
        PrintError("Error in large page fault handler...\n");
        PrintError("This case should have been handled at the top level handler\n");
        return -1;
    }

    //  PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug("Returning from large page fault handler\n");
    return 0;
}

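/* Callback invoked by v3_drill_host_pt_64() at each level of the shadow page
 * table walk during INVLPG emulation. Returns 1 to stop the drill (entry not
 * present, or the affected entry has been invalidated), 0 to continue to the
 * next level, and -1 on error.
 */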
static int invalidation_cb_64(struct guest_info * info, page_type_t type,
                              addr_t vaddr, addr_t page_ptr, addr_t page_pa,
                              void * private_data) {

    switch (type) {
        case PAGE_PML464:
            {
                pml4e64_t * pml = (pml4e64_t *)page_ptr;

                if (pml[PML4E64_INDEX(vaddr)].present == 0) {
                    return 1;
                }
                return 0;
            }
        case PAGE_PDP64:
            {
                pdpe64_t * pdp = (pdpe64_t *)page_ptr;
                pdpe64_t * pdpe = &(pdp[PDPE64_INDEX(vaddr)]);

                if (pdpe->present == 0) {
                    return 1;
                }

                if (pdpe->vmm_info == V3_LARGE_PG) {
                    PrintError("1 Gigabyte pages not supported\n");
                    return -1;

                    // Unreachable until 1 Gigabyte pages are supported:
                    // pdpe->present = 0;
                    // return 1;
                }

                return 0;
            }
        case PAGE_PD64:
            {
                pde64_t * pd = (pde64_t *)page_ptr;
                pde64_t * pde = &(pd[PDE64_INDEX(vaddr)]);

                if (pde->present == 0) {
                    return 1;
                }

                if (pde->vmm_info == V3_LARGE_PG) {
                    pde->present = 0;
                    return 1;
                }

                return 0;
            }
        case PAGE_PT64:
            {
                pte64_t * pt = (pte64_t *)page_ptr;

                pt[PTE64_INDEX(vaddr)].present = 0;

                return 1;
            }
        default:
            PrintError("Invalid Page Type\n");
            return -1;
    }

    // Should not get here
    PrintError("Should not get here....\n");
    return -1;
}

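/* Emulate a guest INVLPG: drill down the shadow page tables for vaddr and
 * clear the affected entry so it is rebuilt on the next page fault.
 */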
static inline int handle_shadow_invlpg_64(struct guest_info * info, addr_t vaddr) {
    PrintDebug("INVLPG64 - %p\n", (void *)vaddr);

    int ret = v3_drill_host_pt_64(info, info->ctrl_regs.cr3, vaddr, invalidation_cb_64, NULL);
    if (ret == -1) {
        PrintError("Page table drill returned error....\n");
        PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
    }

    return (ret == -1) ? -1 : 0;
}