Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
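
For example, to work from a release branch instead (the branch name below is only illustrative; running git branch -r lists what is actually available):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2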


Commit: Context-based output infrastructure (V3_Print, etc) and modifications to use it
File:   palacios/src/palacios/mmu/vmm_shdw_pg_tlb_64.h
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
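/* Install a freshly allocated top-level shadow page table as the host CR3,
 * carrying over the guest's PWT/PCD cache-control bits from the guest CR3. */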
21 static inline int activate_shadow_pt_64(struct guest_info * info) {
22     struct cr3_64 * shadow_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
23     struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
24     struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
25     addr_t shadow_pt_addr = shadow_pt->page_pa;
26
27     // Because this is a new CR3 load the allocated page is the new CR3 value
28     shadow_pt->cr3 = shadow_pt->page_pa;
29
30     PrintDebug(info->vm_info, info, "Top level Shadow page pa=%p\n", (void *)shadow_pt_addr);
31
32     shadow_cr3->pml4t_base_addr = PAGE_BASE_ADDR_4KB(shadow_pt_addr);
33     PrintDebug(info->vm_info, info, "Creating new 64 bit shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pml4t_base_addr));
34
35   
36     shadow_cr3->pwt = guest_cr3->pwt;
37     shadow_cr3->pcd = guest_cr3->pcd;
38
39     return 0;
40 }
41
42
43
44
45
46
47 /* 
48  * *
49  * * 
50  * * 64 bit Page table fault handlers
51  * *
52  * *
53  */
54
55 static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
56                                               pt_access_status_t shadow_pde_access, pde64_2MB_t * shadow_pt, 
57                                               pde64_2MB_t * large_guest_pde);
58 static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
59                                               pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde);
60
61 static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
62                                           pte64_t * shadow_pt, pte64_t * guest_pt);
63
64 static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
65                                           pde64_t * shadow_pd, pde64_t * guest_pd);
66
67 static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
68                                            pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp);
69
70
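/* Top-level #PF handler for 64-bit shadow paging. The faulting address is
 * checked against both the guest PML4 and the shadow PML4: faults caused by
 * the guest's own tables are injected back into the guest; otherwise the
 * missing shadow PDP is allocated and linked, and the walk continues one
 * level down via handle_pdpe_shadow_pagefault_64(). */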
71 static inline int handle_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
72     pml4e64_t * guest_pml = NULL;
73     pml4e64_t * shadow_pml = CR3_TO_PML4E64_VA(info->ctrl_regs.cr3);
74     addr_t guest_cr3 = CR3_TO_PML4E64_PA(info->shdw_pg_state.guest_cr3);
75     pt_access_status_t guest_pml4e_access;
76     pt_access_status_t shadow_pml4e_access;
77     pml4e64_t * guest_pml4e = NULL;
78     pml4e64_t * shadow_pml4e = (pml4e64_t *)&(shadow_pml[PML4E64_INDEX(fault_addr)]);
79
80     PrintDebug(info->vm_info, info, "64 bit Shadow page fault handler: %p\n", (void *)fault_addr);
81     PrintDebug(info->vm_info, info, "Handling PML fault\n");
82
83     if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pml) == -1) {
84         PrintError(info->vm_info, info, "Invalid Guest PML4E Address: 0x%p\n",  (void *)guest_cr3);
85         return -1;
86     } 
87
88     guest_pml4e = (pml4e64_t *)&(guest_pml[PML4E64_INDEX(fault_addr)]);
89
90     PrintDebug(info->vm_info, info, "Checking Guest %p\n", (void *)guest_pml);
91     // Check the guest page permissions
92     guest_pml4e_access = v3_can_access_pml4e64(guest_pml, fault_addr, error_code);
93
94     PrintDebug(info->vm_info, info, "Checking shadow %p\n", (void *)shadow_pml);
95     // Check the shadow page permissions
96     shadow_pml4e_access = v3_can_access_pml4e64(shadow_pml, fault_addr, error_code);
97   
98     /* Was the page fault caused by the Guest's page tables? */
99     if (v3_is_guest_pf(guest_pml4e_access, shadow_pml4e_access) == 1) {
100         PrintDebug(info->vm_info, info, "Injecting PML4E pf to guest: (guest access error=%d) (pf error code=%d)\n", 
101                    *(uint_t *)&guest_pml4e_access, *(uint_t *)&error_code);
102         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
103             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
104             return -1;
105         }
106         return 0;
107     }
108
109     if (shadow_pml4e_access == PT_ACCESS_USER_ERROR) {
110         //
111         // PML4 Entry marked non-user
112         //      
113         PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pml4e_access=0x%x, guest_pml4e_access=0x%x)\n", 
114                    shadow_pml4e_access, guest_pml4e_access);
115         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
116             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
117             return -1;
118         }
119         return 0;
120     } else if ((shadow_pml4e_access != PT_ACCESS_NOT_PRESENT) &&
121                (shadow_pml4e_access != PT_ACCESS_OK)) {
122         // inject page fault in guest
123         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
124             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
125             return -1;
126         }
127         PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pml4e_access=%d)\n", shadow_pml4e_access);
128         PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
129         return 0;
130     }
131
132
133     pdpe64_t * shadow_pdp = NULL;
134     pdpe64_t * guest_pdp = NULL;
135
136     // Get the next shadow page level, allocate if not present
137
138     if (shadow_pml4e_access == PT_ACCESS_NOT_PRESENT) {
139         struct shadow_page_data * shdw_page =  create_new_shadow_pt(info);
140         shadow_pdp = (pdpe64_t *)V3_VAddr((void *)shdw_page->page_pa);
141
142
143         shadow_pml4e->present = 1;
144         shadow_pml4e->user_page = guest_pml4e->user_page;
145         shadow_pml4e->writable = guest_pml4e->writable;
146         shadow_pml4e->cache_disable = guest_pml4e->cache_disable;
147         shadow_pml4e->write_through = guest_pml4e->write_through;
148     
149         guest_pml4e->accessed = 1;
150     
151         shadow_pml4e->pdp_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
152     } else {
153         shadow_pdp = (pdpe64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pml4e->pdp_base_addr));
154     }
155
156     // Continue processing at the next level
157
158     if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr), (addr_t *)&guest_pdp) == -1) {
159         // Machine check the guest
160         PrintError(info->vm_info, info, "Invalid Guest PDP Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pml4e->pdp_base_addr));
161         v3_raise_exception(info, MC_EXCEPTION);
162         return 0;
163     }
164   
165     if (handle_pdpe_shadow_pagefault_64(info, fault_addr, error_code, shadow_pdp, guest_pdp) == -1) {
166         PrintError(info->vm_info, info, "Error handling Page fault caused by PDPE\n");
167         return -1;
168     }
169
170     return 0;
171 }
172
173
174
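/* PDPE-level handler: same pattern as the PML4 level -- inject guest-caused
 * faults, otherwise allocate/link the shadow page directory and recurse into
 * handle_pde_shadow_pagefault_64(). */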
175 // For now we are not going to handle 1 Gigabyte pages
176 static int handle_pdpe_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
177                                            pdpe64_t * shadow_pdp, pdpe64_t * guest_pdp) {
178     pt_access_status_t guest_pdpe_access;
179     pt_access_status_t shadow_pdpe_access;
180     pdpe64_t * guest_pdpe = (pdpe64_t *)&(guest_pdp[PDPE64_INDEX(fault_addr)]);
181     pdpe64_t * shadow_pdpe = (pdpe64_t *)&(shadow_pdp[PDPE64_INDEX(fault_addr)]);
182  
183     PrintDebug(info->vm_info, info, "Handling PDP fault\n");
184
185     if (fault_addr == 0) {
186         PrintDebug(info->vm_info, info, "Guest Page Tree for guest virtual address zero fault\n");
187         PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
188         PrintDebug(info->vm_info, info, "Host Page Tree for guest virtual address zero fault\n");
189         PrintHostPageTree(info, fault_addr, (addr_t)(info->ctrl_regs.cr3));
190     }
191
192     // Check the guest page permissions
193     guest_pdpe_access = v3_can_access_pdpe64(guest_pdp, fault_addr, error_code);
194
195     // Check the shadow page permissions
196     shadow_pdpe_access = v3_can_access_pdpe64(shadow_pdp, fault_addr, error_code);
197   
198     /* Was the page fault caused by the Guest's page tables? */
199     if (v3_is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
200         PrintDebug(info->vm_info, info, "Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n", 
201                    *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
202         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
203             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
204             return -1;
205         }
206         return 0;
207     }
208
209     if (shadow_pdpe_access == PT_ACCESS_USER_ERROR) {
210         //
211         // PDPE Entry marked non-user
212         //      
213         PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pdpe_access=0x%x, guest_pdpe_access=0x%x)\n", 
214                    shadow_pdpe_access, guest_pdpe_access);
215         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
216             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
217             return -1;
218         }
219         return 0;
220     } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) &&
221                (shadow_pdpe_access != PT_ACCESS_OK)) {
222         // inject page fault in guest
223         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
224             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
225             return -1;
226         }
227         PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
228         PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
229         return 0;
230     }
231
232
233     pde64_t * shadow_pd = NULL;
234     pde64_t * guest_pd = NULL;
235
236     // Get the next shadow page level, allocate if not present
237
238     if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
239         struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
240         shadow_pd = (pde64_t *)V3_VAddr((void *)shdw_page->page_pa);
241
242
243         shadow_pdpe->present = 1;
244         shadow_pdpe->user_page = guest_pdpe->user_page;
245         shadow_pdpe->writable = guest_pdpe->writable;
246         shadow_pdpe->write_through = guest_pdpe->write_through;
247         shadow_pdpe->cache_disable = guest_pdpe->cache_disable;
248
249     
250         guest_pdpe->accessed = 1;
251     
252         shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
253     } else {
254         shadow_pd = (pde64_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
255     }
256
257     // Continue processing at the next level
258
259     if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
260         // Machine check the guest
261         PrintError(info->vm_info, info, "Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
262         v3_raise_exception(info, MC_EXCEPTION);
263         return 0;
264     }
265   
266     if (handle_pde_shadow_pagefault_64(info, fault_addr, error_code, shadow_pd, guest_pd) == -1) {
267         PrintError(info->vm_info, info, "Error handling Page fault caused by PDE\n");
268         return -1;
269     }
270
271     return 0;
272 }
273
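/* PDE-level handler: injects guest-caused faults, then either maps a guest
 * 2MB page (with a host large page when alignment and configuration allow,
 * falling back to a 4KB shadow page table otherwise) or descends into
 * handle_pte_shadow_pagefault_64() for normal 4KB guest pages. */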
274 static int handle_pde_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
275                                           pde64_t * shadow_pd, pde64_t * guest_pd) {
276     pt_access_status_t guest_pde_access;
277     pt_access_status_t shadow_pde_access;
278     pde64_t * guest_pde = (pde64_t *)&(guest_pd[PDE64_INDEX(fault_addr)]);
279     pde64_t * shadow_pde = (pde64_t *)&(shadow_pd[PDE64_INDEX(fault_addr)]);
280
281     PrintDebug(info->vm_info, info, "Handling PDE fault\n");
282  
283     // Check the guest page permissions
284     guest_pde_access = v3_can_access_pde64(guest_pd, fault_addr, error_code);
285
286     // Check the shadow page permissions
287     shadow_pde_access = v3_can_access_pde64(shadow_pd, fault_addr, error_code);
288   
289     /* Was the page fault caused by the Guest's page tables? */
290     if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
291         PrintDebug(info->vm_info, info, "Injecting PDE pf to guest: (guest access error=%d) (pf error code=%d)\n", 
292                    *(uint_t *)&guest_pde_access, *(uint_t *)&error_code);
293         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
294             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
295             return -1;
296         }
297         return 0;
298     }
299
300     if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
301         //
302         // PDE Entry marked non-user
303         //      
304         PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n", 
305                    shadow_pde_access, guest_pde_access);
306         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
307             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
308             return -1;
309         }
310         return 0;
311
312     } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) && 
313                (guest_pde->large_page == 1)) {
314
315         ((pde64_2MB_t *)guest_pde)->dirty = 1;
316         shadow_pde->writable = guest_pde->writable;
317
318         //PrintDebug(info->vm_info, info, "Returning due to large page Write Error\n");
319         //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
320
321         return 0;
322     } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
323                (shadow_pde_access != PT_ACCESS_OK)) {
324         // inject page fault in guest
325         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
326             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
327             return -1;
328         }
329         PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
330         PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
331         return 0;
332     }
333
334     pte64_t * shadow_pt = NULL;
335     pte64_t * guest_pt = NULL;
336
337     // get the next shadow page level, allocate if not present
338     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
339         // Check if  we can use large pages and the guest memory is properly aligned
340         // to potentially use a large page
341
342         if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
343             addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t *)guest_pde)->page_base_addr);
344             uint32_t page_size = v3_get_max_page_size(info, guest_pa, LONG);
345             
346             if (page_size == PAGE_SIZE_2MB) {
347                 if (handle_2MB_shadow_pagefault_pde_64(info, fault_addr, error_code, shadow_pde_access,
348                                                        (pde64_2MB_t *)shadow_pde, (pde64_2MB_t *)guest_pde) == -1) {
349                     PrintError(info->vm_info, info, "Error handling large pagefault with large page\n");
350                     return -1;
351                 }
352
353                 return 0;
354             }
355             // Fallthrough to handle the region with small pages
356         }
357
358         struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
359         shadow_pt = (pte64_t *)V3_VAddr((void *)shdw_page->page_pa);
360
361         PrintDebug(info->vm_info, info, "Creating new shadow PT: %p\n", shadow_pt);
362
363         shadow_pde->present = 1;
364         shadow_pde->user_page = guest_pde->user_page;
365
366         if (guest_pde->large_page == 0) {
367             shadow_pde->writable = guest_pde->writable;
368         } else {
369             // This large page flag is temporary until we can get a working cache....
370             ((pde64_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;
371
372             if (error_code.write) {
373                 shadow_pde->writable = guest_pde->writable;
374                 ((pde64_2MB_t *)guest_pde)->dirty = 1;  
375             } else {
376                 shadow_pde->writable = 0;
377                 ((pde64_2MB_t *)guest_pde)->dirty = 0;
378             }
379         }
380     
381         // VMM Specific options
382         shadow_pde->write_through = guest_pde->write_through;
383         shadow_pde->cache_disable = guest_pde->cache_disable;
384         shadow_pde->global_page = guest_pde->global_page;
385         //
386     
387         guest_pde->accessed = 1;
388     
389         shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
390     } else {
391         shadow_pt = (pte64_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
392     }
393
394     // Continue processing at the next level
395     if (guest_pde->large_page == 0) {
396         if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
397             // Machine check the guest
398             PrintError(info->vm_info, info, "Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
399             v3_raise_exception(info, MC_EXCEPTION);
400             return 0;
401         }
402     
403         if (handle_pte_shadow_pagefault_64(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
404             PrintError(info->vm_info, info, "Error handling Page fault caused by PTE\n");
405             return -1;
406         }
407     } else {
408         if (handle_2MB_shadow_pagefault_pte_64(info, fault_addr, error_code, shadow_pt, (pde64_2MB_t *)guest_pde) == -1) {
409             PrintError(info->vm_info, info, "Error handling large pagefault with small page\n");
410             return -1;
411         } 
412     }
413
414     return 0;
415 }
416
417
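/* PTE-level (leaf) handler: translates the guest physical address through the
 * VM's memory map, fills in the shadow PTE, and emulates accessed/dirty
 * tracking by leaving not-yet-dirty guest pages read-only in the shadow PTE. */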
418 static int handle_pte_shadow_pagefault_64(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
419                                           pte64_t * shadow_pt, pte64_t * guest_pt) {
420     pt_access_status_t guest_pte_access;
421     pt_access_status_t shadow_pte_access;
422     pte64_t * guest_pte = (pte64_t *)&(guest_pt[PTE64_INDEX(fault_addr)]);
423     pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
424     addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) +  PAGE_OFFSET(fault_addr);
425     //  struct shadow_page_state * state = &(info->shdw_pg_state);
426
427     PrintDebug(info->vm_info, info, "Handling PTE fault\n");
428
429     struct v3_mem_region * shdw_reg =  v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
430
431
432
433     if (shdw_reg == NULL) {
434         // Inject a machine check in the guest
435         PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
436         v3_raise_exception(info, MC_EXCEPTION);
437         return 0;
438     }
439
440     // Check the guest page permissions
441     guest_pte_access = v3_can_access_pte64(guest_pt, fault_addr, error_code);
442
443     // Check the shadow page permissions
444     shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
445
446     /* Was the page fault caused by the Guest's page tables? */
447     if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
448
449         PrintDebug(info->vm_info, info, "Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n", 
450                    guest_pte_access, *(uint_t*)&error_code);    
451
452         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
453             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
454             return -1;
455         }
456
457         return 0; 
458     }
459
460  
461     if (shadow_pte_access == PT_ACCESS_OK) {
462         // Inconsistent state...
463         // Guest Re-Entry will flush page tables and everything should now work
464         PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
465         return 0;
466     }
467
468
469     if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
470         // Page Table Entry Not Present
471         PrintDebug(info->vm_info, info, "guest_pa =%p\n", (void *)guest_pa);
472
473         if ((shdw_reg->flags.alloced == 1) ||
474             (shdw_reg->flags.read == 1)) {
475             addr_t shadow_pa = 0;
476
477             if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
478                 PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_pa);
479                 return -1;
480             }
481
482             shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
483       
484             shadow_pte->present = guest_pte->present;
485             shadow_pte->user_page = guest_pte->user_page;
486       
487             //set according to VMM policy
488             shadow_pte->write_through = guest_pte->write_through;
489             shadow_pte->cache_disable = guest_pte->cache_disable;
490             shadow_pte->global_page = guest_pte->global_page;
491             //
492       
493             guest_pte->accessed = 1;
494       
495             if (guest_pte->dirty == 1) {
496                 shadow_pte->writable = guest_pte->writable;
497             } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
498                 shadow_pte->writable = guest_pte->writable;
499                 guest_pte->dirty = 1;
500             } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
501                 shadow_pte->writable = 0;
502             }
503
504
505             // Write hooks trump all, and are set Read Only
506             if (shdw_reg->flags.write == 0) {
507                 shadow_pte->writable = 0;
508             }
509
510         } else {
511             // Pass to unhandled call back
512             if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
513                 PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
514                 return -1;
515             }
516         }
517     } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
518         guest_pte->dirty = 1;
519
520
521         if (shdw_reg->flags.write == 1) {
522             PrintDebug(info->vm_info, info, "Shadow PTE Write Error\n");
523             shadow_pte->writable = guest_pte->writable;
524         } else {
525             if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
526                 PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
527                 return -1;
528             }
529         }
530
531
532
533         return 0;
534
535     } else {
536         // Inject page fault into the guest     
537         if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
538             PrintError(info->vm_info, info, "Could not inject guest page fault\n");
539             return -1;
540         }
541         PrintError(info->vm_info, info, "PTE Page fault fell through... Not sure if this should ever happen\n");
542         PrintError(info->vm_info, info, "Manual Says to inject page fault into guest\n");
543         return -1;
544     }
545
546     return 0;
547 }
548
549
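/* Backs a guest 2MB page directly with a host 2MB page: the shadow PDE itself
 * becomes a large-page mapping of the translated host address. */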
550 static int handle_2MB_shadow_pagefault_pde_64(struct guest_info * info, 
551                                               addr_t fault_addr, pf_error_t error_code, 
552                                               pt_access_status_t shadow_pde_access,
553                                               pde64_2MB_t * large_shadow_pde, pde64_2MB_t * large_guest_pde) 
554 {
555     addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
556     //  struct shadow_page_state * state = &(info->shdw_pg_state);
557
558     PrintDebug(info->vm_info, info, "Handling 2MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
559     PrintDebug(info->vm_info, info, "LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
560
561     struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
562  
563     if (shdw_reg == NULL) {
564         // Inject a machine check in the guest
565         PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
566         v3_raise_exception(info, MC_EXCEPTION);
567         return 0;
568     }
569
570     if (shadow_pde_access == PT_ACCESS_OK) {
571         // Inconsistent state...
572         // Guest Re-Entry will flush tables and everything should now work
573         PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
574         //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
575         return 0;
576     }
577
578   
579     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
580         // Get the guest physical address of the fault
581
582         if ((shdw_reg->flags.alloced == 1) || 
583             (shdw_reg->flags.read == 1)) {
584             addr_t shadow_pa = 0;
585
586             if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
587                 PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
588                 return -1;
589             }
590
591             large_guest_pde->vmm_info = V3_LARGE_PG; /* For invalidations */
592             large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_2MB(shadow_pa);
593             large_shadow_pde->large_page = 1;
594             large_shadow_pde->present = 1;
595             large_shadow_pde->user_page = 1;
596
597             if (shdw_reg->flags.write == 0) {
598                 large_shadow_pde->writable = 0;
599             } else {
600                 large_shadow_pde->writable = 1;
601             }
602
603             //set according to VMM policy
604             large_shadow_pde->write_through = large_guest_pde->write_through;
605             large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
606             large_shadow_pde->global_page = large_guest_pde->global_page;
607             //
608       
609         } else {
610             if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
611                 PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
612                 return -1;
613             }
614         }
615     } else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) {
616         if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
617             PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
618             return -1;
619         }
620     } else {
621         PrintError(info->vm_info, info, "Error in large page fault handler...\n");
622         PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
623         return -1;
624     }
625
626     //  PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
627     PrintDebug(info->vm_info, info, "Returning from large page->large page fault handler\n");
628     return 0;
629 }
630
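/* Backs a guest 2MB page with 4KB shadow PTEs, used when the host side cannot
 * (or is not configured to) supply a 2MB page for this region. */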
631 static int handle_2MB_shadow_pagefault_pte_64(struct guest_info * info, 
632                                               addr_t fault_addr, pf_error_t error_code, 
633                                               pte64_t * shadow_pt, pde64_2MB_t * large_guest_pde) 
634 {
635     pt_access_status_t shadow_pte_access = v3_can_access_pte64(shadow_pt, fault_addr, error_code);
636     pte64_t * shadow_pte = (pte64_t *)&(shadow_pt[PTE64_INDEX(fault_addr)]);
637     addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
638     //  struct shadow_page_state * state = &(info->shdw_pg_state);
639
640     PrintDebug(info->vm_info, info, "Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
641     PrintDebug(info->vm_info, info, "ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
642
643     struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
644
645  
646     if (shdw_reg == NULL) {
647         // Inject a machine check in the guest
648         PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
649         v3_raise_exception(info, MC_EXCEPTION);
650         return 0;
651     }
652
653     if (shadow_pte_access == PT_ACCESS_OK) {
654         // Inconsistent state...
655         // Guest Re-Entry will flush tables and everything should now work
656         PrintDebug(info->vm_info, info, "Inconsistent state... Guest re-entry should flush tlb\n");
657         //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
658         return 0;
659     }
660
661   
662     if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
663         // Get the guest physical address of the fault
664
665         if ((shdw_reg->flags.alloced == 1) || 
666             (shdw_reg->flags.read == 1)) {
667             addr_t shadow_pa = 0;
668
669             if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
670                 PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
671                 return -1;
672             }
673
674             shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
675
676             shadow_pte->present = 1;
677
678             /* We are assuming that the PDE entry has precedence
679              * so the Shadow PDE will mirror the guest PDE settings, 
680              * and we don't have to worry about them here
681              * Allow everything
682              */
683             shadow_pte->user_page = 1;
684
685             if (shdw_reg->flags.write == 0) {
686                 shadow_pte->writable = 0;
687             } else {
688                 shadow_pte->writable = 1;
689             }
690
691             //set according to VMM policy
692             shadow_pte->write_through = large_guest_pde->write_through;
693             shadow_pte->cache_disable = large_guest_pde->cache_disable;
694             shadow_pte->global_page = large_guest_pde->global_page;
695             //
696       
697         } else {
698             if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
699                 PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
700                 return -1;
701             }
702         }
703     } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
704         if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
705             PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
706             return -1;
707         }
708     } else {
709         PrintError(info->vm_info, info, "Error in large page fault handler...\n");
710         PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
711         return -1;
712     }
713
714     //  PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
715     PrintDebug(info->vm_info, info, "Returning from large page->small page fault handler\n");
716     return 0;
717 }
718
719
720
721
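/* Callback used by handle_shadow_invlpg_64() while drilling down the shadow
 * page tables: returning 1 ends the walk at this level (the entry is not
 * present or has just been invalidated), 0 continues to the next level, and
 * -1 signals an error. */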
722 static int invalidation_cb_64(struct guest_info * info, page_type_t type, 
723                               addr_t vaddr, addr_t page_ptr, addr_t page_pa, 
724                               void * private_data) {
725
726     switch (type) {
727         case PAGE_PML464:
728             {    
729                 pml4e64_t * pml = (pml4e64_t *)page_ptr;
730
731                 if (pml[PML4E64_INDEX(vaddr)].present == 0) {
732                     return 1;
733                 }
734                 return 0;
735             }
736         case PAGE_PDP64:
737             {
738                 pdpe64_t * pdp = (pdpe64_t *)page_ptr;
739                 pdpe64_t * pdpe = &(pdp[PDPE64_INDEX(vaddr)]);
740
741                 if (pdpe->present == 0) {
742                     return 1;
743                 }
744      
745                 if (pdpe->vmm_info == V3_LARGE_PG) {
746                     PrintError(info->vm_info, info, "1 Gigabyte pages not supported\n");
747                     return -1;
748
749                     // Unreachable until 1 GB page support is added:
750                     // pdpe->present = 0;  return 1;
751                 }
752
753                 return 0;
754             }
755         case PAGE_PD64:
756             {
757                 pde64_t * pd = (pde64_t *)page_ptr;
758                 pde64_t * pde = &(pd[PDE64_INDEX(vaddr)]);
759
760                 if (pde->present == 0) {
761                     return 1;
762                 }
763       
764                 if (pde->vmm_info == V3_LARGE_PG) {
765                     pde->present = 0;
766                     return 1;
767                 }
768
769                 return 0;
770             }
771         case PAGE_PT64:
772             {
773                 pte64_t * pt = (pte64_t *)page_ptr;
774
775                 pt[PTE64_INDEX(vaddr)].present = 0;
776
777                 return 1;
778             }
779         default:
780             PrintError(info->vm_info, info, "Invalid Page Type\n");
781             return -1;
782
783     }
784
785     // should not get here
786     PrintError(info->vm_info, info, "Should not get here....\n");
787     return -1;
788 }
789
790
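/* Handles a guest INVLPG by walking the current shadow page tables for vaddr
 * and clearing the affected entries via invalidation_cb_64(). */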
791 static inline int handle_shadow_invlpg_64(struct guest_info * info, addr_t vaddr) {
792     PrintDebug(info->vm_info, info, "INVLPG64 - %p\n",(void*)vaddr);
793
794     int ret =  v3_drill_host_pt_64(info, info->ctrl_regs.cr3, vaddr, invalidation_cb_64, NULL);
795     if (ret == -1) {
796         PrintError(info->vm_info, info, "Page table drill returned error.... \n");
797         PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
798     }
799
800     return (ret == -1) ? -1 : 0; 
801 }