Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
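To see which branches are available before switching, you can list the remote-tracking branches first:

  git branch -r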


palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32pae.h
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2014, Daniel Zuo <pengzuo2014@u.northwestern.edu>
 * Copyright (c) 2014, Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Daniel Zuo <pengzuo2014@u.northwestern.edu>
 *         Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu>
 *         Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#define GET_BUDDY(x) (((ullong_t)x) ^ 0x1)
#define MARK_LAST_ZERO(x) (((ullong_t)x) & 0x0)
#define CR3_PAGE_BASE_ADDR(x) ((x) >> 5)
#define V3_SHADOW_LARGE_PAGE 0x3
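/* Notes on the macros above (descriptive comments added for readability):
 * - CR3_PAGE_BASE_ADDR: in 32-bit PAE mode, CR3 holds the PDPT base in bits
 *   31:5 (the PDPT is 32-byte aligned), hence the shift by 5.
 * - GET_BUDDY flips the low bit of an index, yielding the adjacent "buddy"
 *   entry. It is referenced only in commented-out buddy-mapping code below.
 * - MARK_LAST_ZERO as written always evaluates to 0 (x & 0x0); presumably it
 *   was meant to clear only the low bit (x & ~0x1). It is unused below.
 */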

/* Activate 32-bit PAE shadow paging: allocate a fresh top-level shadow PDPT,
 * point the shadow CR3 at it, and copy the guest's CR3 caching flags. */
static inline int activate_shadow_pt_32pae(struct guest_info * info) {

    struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *)&(info->ctrl_regs.cr3);
    struct cr3_32_PAE * guest_cr3 = (struct cr3_32_PAE *)&(info->shdw_pg_state.guest_cr3);
    struct cr4_32 * shadow_cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);
    struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
    addr_t shadow_pt_addr = shadow_pt->page_pa;

    shadow_pt->cr3 = shadow_pt->page_pa;
    PrintDebug(info->vm_info, info, "Top level shadow 32pae pdp page pa=%p\n", (void *)shadow_pt_addr);

    // The shadow CR3 points to the new page, which is the PDPT
    shadow_cr3->pdpt_base_addr = CR3_PAGE_BASE_ADDR(shadow_pt_addr);
    PrintDebug(info->vm_info, info, "Creating new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdpt_base_addr));

    shadow_cr3->pwt = guest_cr3->pwt;
    shadow_cr3->pcd = guest_cr3->pcd;
    shadow_cr4->pae = 1;
    //shadow_efer->lma = 1;

    return 0;
}

/*
 * Shadow PAE 32 page fault handlers
 */

static int handle_pde_shadow32pae_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd, pde32pae_t * guest_pd);
static int handle_pte_shadow32pae_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pte32pae_t * shadow_pt, pte32pae_t * guest_pt);
static int handle_2MB_shadow32pae_pagefault_pte_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pte32pae_t * shadow_pt, pde32pae_2MB_t * large_guest_pde);
static int handle_2MB_shadow32pae_pagefault_pde_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd, pde32pae_2MB_t * large_guest_pde);

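/* Structure of the fault path (summary comment added for readability): the
 * top-level handler below resolves the PDPT level, then hands off to the PDE
 * handler, which either resolves a 2MB guest large page (via one of the two
 * 2MB handlers, depending on whether the shadow mapping uses a 2MB PDE or
 * 4KB PTEs) or descends to the PTE handler. At each level the guest's entry
 * and the shadow's entry are permission-checked; faults the guest caused are
 * re-injected, while faults caused by missing shadow state are fixed up by
 * building the shadow entry from the guest entry. */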
static inline int handle_shadow_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {

    pdpe32pae_t * shadow_pdp = CR3_TO_PDPE32PAE_VA(info->ctrl_regs.cr3);
    pdpe32pae_t * guest_pdp = NULL;
    addr_t guest_cr3 = CR3_TO_PDPE32PAE_PA(info->shdw_pg_state.guest_cr3);

    pt_access_status_t shadow_pdpe_access;
    pt_access_status_t guest_pdpe_access;

    pdpe32pae_t * guest_pdpe = NULL;
    pdpe32pae_t * shadow_pdpe = (pdpe32pae_t *)&(shadow_pdp[PDPE32PAE_INDEX(fault_addr)]);

    PrintDebug(info->vm_info, info, "32 bit PAE shadow paging page fault handler: %p\n", (void *)fault_addr);
    PrintDebug(info->vm_info, info, "Handling PDP fault\n");

    if (v3_gpa_to_hva(info, guest_cr3, (addr_t *)&guest_pdp) == -1) {
        PrintError(info->vm_info, info, "Invalid Guest PDPE Address: 0x%p\n", (void *)guest_cr3);
        return -1;
    }

    guest_pdpe = (pdpe32pae_t *)&(guest_pdp[PDPE32PAE_INDEX(fault_addr)]);

    PrintDebug(info->vm_info, info, "Checking Guest %p\n", (void *)guest_pdp);
    // Check the guest page permissions
    guest_pdpe_access = v3_can_access_pdpe32pae(guest_pdp, fault_addr, error_code);

    PrintDebug(info->vm_info, info, "Checking Host %p\n", (void *)shadow_pdp);
    // Check the host page permissions
    shadow_pdpe_access = v3_can_access_pdpe32pae(shadow_pdp, fault_addr, error_code);

    /* Was the page fault caused by an out-of-range address?
     * (A PAE PDPT has only four entries, so the valid indices are 0-3.) */
    if ((PDPE32PAE_INDEX(fault_addr) != 0) && (PDPE32PAE_INDEX(fault_addr) != 1) &&
        (PDPE32PAE_INDEX(fault_addr) != 2) && (PDPE32PAE_INDEX(fault_addr) != 3)) {
        PrintDebug(info->vm_info, info, "Fault PDPE index 0x%x is out of range\n", PDPE32PAE_INDEX(fault_addr));
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    /* Zero address fault */
    if (fault_addr == 0) {
        PrintDebug(info->vm_info, info, "Guest page tree for guest virtual address zero fault\n");
        PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
        PrintDebug(info->vm_info, info, "Host page tree for guest virtual address zero fault\n");
        PrintHostPageTree(info, fault_addr, (addr_t)(info->ctrl_regs.cr3));
    }

    /* Was the page fault caused by the guest's page tables? */
    if (v3_is_guest_pf(guest_pdpe_access, shadow_pdpe_access) == 1) {
        PrintDebug(info->vm_info, info, "Injecting PDPE pf to guest: (guest access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pdpe_access, *(uint_t *)&error_code);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    }

    if ((shadow_pdpe_access == PT_ACCESS_USER_ERROR) || (shadow_pdpe_access == PT_ACCESS_WRITE_ERROR)) {
        PrintDebug(info->vm_info, info, "Shadow Paging User or Write Access error (shadow_pdpe_access=0x%x, guest_pdpe_access=0x%x). Ignoring it.\n",
                   shadow_pdpe_access, guest_pdpe_access);
    } else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT) && (shadow_pdpe_access != PT_ACCESS_OK)) {
        // Unknown error: inject a page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        PrintDebug(info->vm_info, info, "Unknown error occurred (shadow_pdpe_access=%x)\n", shadow_pdpe_access);
        PrintDebug(info->vm_info, info, "Manual says to inject page fault into guest\n");
        return 0;
    }

    pde32pae_t * shadow_pd = NULL;
    pde32pae_t * guest_pd = NULL;

    // Get the next shadow page level; allocate it if not present
    if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        shadow_pd = (pde32pae_t *)V3_VAddr((void *)shdw_page->page_pa);

        shadow_pdpe->present = 1;
        //shadow_pdpe->user_page = guest_pdpe->user_page;
        //shadow_pdpe->writable = guest_pdpe->writable;
        shadow_pdpe->write_through = guest_pdpe->write_through;
        shadow_pdpe->cache_disable = guest_pdpe->cache_disable;

        guest_pdpe->accessed = 1;

        shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
    } else {
        shadow_pd = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
    }

    // Continue processing at the next level
    // (The error message reports the guest PD address that failed to translate;
    // the original mistakenly printed the shadow PD address here.)
    if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr), (addr_t *)&guest_pd) == -1) {
        // Machine check the guest
        PrintError(info->vm_info, info, "Invalid Guest PD Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pdpe->pd_base_addr));
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (handle_pde_shadow32pae_pagefault_32pae(info, fault_addr, error_code, shadow_pd, guest_pd) == -1) {
        PrintError(info->vm_info, info, "Error handling Page Fault caused by PDE\n");
        return -1;
    }
    return 0;
}

static int handle_pde_shadow32pae_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd, pde32pae_t * guest_pd) {
    pt_access_status_t guest_pde_access;
    pt_access_status_t shadow_pde_access;

    pde32pae_t * guest_pde = (pde32pae_t *)&(guest_pd[PDE32PAE_INDEX(fault_addr)]);
    pde32pae_t * shadow_pde = (pde32pae_t *)&(shadow_pd[PDE32PAE_INDEX(fault_addr)]);
    // What is GET_BUDDY and MARK_LAST_ZERO
    // pde32pae_t * shadow_pde_bd = (pde32pae_t *)&(shadow_pd[GET_BUDDY(PDE32PAE_INDEX(fault_addr))]);
    // pde32pae_t * shadow_pde_sd = (pde32pae_t *)&(shadow_pd[MARK_LAST_ZERO(PDE32PAE_INDEX(fault_addr))]);

    PrintDebug(info->vm_info, info, "Handling PDE fault\n");

    PrintDebug(info->vm_info, info, "Checking guest_pde_access %p\n", (void *)guest_pd);
    // Check the guest page permissions
    guest_pde_access = v3_can_access_pde32pae(guest_pd, fault_addr, error_code);

    // Check the shadow page permissions
    PrintDebug(info->vm_info, info, "Checking shadow_pde_access %p\n", (void *)shadow_pd);
    shadow_pde_access = v3_can_access_pde32pae(shadow_pd, fault_addr, error_code);

    /* Was the page fault caused by the guest PDE? */
    if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) {
        PrintDebug(info->vm_info, info, "Injecting PDE pf to guest: (guest access error=%d) (shdw access error=%d) (pf error code=%d)\n",
                   *(uint_t *)&guest_pde_access, *(uint_t *)&shadow_pde_access, *(uint_t *)&error_code);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }
        return 0;
    }

    // Guest PDE is ok
    if (shadow_pde_access == PT_ACCESS_USER_ERROR) {
        // PDE entry marked non-user
        PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n",
                   shadow_pde_access, guest_pde_access);
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        return 0;
    } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) &&
               (guest_pde->large_page == 1)) {
        // Write to a writable 2MB guest page: propagate the dirty bit
        ((pde32pae_2MB_t *)guest_pde)->dirty = 1;
        shadow_pde->writable = guest_pde->writable;
        // shadow_pde_bd->writable = guest_pde->writable;
        return 0;
    } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
               (shadow_pde_access != PT_ACCESS_OK)) {
        // Unknown error: inject a page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault\n");
            return -1;
        }
        PrintDebug(info->vm_info, info, "Unknown error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
        PrintDebug(info->vm_info, info, "Manual says to inject page fault into guest\n");
        return 0;
    }

    pte32pae_t * shadow_pt = NULL;
    //pte32pae_t * shadow_pt_bd = NULL;
    pte32pae_t * guest_pt = NULL;

    // Get to the next shadow page level
    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        // Check if we can use large pages and the guest memory is properly aligned
        // to potentially use a large page

        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) {
            addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde32pae_2MB_t *)guest_pde)->page_base_addr);
            uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);

            if (page_size == PAGE_SIZE_2MB) {
                if (handle_2MB_shadow32pae_pagefault_pde_32pae(info, fault_addr, error_code,
                                                               shadow_pd, (pde32pae_2MB_t *)guest_pde) == -1) {
                    PrintError(info->vm_info, info, "Error handling large pagefault with large page\n");
                    return -1;
                }
                return 0;
            }
            // Fall through to handle the region with small pages
        }

        struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
        //struct shadow_page_data * shdw_page_bd = create_new_shadow_pt(info);
        shadow_pt = (pte32pae_t *)V3_VAddr((void *)shdw_page->page_pa);
        //shadow_pt_bd = (pte32pae_t *)V3_VAddr((void *)shdw_page_bd->page_pa);

        PrintDebug(info->vm_info, info, "Creating new shadow PTs: %p\n", shadow_pt);

        shadow_pde->present = 1;
        //shadow_pde_bd->present = 1;
        shadow_pde->user_page = guest_pde->user_page;
        //shadow_pde_bd->user_page = guest_pde->user_page;

        if (guest_pde->large_page == 0) {
            shadow_pde->writable = guest_pde->writable;
            //shadow_pde_bd->writable = guest_pde->writable;
        } else {
            ((pde32pae_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;

            if (error_code.write) {
                shadow_pde->writable = guest_pde->writable;
                //shadow_pde_bd->writable = guest_pde->writable;
                ((pde32pae_2MB_t *)guest_pde)->dirty = 1;
            } else {
                shadow_pde->writable = 0;
                //shadow_pde_bd->writable = 0;
                ((pde32pae_2MB_t *)guest_pde)->dirty = 0;
            }
        }

        // VMM specific options
        shadow_pde->write_through = guest_pde->write_through;
        shadow_pde->cache_disable = guest_pde->cache_disable;
        shadow_pde->global_page = guest_pde->global_page;

        //shadow_pde_bd->write_through = guest_pde->write_through;
        //shadow_pde_bd->cache_disable = guest_pde->cache_disable;
        //shadow_pde_bd->global_page = guest_pde->global_page;

        guest_pde->accessed = 1;

        shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
        //shadow_pde_bd->pt_base_addr = PAGE_BASE_ADDR(shdw_page_bd->page_pa);
    } else {
        if ((info->use_large_pages == 1) && (guest_pde->large_page == 1) && (guest_pde->vmm_info == V3_SHADOW_LARGE_PAGE)) {
            addr_t guest_pa = BASE_TO_PAGE_ADDR_2MB(((pde32pae_2MB_t *)guest_pde)->page_base_addr);
            uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);

            if (page_size == PAGE_SIZE_2MB) {
                if (shadow_pde_access == PT_ACCESS_OK) {
                    // Inconsistent state...
                    // Guest re-entry will flush the tables and everything should now work
                    PrintDebug(info->vm_info, info, "Inconsistent state PDE... Guest re-entry should flush tlb\n");
                    //PrintDebug(info->vm_info, info, "Bug here: shadow_pde_access is %d page_size is %d\n",
                    //           (uint_t)shadow_pde_access, (uint_t)page_size);
                    return 0;
                }
            }
        }
        shadow_pt = (pte32pae_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
    }

    if (guest_pde->large_page == 0) {
        if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t *)&guest_pt) == -1) {
            // Machine check the guest
            PrintDebug(info->vm_info, info, "Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
            v3_raise_exception(info, MC_EXCEPTION);
            return 0;
        }
        if (handle_pte_shadow32pae_pagefault_32pae(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) {
            PrintError(info->vm_info, info, "Error handling Page fault caused by PTE\n");
            return -1;
        }
    } else {
        // Use 4KB shadow pages to back the guest's large page
        if (handle_2MB_shadow32pae_pagefault_pte_32pae(info, fault_addr, error_code, shadow_pt, (pde32pae_2MB_t *)guest_pde) == -1) {
            PrintError(info->vm_info, info, "Error handling large pagefault\n");
            return -1;
        }
    }

    return 0;
}
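/* Handle a fault at the 4KB PTE level: build or refresh the shadow PTE from
 * the guest PTE and the permissions of the VMM memory region backing the
 * guest physical page. (Summary comment added for readability.) */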
static int handle_pte_shadow32pae_pagefault_32pae(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                                  pte32pae_t * shadow_pt, pte32pae_t * guest_pt) {
    pt_access_status_t guest_pte_access;
    pt_access_status_t shadow_pte_access;
    pte32pae_t * guest_pte = (pte32pae_t *)&(guest_pt[PTE32PAE_INDEX(fault_addr)]);
    pte32pae_t * shadow_pte = (pte32pae_t *)&(shadow_pt[PTE32PAE_INDEX(fault_addr)]);
    addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);

    PrintDebug(info->vm_info, info, "Handling PTE fault\n");

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    // Check the guest page permissions
    guest_pte_access = v3_can_access_pte32pae(guest_pt, fault_addr, error_code);

    // Check the shadow page permissions
    shadow_pte_access = v3_can_access_pte32pae(shadow_pt, fault_addr, error_code);

    /* Was the page fault caused by the guest's page tables? */
    if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) {
        PrintDebug(info->vm_info, info, "Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n",
                   guest_pte_access, *(uint_t *)&error_code);

        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush the page tables and everything should now work
        PrintDebug(info->vm_info, info, "Inconsistent state PTE... Guest re-entry should flush tlb\n");
        PrintDebug(info->vm_info, info, "guest_pte_access is %d and shadow_pte_access is %d\n",
                   (uint_t)guest_pte_access, (uint_t)shadow_pte_access);
        PrintDebug(info->vm_info, info, "Error_code: write 0x%x, present 0x%x, user 0x%x, rsvd_access 0x%x, ifetch 0x%x\n",
                   error_code.write, error_code.present, error_code.user, error_code.rsvd_access, error_code.ifetch);
        PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        PrintGuestPageTree(info, fault_addr, (addr_t)(info->shdw_pg_state.guest_cr3));
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Page table entry not present
        PrintDebug(info->vm_info, info, "guest_pa =%p\n", (void *)guest_pa);

        if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) {
                PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_pa);
                return -1;
            }

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            PrintDebug(info->vm_info, info, "\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));

            shadow_pte->present = guest_pte->present;
            shadow_pte->user_page = guest_pte->user_page;

            // Set according to VMM policy
            shadow_pte->write_through = guest_pte->write_through;
            shadow_pte->cache_disable = guest_pte->cache_disable;
            shadow_pte->global_page = guest_pte->global_page;

            guest_pte->accessed = 1;

            if (guest_pte->dirty == 1) {
                shadow_pte->writable = guest_pte->writable;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
                shadow_pte->writable = guest_pte->writable;
                guest_pte->dirty = 1;
            } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
                shadow_pte->writable = 0;
            }

            // Write hooks trump all, and are set read-only
            if (shdw_reg->flags.write == 0) {
                shadow_pte->writable = 0;
            }

        } else {
            // Page fault on unhandled memory region

            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        guest_pte->dirty = 1;

        if (shdw_reg->flags.write == 1) {
            PrintDebug(info->vm_info, info, "Shadow PTE Write Error\n");
            shadow_pte->writable = guest_pte->writable;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
        return 0;
    } else {
        // Inject page fault into the guest
        if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
            PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
            return -1;
        }

        PrintError(info->vm_info, info, "PTE Page fault fell through... Not sure if this should ever happen\n");
        PrintError(info->vm_info, info, "Manual says to inject page fault into guest\n");
        return -1;
    }

    return 0;
}

// Handle a 2MB page fault with a 2MB page in the shadow PDE
static int handle_2MB_shadow32pae_pagefault_pde_32pae(struct guest_info * info,
                                                      addr_t fault_addr, pf_error_t error_code,
                                                      pde32pae_t * shadow_pd,
                                                      pde32pae_2MB_t * large_guest_pde) {
    pt_access_status_t shadow_pde_access;
    pde32pae_2MB_t * large_shadow_pde = (pde32pae_2MB_t *)&(shadow_pd[PDE32PAE_INDEX(fault_addr)]);
    shadow_pde_access = v3_can_access_pde32pae(shadow_pd, fault_addr, error_code);

    // Note: the original used PAGE_OFFSET_4MB here; PAE large pages are 2MB
    // (21-bit offsets), so PAGE_OFFSET_2MB matches the base address above and
    // the parallel handler below
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);

    PrintDebug(info->vm_info, info, "Handling 2MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug(info->vm_info, info, "LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return -1;
    }

    //dead bug
    if (shadow_pde_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush the tables and everything should now work
        PrintDebug(info->vm_info, info, "Inconsistent state 2MB pde... Guest re-entry should flush tlb\n");
        return 0;
    }

    if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        if ((shdw_reg->flags.alloced == 1) &&
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
                PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
                return -1;
            }

            PrintDebug(info->vm_info, info, "shadow PA = %p\n", (void *)shadow_pa);

            large_guest_pde->vmm_info = V3_SHADOW_LARGE_PAGE; /* For invalidations */
            // The shadow PDE (last bit 0) gets the half with the smaller address and its buddy gets the rest
            large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_2MB(shadow_pa);
            //large_shadow_pde_bd->page_base_addr = (PAGE_BASE_ADDR_4MB(shadow_pa) << 1) | 1;

            // large_shadow_pde->large_page = 1;
            large_shadow_pde->present = 1;
            large_shadow_pde->user_page = 1;

            // large_shadow_pde_bd->large_page = 1;
            // large_shadow_pde_bd->present = 1;
            // large_shadow_pde_bd->user_page = 1;

            PrintDebug(info->vm_info, info, "\tMapping shadow pages (%p)\n",
                       (void *)BASE_TO_PAGE_ADDR_2MB(large_shadow_pde->page_base_addr));

            if (shdw_reg->flags.write == 0) {
                large_shadow_pde->writable = 0;
                // large_shadow_pde_bd->writable = 0;
            } else {
                // large_shadow_pde_bd->writable = 1;
                large_shadow_pde->writable = 1;
            }

            // Set according to VMM policy
            large_shadow_pde->write_through = large_guest_pde->write_through;
            large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
            large_shadow_pde->global_page = large_guest_pde->global_page;

            //large_shadow_pde_bd->write_through = large_guest_pde->write_through;
            //large_shadow_pde_bd->cache_disable = large_guest_pde->cache_disable;
            //large_shadow_pde_bd->global_page = large_guest_pde->global_page;
        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) {

        if (shdw_reg->flags.write == 0) {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }

    } else {
        PrintError(info->vm_info, info, "Error in large page fault handler...\n");
        PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
        return -1;
    }

    PrintDebug(info->vm_info, info, "Returning from large page->large page fault handler\n");
    return 0;
}

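/* Handle a fault within a 2MB guest page that the shadow maps with 4KB
 * pages (used when the host cannot map the region with a large page).
 * (Summary comment added for readability.) */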
static int handle_2MB_shadow32pae_pagefault_pte_32pae(struct guest_info * info,
                                                      addr_t fault_addr, pf_error_t error_code,
                                                      pte32pae_t * shadow_pt, pde32pae_2MB_t * large_guest_pde) {
    pt_access_status_t shadow_pte_access = v3_can_access_pte32pae(shadow_pt, fault_addr, error_code);
    pte32pae_t * shadow_pte = (pte32pae_t *)&(shadow_pt[PTE32PAE_INDEX(fault_addr)]);
    addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_2MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_2MB(fault_addr);
    //  struct shadow_page_state * state = &(info->shdw_pg_state);

    PrintDebug(info->vm_info, info, "Handling 2MB PTE fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t *)&error_code);
    PrintDebug(info->vm_info, info, "ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);

    struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);

    if (shdw_reg == NULL) {
        // Inject a machine check in the guest
        PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
        v3_raise_exception(info, MC_EXCEPTION);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_OK) {
        // Inconsistent state...
        // Guest re-entry will flush the tables and everything should now work
        PrintDebug(info->vm_info, info, "Inconsistent state 2MB PTE... Guest re-entry should flush tlb\n");
        //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
        return 0;
    }

    if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
        // Get the guest physical address of the fault

        // Note: the original tested (alloced == 1) || (read == 1); the parallel
        // handlers above use &&, which matches the intent (the region must be
        // both allocated and readable), so && is used here as well
        if ((shdw_reg->flags.alloced == 1) &&
            (shdw_reg->flags.read == 1)) {
            addr_t shadow_pa = 0;

            if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
                PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
                return -1;
            }

            shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);

            shadow_pte->present = 1;

            /* We are assuming that the PDE entry has precedence,
             * so the shadow PDE will mirror the guest PDE settings,
             * and we don't have to worry about them here.
             * Allow everything.
             */
            shadow_pte->user_page = 1;

            if (shdw_reg->flags.write == 0) {
                shadow_pte->writable = 0;
            } else {
                shadow_pte->writable = 1;
            }

            // Set according to VMM policy
            shadow_pte->write_through = large_guest_pde->write_through;
            shadow_pte->cache_disable = large_guest_pde->cache_disable;
            shadow_pte->global_page = large_guest_pde->global_page;

        } else {
            if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
                PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
                return -1;
            }
        }
    } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
        if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
            PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
            return -1;
        }
    } else {
        PrintError(info->vm_info, info, "Error in large page fault handler...\n");
        PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
        return -1;
    }

    //  PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
    PrintDebug(info->vm_info, info, "Returning from large page->small page fault handler\n");
    return 0;
}
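/* Callback for v3_drill_host_pt_32pae(). By the logic below, returning 0
 * appears to continue the walk to the next level, returning 1 stops the walk
 * early (entry not present, or a leaf was invalidated), and -1 signals an
 * error. (Summary comment added for readability.) */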
static int invalidation_cb32pae_64(struct guest_info * info, page_type_t type,
                                   addr_t vaddr, addr_t page_ptr, addr_t page_pa,
                                   void * private_data) {

    switch (type) {
        case PAGE_PDP32PAE:
            {
                pdpe32pae_t * pdp = (pdpe32pae_t *)page_ptr;
                pdpe32pae_t * pdpe = &(pdp[PDPE32PAE_INDEX(vaddr)]);

                if (pdpe->present == 0) {
                    return 1;
                }

                if (pdpe->vmm_info == V3_LARGE_PG) {
                    PrintError(info->vm_info, info, "1 Gigabyte pages not supported\n");
                    return -1;
                }

                return 0;
            }
        case PAGE_PD32PAE:
            {
                pde32pae_t * pd = (pde32pae_t *)page_ptr;
                pde32pae_t * pde = &(pd[PDE32PAE_INDEX(vaddr)]);
                //pde32pae_t * pde_bd = &(pd[GET_BUDDY(PDE32PAE_INDEX(vaddr))]);

                if (pde->present == 0) {
                    return 1;
                }

                if ((pde->vmm_info == V3_LARGE_PG) || (pde->vmm_info == V3_SHADOW_LARGE_PAGE)) {
                    pde->present = 0;
                    //pde_bd->present = 0;
                    return 1;
                }

                return 0;
            }
        case PAGE_PT32PAE:
            {
                pte32pae_t * pt = (pte32pae_t *)page_ptr;

                pt[PTE32PAE_INDEX(vaddr)].present = 0;

                return 1;
            }
        default:
            PrintError(info->vm_info, info, "Invalid Page Type\n");
            return -1;
    }

    // Should not get here
    PrintError(info->vm_info, info, "Should not get here....\n");
    return -1;
}

static inline int handle_shadow_invlpg_32pae(struct guest_info * info, addr_t vaddr) {
    PrintDebug(info->vm_info, info, "INVLPG32PAE - %p\n", (void *)vaddr);

    int ret = v3_drill_host_pt_32pae(info, info->ctrl_regs.cr3, vaddr, invalidation_cb32pae_64, NULL);
    if (ret == -1) {
        PrintError(info->vm_info, info, "Page table drill returned error....\n");
        PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
    }

    return (ret == -1) ? -1 : 0;
}