Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
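For example, to track a release branch instead (the branch name below is only illustrative; run git branch -r to see which branches actually exist):

  git checkout --track -b Release-1.3 origin/Release-1.3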


File: palacios/src/palacios/mmu/vmm_shdw_pg_tlb_32.h (palacios.git)
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2014, Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu> 
11  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Jack Lange <jarusl@cs.northwestern.edu>
16  * Author: Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu> 
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21
22 #define GET_BUDDY(x) (((ullong_t)x) ^ 0x1)
23 #define MARK_LAST_ZERO(x) (((ullong_t)x) & (~0x1))   // clear the low bit: the even-indexed buddy
24 #define CR3_PAGE_BASE_ADDR(x) ((x) >> 5)
25 #define V3_SHADOW_LARGE_PAGE 0x3
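/*
 * This backend shadows a 32-bit non-PAE guest with 32-bit PAE shadow page
 * tables.  A guest page directory holds 1024 entries covering 4MB each, while
 * a PAE directory holds 512 entries covering 2MB each, so the region covered
 * by one guest PDE is always backed by a pair of adjacent shadow PDEs
 * ("buddies") whose indices differ only in the lowest bit.  GET_BUDDY flips
 * that bit to reach the partner entry and MARK_LAST_ZERO clears it to select
 * the even-indexed member of the pair.  CR3_PAGE_BASE_ADDR extracts the
 * 32-byte-aligned PDPT base from a PAE-format CR3.
 */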
26
27
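/*
 * Install a newly allocated PDPT as the shadow top level: its physical
 * address is loaded into the shadow CR3 (hence the >> 5 shift into the
 * 32-byte-aligned pdpt_base_addr field) and CR4.PAE is forced on, so the
 * hardware walks PAE-format shadow tables even though the guest itself is
 * running with non-PAE 32-bit paging.
 */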
28 static inline int activate_shadow_pt_32( struct guest_info *info) 
29 {
30         struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
31         struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
32         struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
33
34 #ifdef V3_CONFIG_DEBUG_SHADOW_PAGING
35         struct cr4_32 * guest_cr4 = (struct cr4_32 *)&(info->shdw_pg_state.guest_cr4);
36 #endif
37         
38         struct shadow_page_data * shadow_pt = create_new_shadow_pt(info);
39         addr_t shadow_pt_addr = shadow_pt->page_pa;             
40         shadow_pt->cr3 = shadow_pt->page_pa;      
41         PrintDebug(info->vm_info, info, "Top level ShadowPAE pdp page pa=%p\n", (void *)shadow_pt_addr);
42         PrintDebug(info->vm_info,info,"Guest CR4 =%x and Shadow CR4 =%x\n", *(uint_t *)guest_cr4, *(uint_t*)shadow_cr4);
43         //shadow cr3 points to the new page, which is the 4-entry PAE PDPT
44     shadow_cr3->pdpt_base_addr = CR3_PAGE_BASE_ADDR(shadow_pt_addr); // x >> 5
45     PrintDebug(info->vm_info, info, "Creating new shadow page table %p\n", (void *)BASE_TO_PAGE_ADDR(shadow_cr3->pdpt_base_addr));
46
47         shadow_cr3->pwt = guest_cr3->pwt;  
48         shadow_cr3->pcd = guest_cr3->pcd;       
49         shadow_cr4->pae = 1;
50         //shadow_cr4->pse = 1;
51         /*      shadow_efer->lme = 1;
52         shadow_efer->lma = 1; */
53         
54         return 0;
55 }
56
57 /*
58 *
59 * shadowPAE page fault handlers
60 *
61 */
62
63 static int handle_pdpe_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,pdpe32pae_t * shadow_pdp) ;
64 static int handle_pde_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd);
65 static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
66                                             pte32pae_t * shadow_pt, pte32_t * guest_pt) ;
67 static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info, 
68                                               addr_t fault_addr, pf_error_t error_code, 
69                                               pte32pae_t * shadow_pt, pde32_4MB_t * large_guest_pde) ;
70 static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info, 
71                                      addr_t fault_addr, pf_error_t error_code, 
72                                      pt_access_status_t shadow_pde_access,
73                                      pde32pae_2MB_t * large_shadow_pde, pde32pae_2MB_t *large_shadow_pde_bd,
74                                                 pde32_4MB_t * large_guest_pde)  ;
75
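/*
 * Fault handling walks the shadow hierarchy top-down: PDPT entry, then PDE,
 * then PTE.  Guest 4MB pages are shadowed either directly with a buddy pair
 * of 2MB shadow PDEs (handle_4MB_shadow_pagefault_pde_32) or, when a large
 * shadow mapping cannot be used, with 4KB shadow PTEs
 * (handle_4MB_shadow_pagefault_pte_32).
 */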
76 static inline int handle_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code)
77 {
78         // pointer to the shadow PDPT (the top-level table in 32-bit PAE)
79         pdpe32pae_t * shadow_pdp = CR3_TO_PDPE32PAE_VA(info->ctrl_regs.cr3);
80         PrintDebug(info->vm_info, info, "32 bit ShadowPAE page fault handler : %p----------------------------------------\n", (void*)fault_addr);
81         if (handle_pdpe_shadow_pagefault_32(info, fault_addr, error_code, shadow_pdp) == -1) {
82                 PrintError(info->vm_info, info, "Error handling Page fault caused by PDPE\n");
83                 return -1;
84         }       
85         return 0;
86 }
87
88 // the shadow PDP has only 4 valid entries (indices 0-3); the handler ensures the faulting one is present and accessible
89 static int handle_pdpe_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pdpe32pae_t * shadow_pdp) 
90 {
91         pt_access_status_t shadow_pdpe_access;  
92         
93         //fault address error
94         if ( (PDPE32PAE_INDEX(fault_addr) != 0) && (PDPE32PAE_INDEX(fault_addr) != 1)
95             && (PDPE32PAE_INDEX(fault_addr) != 2) && (PDPE32PAE_INDEX(fault_addr) != 3))
96         {
97                 PrintDebug(info->vm_info, info, "Fault pdpe index is 0x%x, out of range\n", PDPE32PAE_INDEX(fault_addr));
98                 if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
99                         PrintError(info->vm_info, info, "Could not inject guest page fault\n");
100                         return -1;
101                 }       
102                 return 0;
103         }
104         
105         pdpe32pae_t * shadow_pdpe = (pdpe32pae_t *)&(shadow_pdp[PDPE32PAE_INDEX(fault_addr)]);
106     
107         PrintDebug(info->vm_info, info, "Handling PDP fault\n");
108         
109         
110     if (fault_addr==0) { 
111                 PrintDebug(info->vm_info, info, "Guest Page Tree for guest virtual address zero fault\n");
112                 PrintGuestPageTree(info,fault_addr,(addr_t)(info->shdw_pg_state.guest_cr3));
113                 PrintDebug(info->vm_info, info, "Host Page Tree for guest virtual address zero fault\n");
114                 PrintHostPageTree(info,fault_addr,(addr_t)(info->ctrl_regs.cr3));
115     }   
116         
117     PrintDebug(info->vm_info, info, "Checking shadow_pdp_access %p\n", (void *)shadow_pdp);     
118     // Check the shadow page permissions
119     shadow_pdpe_access = v3_can_access_pdpe32pae(shadow_pdp, fault_addr, error_code);   
120         
121    if (shadow_pdpe_access == PT_ACCESS_USER_ERROR || shadow_pdpe_access == PT_ACCESS_WRITE_ERROR) 
122    {
123                 //
124                 // PDPT entry flagged a user or write access error
125                 //      
126                 PrintDebug(info->vm_info, info, "Shadow Paging User or Write access error (shadow_pdpe_access=0x%x). Ignore it.\n", shadow_pdpe_access);
127                 //shadow_pdpe->user_page = 1;
128                 //return 0;
129     } 
130    else if ((shadow_pdpe_access != PT_ACCESS_NOT_PRESENT ) && 
131                (shadow_pdpe_access != PT_ACCESS_OK)) 
132         {
133                 // inject page fault in guest
134                 //
135                 // unknown error
136                 //
137                 if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
138                         PrintError(info->vm_info, info, "Could not inject guest page fault\n");
139                         return -1;
140                 }
141                 PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pdpe_access=%d)\n", shadow_pdpe_access);
142                 PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
143                 return 0;
144     }   
145
146         pde32pae_t * shadow_pd = NULL;
147         //get to page directory table level, allocate if not present
148     if (shadow_pdpe_access == PT_ACCESS_NOT_PRESENT) {
149                 struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
150                 shadow_pd = (pde32pae_t *)V3_VAddr((void *)shdw_page->page_pa);
151                 PrintDebug(info->vm_info, info, "Creating new shadow PDE table: %p\n",shadow_pd);        
152                 // mark the new PDPT entry present
153                 shadow_pdpe->present = 1;
154                 //shadow_pdpe->user_page = 1;
155                 //shadow_pdpe->writable = 1;
156                 // leaving these 0 gives the lower levels freedom to set them
157                 shadow_pdpe->write_through = 0;
158                 shadow_pdpe->cache_disable = 0;
159
160                 shadow_pdpe->pd_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
161     } 
162         else 
163     {
164                 shadow_pd = (pde32pae_t *)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(shadow_pdpe->pd_base_addr));
165     }   
166         
167
168
169     if (handle_pde_shadow_pagefault_32(info, fault_addr, error_code, shadow_pd) == -1) {
170                 PrintError(info->vm_info, info, "Error handling Page fault caused by PDE\n");
171                 return -1;
172     }
173     return 0;
174 }       
175
176 // handle a fault at the page-directory (PDE) level
177 static int handle_pde_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code, pde32pae_t * shadow_pd)
178 {
179     pt_access_status_t guest_pde_access;
180     pt_access_status_t shadow_pde_access;
181         
182         pde32_t * guest_pd = NULL;
183         pde32_t * guest_pde = NULL;
184         addr_t guest_cr3 = CR3_TO_PDE32_PA(info->shdw_pg_state.guest_cr3);
185     if (v3_gpa_to_hva(info, guest_cr3, (addr_t*)&guest_pd) == -1) {
186                 PrintError(info->vm_info, info, "Invalid Guest PDE Address: 0x%p\n",  (void *)guest_cr3);
187                 return -1;
188     }
189     guest_pde = (pde32_t *)&(guest_pd[PDE32_INDEX(fault_addr)]);
190         
191     pde32pae_t * shadow_pde = (pde32pae_t *)&(shadow_pd[PDE32PAE_INDEX(fault_addr)]);
192     pde32pae_t * shadow_pde_bd = (pde32pae_t *)&(shadow_pd[GET_BUDDY(PDE32PAE_INDEX(fault_addr))]);
193     pde32pae_t * shadow_pde_sd = (pde32pae_t *)&(shadow_pd[MARK_LAST_ZERO(PDE32PAE_INDEX(fault_addr))]);  
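    /* shadow_pde_bd is the buddy entry (index with the low bit flipped);
     * shadow_pde_sd is the even-indexed member of the buddy pair, used below
     * only to decide whether the two pointers need to be swapped. */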
194     PrintDebug(info->vm_info, info, "Handling PDE fault\n");    
195
196     PrintDebug(info->vm_info, info, "Checking guest_pde_access %p\n", (void *)guest_pd);        
197     // Check the guest page permissions
198     guest_pde_access = v3_can_access_pde32(guest_pd, fault_addr, error_code);   
199     // Check the shadow page permissions
200     PrintDebug(info->vm_info, info, "Checking shadow_pde_access %p\n", (void *)shadow_pd);
201     shadow_pde_access = v3_can_access_pde32pae(shadow_pd, fault_addr, error_code);
202         
203     /* Was the page fault caused by the Guest PDE */
204     if (v3_is_guest_pf(guest_pde_access, shadow_pde_access) == 1) 
205         {
206                 PrintDebug(info->vm_info, info, "Injecting PDE pf to guest: (guest access error=%d) (shdw access error=%d)  (pf error code=%d)\n", 
207                    *(uint_t *)&guest_pde_access, *(uint_t *)&shadow_pde_access, *(uint_t *)&error_code);
208                 if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) 
209                 {
210                         PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
211                         return -1;
212                 }
213                 return 0;
214     }
215         
216         //Guest PDE ok
217     if (shadow_pde_access == PT_ACCESS_USER_ERROR) 
218         {
219                 //
220                 // PDE Entry marked non-user
221                 //      
222                 PrintDebug(info->vm_info, info, "Shadow Paging User access error (shadow_pde_access=0x%x, guest_pde_access=0x%x)\n", 
223                         shadow_pde_access, guest_pde_access);
224                 if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) 
225                 {
226                         PrintError(info->vm_info, info, "Could not inject guest page fault\n");
227                         return -1;
228                 }
229                 return 0;
230     } else if ((shadow_pde_access == PT_ACCESS_WRITE_ERROR) && 
231                (guest_pde->large_page == 1)) 
232         {
233
234                 ((pde32_4MB_t *)guest_pde)->dirty = 1;
235                 shadow_pde->writable = guest_pde->writable;
236                 shadow_pde_bd->writable = guest_pde->writable;
237                 return 0;
238     } else if ((shadow_pde_access != PT_ACCESS_NOT_PRESENT) &&
239                (shadow_pde_access != PT_ACCESS_OK)) 
240         {
241                 // inject page fault in guest
242                 //
243                 //unknown error
244                 //
245                 if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
246                         PrintError(info->vm_info, info, "Could not inject guest page fault\n");
247                         return -1;
248                 }
249                 PrintDebug(info->vm_info, info, "Unknown Error occurred (shadow_pde_access=%d)\n", shadow_pde_access);
250                 PrintDebug(info->vm_info, info, "Manual Says to inject page fault into guest\n");
251                 return 0;
252     }
253
254         pte32pae_t * shadow_pt = NULL;
255         pte32pae_t * shadow_pt_bd = NULL;
256         pte32_t * guest_pt = NULL;
257         
258     // get the next shadow level (the page tables); if not present, fill both buddy PDEs with newly allocated tables
259     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) 
260     {
261         // Check if  we can use large pages and the guest memory is properly aligned
262         // to potentially use a large page
263
264         if ((info->use_large_pages == 1) && (guest_pde->large_page == 1)) 
265                 {
266                         addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
267                         uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);
268             
269                         if (page_size == PAGE_SIZE_4MB) 
270                         {
271
272                                 if (shadow_pde != shadow_pde_sd) // make shadow_pde refer to the even-indexed buddy (low index bit 0)
273                                 {
274                                         pde32pae_t * tmp_addr = shadow_pde;
275                                         shadow_pde = shadow_pde_bd;  
276                                         shadow_pde_bd = tmp_addr;
277                                 }
278                                 if (handle_4MB_shadow_pagefault_pde_32(info, fault_addr, error_code, shadow_pde_access,
279                                                        (pde32pae_2MB_t *)shadow_pde,(pde32pae_2MB_t *)shadow_pde_bd, (pde32_4MB_t *)guest_pde) == -1) 
280                                 {
281                                         PrintError(info->vm_info, info, "Error handling large pagefault with large page\n");
282                                         return -1;
283                                 }
284                                 return 0;
285                         } 
286             // Fallthrough to handle the region with small pages
287                 }       
288                 
289                 struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
290                 struct shadow_page_data * shdw_page_bd = create_new_shadow_pt(info);
291                 shadow_pt = (pte32pae_t *)V3_VAddr((void *)shdw_page->page_pa);
292                 shadow_pt_bd = (pte32pae_t *)V3_VAddr((void *)shdw_page_bd->page_pa);
293                 PrintDebug(info->vm_info, info, "Creating new shadow PTs: %p and %p\n", shadow_pt, shadow_pt_bd);
294
295                 shadow_pde->present = 1;
296                 shadow_pde_bd->present = 1;
297                 shadow_pde->user_page = guest_pde->user_page;   
298                 shadow_pde_bd->user_page = guest_pde->user_page;
299
300                 if (guest_pde->large_page == 0) {
301                         shadow_pde->writable = guest_pde->writable;
302                         shadow_pde_bd->writable = guest_pde->writable;
303                 } 
304                 else {
305                         ((pde32pae_2MB_t *)guest_pde)->vmm_info = V3_LARGE_PG;
306
307                         if (error_code.write) {
308                                 shadow_pde->writable = guest_pde->writable;
309                                 shadow_pde_bd->writable = guest_pde->writable;
310                                 ((pde32pae_2MB_t *)guest_pde)->dirty = 1;       
311                         } 
312                         else {
313                                 shadow_pde->writable = 0;
314                                 shadow_pde_bd->writable = 0;
315                                 ((pde32pae_2MB_t *)guest_pde)->dirty = 0;
316                         } 
317                 }               
318         
319                 // VMM Specific options
320                 shadow_pde->write_through = guest_pde->write_through;
321                 shadow_pde->cache_disable = guest_pde->cache_disable;
322                 shadow_pde->global_page = guest_pde->global_page;
323                 
324                 shadow_pde_bd->write_through = guest_pde->write_through;
325                 shadow_pde_bd->cache_disable = guest_pde->cache_disable;
326                 shadow_pde_bd->global_page = guest_pde->global_page;
327                 //
328                 guest_pde->accessed = 1;
329                 
330                 shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
331                 shadow_pde_bd->pt_base_addr = PAGE_BASE_ADDR(shdw_page_bd->page_pa);
332     } 
333         else {
334           if ((info->use_large_pages == 1) && (guest_pde->large_page == 1) && (guest_pde->vmm_info == V3_SHADOW_LARGE_PAGE)) 
335                 {
336                         addr_t guest_pa = BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t *)guest_pde)->page_base_addr);
337                         uint32_t page_size = v3_get_max_page_size(info, guest_pa, PROTECTED);   
338                         if (page_size == PAGE_SIZE_4MB) 
339                         {
340                                 if (shadow_pde_access == PT_ACCESS_OK) {
341                                         // Inconsistent state...
342                                         // Guest Re-Entry will flush tables and everything should now work
343                                         PrintDebug(info->vm_info, info, "Inconsistent state PDE... Guest re-entry should flush tlb\n");
344                     //PrintDebug(info->vm_info, info, "Bug here: shadow_pde_access is %d page_size is %d\n",
345                                         //         (uint_t)shadow_pde_access,(uint_t)page_size);
346                                         return 0;
347                                 }
348                         } 
349                 }
350                 shadow_pt = (pte32pae_t *)V3_VAddr((void *)BASE_TO_PAGE_ADDR(shadow_pde->pt_base_addr));
351     }
352         
353     if (guest_pde->large_page == 0) 
354         {
355                 if (v3_gpa_to_hva(info, BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr), (addr_t*)&guest_pt) == -1) 
356                 {
357                         // Machine check the guest
358                         PrintDebug(info->vm_info, info, "Invalid Guest PTE Address: 0x%p\n", (void *)BASE_TO_PAGE_ADDR(guest_pde->pt_base_addr));
359                         v3_raise_exception(info, MC_EXCEPTION);
360                         return 0;
361                 }       
362                 if (handle_pte_shadow_pagefault_32(info, fault_addr, error_code, shadow_pt, guest_pt) == -1) 
363                 {
364                         PrintError(info->vm_info, info, "Error handling Page fault caused by PDE\n");
365                         return -1;
366                 }
367         }
368         else {
369                 //
370                 //use 4K pages to implement large page; ignore for now
371                 //
372                 if (handle_4MB_shadow_pagefault_pte_32(info, fault_addr, error_code, shadow_pt, (pde32_4MB_t *)guest_pde) == -1) 
373                 {
374                         PrintError(info->vm_info, info, "Error handling large pagefault\n");
375                         return -1;
376                 }        
377     }   
378         
379         return 0;
380 }
381         
382         
383 static int handle_pte_shadow_pagefault_32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
384                                           pte32pae_t * shadow_pt, pte32_t * guest_pt) 
385 {
386     pt_access_status_t guest_pte_access;
387     pt_access_status_t shadow_pte_access;
388     pte32_t * guest_pte = (pte32_t *)&(guest_pt[PTE32_INDEX(fault_addr)]);
389     pte32pae_t * shadow_pte = (pte32pae_t *)&(shadow_pt[PTE32PAE_INDEX(fault_addr)]);
390     addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) +  PAGE_OFFSET(fault_addr);
391
392      PrintDebug(info->vm_info, info, "Handling PTE fault\n");
393
394     struct v3_mem_region * shdw_reg =  v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
395
396     if (shdw_reg == NULL) {
397                 // Inject a machine check in the guest
398                 PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
399                 v3_raise_exception(info, MC_EXCEPTION);
400                 return 0;
401     }
402
403     // Check the guest page permissions
404     guest_pte_access = v3_can_access_pte32(guest_pt, fault_addr, error_code);
405
406     // Check the shadow page permissions
407     shadow_pte_access = v3_can_access_pte32pae(shadow_pt, fault_addr, error_code);
408   
409   
410     /* Was the page fault caused by the Guest's page tables? */
411     if (v3_is_guest_pf(guest_pte_access, shadow_pte_access) == 1) 
412         {
413
414                 PrintDebug(info->vm_info, info, "Access error injecting pf to guest (guest access error=%d) (pf error code=%d)\n", 
415                    guest_pte_access, *(uint_t*)&error_code);
416         
417
418                 //   inject:
419                 if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
420                         PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
421                         return -1;
422                 }       
423
424                 return 0; 
425     }
426
427   
428   
429     if (shadow_pte_access == PT_ACCESS_OK) 
430         {
431                 // Inconsistent state...
432                 // Guest Re-Entry will flush page tables and everything should now work
433                 PrintDebug(info->vm_info, info, "Inconsistent state PTE... Guest re-entry should flush tlb\n");
434                 PrintDebug(info->vm_info, info, "guest_pte_access is %d and shadow_pte_access is %d\n", (uint_t)guest_pte_access, 
435                            (uint_t)shadow_pte_access);
436                 PrintDebug(info->vm_info, info, "Error_code: write 0x%x, present 0x%x, user 0x%x, rsvd_access 0x%x, ifetch 0x%x \n",  error_code.write,error_code.present,error_code.user,error_code.rsvd_access,error_code.ifetch);
437                 PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
438                 PrintGuestPageTree(info,fault_addr,(addr_t)(info->shdw_pg_state.guest_cr3));
439                 return 0;
440     }
441
442
443     if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) 
444         {
445                 // Page Table Entry Not Present
446                 PrintDebug(info->vm_info, info, "guest_pa =%p\n", (void *)guest_pa);
447
448                 if ((shdw_reg->flags.alloced == 1) && (shdw_reg->flags.read == 1)) 
449                 {
450                         addr_t shadow_pa = 0;
451
452                         if (v3_gpa_to_hpa(info, guest_pa, &shadow_pa) == -1) 
453                         {
454                                 PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_pa);
455                                 return -1;
456                         }
457
458                         shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
459
460                         PrintDebug(info->vm_info, info, "\tMapping shadow page (%p)\n", (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
461       
462                         shadow_pte->present = guest_pte->present;
463                         shadow_pte->user_page = guest_pte->user_page;
464       
465                         //set according to VMM policy
466                         shadow_pte->write_through = guest_pte->write_through;
467                         shadow_pte->cache_disable = guest_pte->cache_disable;
468                         shadow_pte->global_page = guest_pte->global_page;
469                         //
470       
471                         guest_pte->accessed = 1;
472       
473                         if (guest_pte->dirty == 1) {
474                                 shadow_pte->writable = guest_pte->writable;
475                         } 
476                         else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
477                                 shadow_pte->writable = guest_pte->writable;
478                                 guest_pte->dirty = 1;
479                         } 
480                         else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
481                                 shadow_pte->writable = 0;
482                         }
483
484                         // Write hooks trump all, and are set Read Only
485                         if (shdw_reg->flags.write == 0) {
486                                 shadow_pte->writable = 0;
487                         }       
488
489                 } 
490                 else {
491                         // Page fault on unhandled memory region
492             
493                         if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
494                                 PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
495                                 return -1;
496                         }
497                 }
498     } 
499         else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) 
500         {
501                 guest_pte->dirty = 1;
502
503                 if (shdw_reg->flags.write == 1) {
504                         PrintDebug(info->vm_info, info, "Shadow PTE Write Error\n");
505                         shadow_pte->writable = guest_pte->writable;
506                 } 
507                 else {
508                         if (shdw_reg->unhandled(info, fault_addr, guest_pa, shdw_reg, error_code) == -1) {
509                                 PrintError(info->vm_info, info, "Special Page fault handler returned error for address: %p\n",  (void *)fault_addr);
510                                 return -1;
511                         }
512                 }
513                 return 0;
514     } 
515         else {
516                 // Inject page fault into the guest     
517                 if (v3_inject_guest_pf(info, fault_addr, error_code) == -1) {
518                         PrintError(info->vm_info, info, "Could not inject guest page fault for vaddr %p\n", (void *)fault_addr);
519                         return -1;
520                 }
521
522                 PrintError(info->vm_info, info, "PTE Page fault fell through... Not sure if this should ever happen\n");
523                 PrintError(info->vm_info, info, "Manual Says to inject page fault into guest\n");
524                 return -1;
525     }
526
527     return 0;
528 }       
529         
530         
531 // Handle a guest 4MB page fault by mapping a pair of 2MB pages in the shadow PDE buddies
532 static int handle_4MB_shadow_pagefault_pde_32(struct guest_info * info, 
533                                      addr_t fault_addr, pf_error_t error_code, 
534                                      pt_access_status_t shadow_pde_access,
535                                      pde32pae_2MB_t * large_shadow_pde, pde32pae_2MB_t * large_shadow_pde_bd,
536                                          pde32_4MB_t * large_guest_pde) 
537 {
538         addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);  
539
540     PrintDebug(info->vm_info, info, "Handling 4MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
541     PrintDebug(info->vm_info, info, "LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
542
543     struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
544
545  
546     if (shdw_reg == NULL) {
547                 // Inject a machine check in the guest
548                 PrintDebug(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
549                 v3_raise_exception(info, MC_EXCEPTION);
550                 return -1;
551     }
552         
553         // dead code: the caller invokes this handler only when shadow_pde_access == PT_ACCESS_NOT_PRESENT
554     if (shadow_pde_access == PT_ACCESS_OK) {
555                 // Inconsistent state...
556                 // Guest Re-Entry will flush tables and everything should now work
557                 PrintDebug(info->vm_info, info, "Inconsistent state 4MB pde... Guest re-entry should flush tlb\n");
558                 return 0;
559     }
560
561   
562     if (shadow_pde_access == PT_ACCESS_NOT_PRESENT) 
563         {
564                 // Get the guest physical address of the fault
565
566                 if ((shdw_reg->flags.alloced == 1) && 
567                         (shdw_reg->flags.read  == 1)) 
568                 {
569                         addr_t shadow_pa = 0;
570
571
572                         if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) 
573                         {
574                                 PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
575                                 return -1;
576                         }
577
578                         PrintDebug(info->vm_info, info, "shadow PA = %p\n", (void *)shadow_pa);
579
580
581               large_guest_pde->vmm_info = V3_SHADOW_LARGE_PAGE; /* For invalidations */
582               // the even-indexed shadow PDE maps the lower 2MB half of the guest 4MB page; its buddy maps the upper half
583             large_shadow_pde->page_base_addr = PAGE_BASE_ADDR_4MB(shadow_pa) << 1;           // 2MB frame number of the lower half
584                         large_shadow_pde_bd->page_base_addr = (PAGE_BASE_ADDR_4MB(shadow_pa) << 1) | 1;  // upper half
585                         
586                         // large_shadow_pde->large_page = 1;
587             large_shadow_pde->present = 1;
588             large_shadow_pde->user_page = 1;
589                         
590             //          large_shadow_pde_bd->large_page = 1;
591             large_shadow_pde_bd->present = 1;
592             large_shadow_pde_bd->user_page = 1;
593
594             PrintDebug(info->vm_info, info, "\tMapping shadow pages (%p) and (%p)\n", 
595                                                 (void *)BASE_TO_PAGE_ADDR_2MB(large_shadow_pde->page_base_addr),
596                                                 (void *)BASE_TO_PAGE_ADDR_2MB(large_shadow_pde_bd->page_base_addr));
597
598             if (shdw_reg->flags.write == 0) {
599                 large_shadow_pde->writable = 0;
600                                 large_shadow_pde_bd->writable = 0;
601             } else {
602                 large_shadow_pde_bd->writable = 1;
603                                 large_shadow_pde->writable = 1;
604             }
605
606                         //set according to VMM policy
607                         large_shadow_pde->write_through = large_guest_pde->write_through;
608                         large_shadow_pde->cache_disable = large_guest_pde->cache_disable;
609                         large_shadow_pde->global_page = large_guest_pde->global_page;
610
611                         large_shadow_pde_bd->write_through = large_guest_pde->write_through;
612                         large_shadow_pde_bd->cache_disable = large_guest_pde->cache_disable;
613                         large_shadow_pde_bd->global_page = large_guest_pde->global_page;                        
614                 } 
615                 else {
616                         if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
617                                 PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
618                                 return -1;
619                         }
620                 }
621         } 
622         else if (shadow_pde_access == PT_ACCESS_WRITE_ERROR) 
623         {
624
625                 if (shdw_reg->flags.write == 0) {
626                         if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
627                                 PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
628                                 return -1;
629                         }       
630                 }
631
632     } 
633         else {
634                 PrintError(info->vm_info, info, "Error in large page fault handler...\n");
635                 PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
636                 return -1;
637     }
638
639         PrintDebug(info->vm_info, info, "Returning from large page->large page fault handler\n");
640         return 0;
641 }       
642         
643 static int handle_4MB_shadow_pagefault_pte_32(struct guest_info * info, 
644                                               addr_t fault_addr, pf_error_t error_code, 
645                                               pte32pae_t * shadow_pt, pde32_4MB_t * large_guest_pde) 
646 {
647     pt_access_status_t shadow_pte_access = v3_can_access_pte32pae(shadow_pt, fault_addr, error_code);
648     pte32pae_t * shadow_pte = (pte32pae_t *)&(shadow_pt[PTE32PAE_INDEX(fault_addr)]);
649     addr_t guest_fault_pa = BASE_TO_PAGE_ADDR_4MB(large_guest_pde->page_base_addr) + PAGE_OFFSET_4MB(fault_addr);
650     //  struct shadow_page_state * state = &(info->shdw_pg_state);
651
652     PrintDebug(info->vm_info, info, "Handling 4MB PTE fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
653     PrintDebug(info->vm_info, info, "ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
654
655     struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
656
657  
658     if (shdw_reg == NULL) {
659                 // Inject a machine check in the guest
660                 PrintError(info->vm_info, info, "Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
661                 v3_raise_exception(info, MC_EXCEPTION);
662                 return 0;
663     }
664
665     if (shadow_pte_access == PT_ACCESS_OK) {
666                 // Inconsistent state...
667                 // Guest Re-Entry will flush tables and everything should now work
668                 PrintDebug(info->vm_info, info, "Inconsistent state 4MB PTE... Guest re-entry should flush tlb\n");
669                 //PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
670                 return 0;
671     }
672
673   
674     if (shadow_pte_access == PT_ACCESS_NOT_PRESENT) {
675         // Get the guest physical address of the fault
676
677                 if ((shdw_reg->flags.alloced == 1) || 
678                         (shdw_reg->flags.read == 1)) {
679                         addr_t shadow_pa = 0;
680
681                         if (v3_gpa_to_hpa(info, guest_fault_pa, &shadow_pa) == -1) {
682                                 PrintError(info->vm_info, info, "could not translate page fault address (%p)\n", (void *)guest_fault_pa);
683                                 return -1;
684                         }
685
686                         shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
687
688                         shadow_pte->present = 1;
689
690                         /* We are assuming that the PDE entry has precedence
691                         * so the Shadow PDE will mirror the guest PDE settings, 
692                         * and we don't have to worry about them here
693                         * Allow everything
694                         */
695                         shadow_pte->user_page = 1;
696
697                         if (shdw_reg->flags.write == 0) {
698                                 shadow_pte->writable = 0;
699                         } else {
700                                 shadow_pte->writable = 1;
701                         }
702
703                         //set according to VMM policy
704                         shadow_pte->write_through = large_guest_pde->write_through;
705                         shadow_pte->cache_disable = large_guest_pde->cache_disable;
706                         shadow_pte->global_page = large_guest_pde->global_page;
707                         //
708       
709                 } else {
710                         if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
711                                 PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
712                                 return -1;
713                         }
714                 }
715     } else if (shadow_pte_access == PT_ACCESS_WRITE_ERROR) {
716             if (shdw_reg->unhandled(info, fault_addr, guest_fault_pa, shdw_reg, error_code) == -1) {
717                         PrintError(info->vm_info, info, "Special Page Fault handler returned error for address: %p\n", (void *)fault_addr);
718                         return -1;
719                 }
720     } else {
721                 PrintError(info->vm_info, info, "Error in large page fault handler...\n");
722                 PrintError(info->vm_info, info, "This case should have been handled at the top level handler\n");
723                 return -1;
724     }
725
726     //  PrintHostPageTree(info, fault_addr, info->ctrl_regs.cr3);
727     PrintDebug(info->vm_info, info, "Returning from large page->small page fault handler\n");
728     return 0;
729 }       
730         
731 static int invalidation_cb32_64(struct guest_info * info, page_type_t type, 
732                               addr_t vaddr, addr_t page_ptr, addr_t page_pa, 
733                               void * private_data) {
734
735     switch (type) {
736         case PAGE_PDP32PAE:
737             {
738                         pdpe32pae_t * pdp = (pdpe32pae_t *)page_ptr;
739                         pdpe32pae_t * pdpe = &(pdp[PDPE32PAE_INDEX(vaddr)]);
740
741                         if (pdpe->present == 0) {
742                                 return 1;
743                         }
744      
745                         if (pdpe->vmm_info == V3_LARGE_PG) {
746                                 PrintError(info->vm_info, info, "1 Gigabyte pages not supported\n");
747                                 return -1;
748                         }
749
750                         return 0;
751             }
752         case PAGE_PD32PAE:
753             {
754                         pde32pae_t * pd = (pde32pae_t *)page_ptr;
755                         pde32pae_t * pde = &(pd[PDE32PAE_INDEX(vaddr)]);
756                         pde32pae_t * pde_bd = &(pd[GET_BUDDY(PDE32PAE_INDEX(vaddr))]);
757                         if (pde->present == 0) {
758                                 return 1;
759                         }
760       
761                         if (pde->vmm_info == V3_LARGE_PG || pde->vmm_info == V3_SHADOW_LARGE_PAGE) {
762                                 pde->present = 0;
763                                 pde_bd->present = 0;
764                                 return 1;
765                         }
766
767                         return 0;
768             }
769         case PAGE_PT32PAE:
770             {
771                         pte32pae_t * pt = (pte32pae_t *)page_ptr;
772
773                         pt[PTE32PAE_INDEX(vaddr)].present = 0;
774
775                         return 1;
776             }
777         default:
778             PrintError(info->vm_info, info, "Invalid Page Type\n");
779             return -1;
780
781     }
782
783     // should not get here
784     PrintError(info->vm_info, info, "Should not get here....\n");
785     return -1;
786 }       
787
788 static inline int handle_shadow_invlpg_32(struct guest_info * info, addr_t vaddr) {
789     PrintDebug(info->vm_info, info, "INVLPG32PAE - %p\n",(void*)vaddr);
790
791     int ret =  v3_drill_host_pt_32pae(info, info->ctrl_regs.cr3, vaddr, invalidation_cb32_64, NULL);
792     if (ret == -1) {
793                 PrintError(info->vm_info, info, "Page table drill returned error.... \n");
794                 PrintHostPageTree(info, vaddr, info->ctrl_regs.cr3);
795     }
796
797     return (ret == -1) ? -1 : 0; 
798 }
799