Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
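For example, to track a release branch instead (assuming a branch named Release-1.3 exists on the remote; substitute whichever branch you need), run the same command from within the palacios directory:

  git checkout --track -b Release-1.3 origin/Release-1.3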


Commit: Generalization of constraints on page allocation and implementation/use
File:   palacios/src/palacios/mmu/vmm_shdw_pg_kvm.c  (palacios.git)
1 /*
2  * Shadow page cache implementation that has been stolen from Linux's KVM Implementation
3  * This module is licensed under the GPL
4  */
5
6 #include <palacios/vmm_shadow_paging.h>
7 #include <palacios/vmm_ctrl_regs.h>
8
9 #include <palacios/vm_guest.h>
10 #include <palacios/vm_guest_mem.h>
11
12 #include <palacios/vmm_paging.h>
13
14
15 #ifndef V3_CONFIG_DEBUG_SHDW_CACHE
16 #undef PrintDebug
17 #define PrintDebug(fmt, ...)
18 #endif
19
20 #ifdef V3_CONFIG_SHADOW_CACHE
21
22 struct pde_chain {
23     addr_t shadow_pdes[NR_PTE_CHAIN_ENTRIES];
24     struct hlist_node link;
25 };
26
27 struct rmap {
28     addr_t shadow_ptes[RMAP_EXT];
29     struct rmap * more;
30 };
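/* A pde_chain collects the shadow PDE slots that point at a multiply-mapped
 * shadow page (hung off page->shadow_pdes). An rmap records, for one guest
 * frame, the shadow PTEs that map it, chaining additional blocks through
 * 'more' once the inline array of RMAP_EXT entries fills up. */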
31
32 static inline int activate_shadow_pt_32(struct guest_info * core);
33 static inline unsigned shadow_page_table_hashfn(addr_t guest_fn)
34 {
35     return guest_fn;
36 }
37
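/* Simple per-core object cache: shadow_topup_cache() pre-fills cache->objects
 * with up to NR_MEM_OBJS allocations so that the fault-handling path can pop
 * and push objects without calling V3_Malloc()/V3_Free() directly. */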
38 static void *shadow_cache_alloc(struct shadow_cache *mc, size_t size)
39 {
40     void *p;
41     if (!mc->nobjs) {
42         PrintDebug(info->vm_info, info, "at shadow_cache_alloc mc->nobjs non-exist\n");
43     }
44
45     p = mc->objects[--mc->nobjs];
46     memset(p, 0, size);
47     return p;
48
49 }
50
51 static void shadow_cache_free(struct shadow_cache *mc, void *obj)
52 {
53     if (mc->nobjs < NR_MEM_OBJS) {
54         mc->objects[mc->nobjs++] = obj;
55     }
56     else V3_Free(obj);
57 }
58
59 static struct rmap *shadow_alloc_rmap(struct guest_info *core)
60 {       
61     return shadow_cache_alloc(&core->shadow_rmap_cache,sizeof(struct rmap));
62 }
63
64 static void shadow_free_rmap(struct guest_info *core,struct rmap *rd)
65 {
66     return shadow_cache_free(&core->shadow_rmap_cache,rd);
67 }
68
69 int shadow_topup_cache(struct shadow_cache * cache, size_t objsize, int min) {
70
71     void  *obj;
72
73     if (cache->nobjs >= min) return 0;
74     while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
75         obj = V3_Malloc(objsize);
76         if (!obj) {
77             PrintDebug(info->vm_info, info, "at shadow_topup_cache obj alloc fail\n");
78             return -1;
79         }
80         cache->objects[cache->nobjs++] = obj;
81     }
82     return 0;
83                 
84 }
85
86 static int shadow_topup_caches(struct guest_info * core) {
87     int r;
88         
89     r = shadow_topup_cache(&core->shadow_pde_chain_cache, 
90                 sizeof(struct pde_chain), 4);
91
92     if (r) goto out;
93
94     r = shadow_topup_cache(&core->shadow_rmap_cache, 
95                 sizeof(struct rmap), 1);
96
97 out:
98         return r;
99 }
100
101 static struct pde_chain *shadow_alloc_pde_chain(struct guest_info *core)
102 {
103     return shadow_cache_alloc(&core->shadow_pde_chain_cache,
104                 sizeof(struct pde_chain));
105 }
106
107 static void shadow_free_pde_chain(struct guest_info *core, struct pde_chain *pc)
108 {
109     PrintDebug(info->vm_info, info, "shdw_free_pdechain: start\n");
110     shadow_cache_free(&core->shadow_pde_chain_cache, pc);
111     PrintDebug(info->vm_info, info, "shdw_free_pdechain: return\n");
112 }
113
114
115
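/* Note that shadow_free_page() recycles rather than destroys the entry: the
 * old backing page is released and a fresh 4KB page (constrained to the low
 * 4GB) is immediately allocated and placed back on the per-core free list. */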
116 static void shadow_free_page (struct guest_info * core, struct shadow_page_cache_data * page) 
117 {
118     list_del(&page->link);
119
120     V3_FreePages((void *)page->page_pa, 1);
121     
122     // presumably the same page could be used for 32 or 64 bit tables, so, we'll make it 
123     // uniformly compatible
124     page->page_pa=(addr_t)V3_AllocPagesExtended(1,PAGE_SIZE_4KB,-1,V3_ALLOC_PAGES_CONSTRAINT_4GB);
125
126     if (!page->page_pa) { 
127         PrintError(info->vm_info, info, "Could not reallocate page while freeing shadow page\n");
128         return;
129     }
130         
131     list_add(&page->link,&core->free_pages);
132     ++core->n_free_shadow_pages;        
133         
134 }
135
136 static struct shadow_page_cache_data * shadow_alloc_page(struct guest_info * core, addr_t shadow_pde) {
137
138     struct shadow_page_cache_data * page;
139
140     if (list_empty(&core->free_pages)) return NULL;
141
142     page = list_entry(core->free_pages.next, struct shadow_page_cache_data, link);
143     list_del(&page->link);
144
145     list_add(&page->link, &core->active_shadow_pages);
146     page->multimapped = 0;
147     page->shadow_pde = shadow_pde;
148     --core->n_free_shadow_pages;
149         
150     PrintDebug(info->vm_info, info, "alloc_page: n_free_shdw_pg %d page_pa %p page_va %p\n",
151                 core->n_free_shadow_pages,(void *)(page->page_pa),V3_VAddr((void *)(page->page_pa)));
152
153     addr_t shdw_page = (addr_t)V3_VAddr((void *)(page->page_pa));
154     memset((void *)shdw_page, 0, PAGE_SIZE_4KB);
155         
156     return page;
157         
158 }
159
160 static void shadow_zap_page(struct guest_info * core, struct shadow_page_cache_data * page);
161
162 static void free_shadow_pages(struct guest_info * core)
163 {
164     struct shadow_page_cache_data *page;
165
166     while (!list_empty(&core->active_shadow_pages)) {
167         page = container_of(core->active_shadow_pages.next,
168                                     struct shadow_page_cache_data, link);
169         shadow_zap_page(core, page);
170     }
171         
172     while (!list_empty(&core->free_pages)) {
173         page = list_entry(core->free_pages.next, struct shadow_page_cache_data, link);
174         list_del(&page->link);
175         V3_FreePages((void *)page->page_pa, 1);
176         page->page_pa = ~(addr_t)0; //invalid address
177     }
178 }
179
180 static int alloc_shadow_pages(struct guest_info * core)
181 {
182     int i;
183     struct shadow_page_cache_data * page_header = NULL;
184
185     for (i = 0; i < NUM_SHADOW_PAGES; i++) {
186         page_header = &core->page_header_buf[i];
187
188         INIT_LIST_HEAD(&page_header->link);
189         // presumably the same page could be used for 32 or 64 bit tables, so, we'll make it 
190         // uniformly compatible
191         page_header->page_pa=(addr_t)V3_AllocPagesExtended(1,PAGE_SIZE_4KB,-1,
192                                                             V3_ALLOC_PAGES_CONSTRAINT_4GB);
193         if (!(page_header->page_pa)) {
194             PrintError(info->vm_info, info, "Allocation failed in allocating shadow page\n");
195             goto error_1;
196         }
197         addr_t shdw_page = (addr_t)V3_VAddr((void *)(page_header->page_pa));
198         memset((void *)shdw_page, 0, PAGE_SIZE_4KB);
199
200         list_add(&page_header->link, &core->free_pages);
201         ++core->n_free_shadow_pages;
202         PrintDebug(info->vm_info, info, "alloc_shdw_pg: n_free_shdw_pg %d page_pa %p\n",
203                 core->n_free_shadow_pages,(void*)page_header->page_pa);
204     }
205     return 0;
206
207 error_1:
208     free_shadow_pages(core);
209     return -1; //out of memory
210
211 }
212
213 static void shadow_page_add_shadow_pde(struct guest_info * core, 
214         struct shadow_page_cache_data * page, addr_t shadow_pde) 
215 {
216     struct pde_chain *pde_chain;
217     struct hlist_node *node;
218     int i;
219     addr_t old;
220
221     if(!shadow_pde) {
222         return; 
223     }
224
225     if (!page->multimapped) {
226         old = page->shadow_pde;
227
228         if(!old) {
229             page->shadow_pde = shadow_pde;
230             return;
231         }
232
233         page->multimapped = 1;
234         pde_chain = shadow_alloc_pde_chain(core);
235         INIT_HLIST_HEAD(&page->shadow_pdes);
236         hlist_add_head(&pde_chain->link,&page->shadow_pdes);
237         pde_chain->shadow_pdes[0] = old;                
238     }
239         
240     hlist_for_each_entry(pde_chain, node, &page->shadow_pdes, link) {
241         if (pde_chain->shadow_pdes[NR_PTE_CHAIN_ENTRIES-1]) continue;
242         for(i=0; i < NR_PTE_CHAIN_ENTRIES; ++i)
243             if (!pde_chain->shadow_pdes[i]) {
244                 pde_chain->shadow_pdes[i] = shadow_pde;
245                 return;
246             }
247     }
248
249     pde_chain = shadow_alloc_pde_chain(core);
250     //error msg
251     hlist_add_head(&pde_chain->link,&page->shadow_pdes);
252     pde_chain->shadow_pdes[0] = shadow_pde;
253         
254 }
255
256 static void shadow_page_remove_shadow_pde(struct guest_info * core, 
257         struct shadow_page_cache_data * page, addr_t shadow_pde) 
258 {
259
260     struct pde_chain * pde_chain;
261     struct hlist_node * node;
262     int i;
263
264     PrintDebug(info->vm_info, info, "rm_shdw_pde: multimap %d\n", page->multimapped);
265     if(!page->multimapped) {
266         PrintDebug(info->vm_info, info, "rm_shdw_pde: no multimap\n");
267         if(page->shadow_pde !=  shadow_pde) 
268             PrintDebug(info->vm_info, info, "rm_shdw_pde: error page->shadow_pde is not equal to shadow_pde\n");
269         page->shadow_pde = 0;
270         PrintDebug(info->vm_info, info, "rm_shdw_pde: return\n");
271         return;
272     }
273         
274     PrintDebug(info->vm_info, info, "rm_shdw_pde: multimap\n");
275
276     hlist_for_each_entry (pde_chain, node, &page->shadow_pdes, link)
277     for (i=0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
278         if(!pde_chain->shadow_pdes[i]) break;
279         if(pde_chain->shadow_pdes[i] != shadow_pde) continue;
280
281         PrintDebug(info->vm_info, info, "rm_shdw_pde: found shadow_pde at i %d\n",i);
282         while (i+1 < NR_PTE_CHAIN_ENTRIES && pde_chain->shadow_pdes[i+1]) {
283             pde_chain->shadow_pdes[i] = pde_chain->shadow_pdes[i+1];
284             ++i;
285         }
286         pde_chain->shadow_pdes[i] = 0;
287
288         if(i==0) {
289             PrintDebug(info->vm_info, info, "rm_shdw_pde: only one!\n");
290             hlist_del(&pde_chain->link);                                
291             shadow_free_pde_chain(core, pde_chain);
292             if(hlist_empty(&page->shadow_pdes)) {
293                 page->multimapped = 0;
294                 page->shadow_pde = 0;
295             }
296         }
297
298         PrintDebug(info->vm_info, info, "rm_shdw_pde: return\n");
299         return;
300     }
301     PrintDebug(info->vm_info, info, "rm_shdw_pde: return\n");
302 }
303
304 static void shadow_page_search_shadow_pde (struct guest_info* core, addr_t shadow_pde, 
305         addr_t guest_pde, unsigned hlevel) {
306
307     struct shadow_page_cache_data* shdw_page;
308     unsigned index;
309     struct hlist_head* bucket;
310     struct hlist_node* node;
311     int hugepage_access = 0;
312     union shadow_page_role role;
313     addr_t pt_base_addr = 0;
314     int metaphysical = 0;
315
316     PrintDebug(info->vm_info, info, "shadow_page_search_shadow_pde\n");
317     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
318
319     if (mode == PROTECTED) {
320
321         PrintDebug(info->vm_info, info, "shadow_page_search_shadow_pde: PROTECTED\n");
322         pt_base_addr = ((pde32_t*)guest_pde)->pt_base_addr;
323         
324         if(((pde32_t*)guest_pde)->large_page == 1) {
325             PrintDebug(info->vm_info, info, "shadow_page_search_shadow_pde: large page\n");
326             hugepage_access = (((pde32_4MB_t *) guest_pde)->writable) | (((pde32_4MB_t*)guest_pde)->user_page << 1);
327             metaphysical = 1;
328             pt_base_addr = (addr_t) PAGE_BASE_ADDR(BASE_TO_PAGE_ADDR_4MB(((pde32_4MB_t*)guest_pde)->page_base_addr));
329         }
330                         
331         role.word = 0; 
332         role.glevels = PT32_ROOT_LEVEL; //max level
333         role.hlevels = PT_PAGE_TABLE_LEVEL;
334         role.metaphysical = metaphysical;
335         role.hugepage_access = hugepage_access;
336                 
337     } else if (mode == LONG_32_COMPAT || mode == LONG) {
338
339         PrintDebug(info->vm_info, info, "shadow_page_search_shadow_pde: LONG_32_COMPAT/LONG\n");
340         pt_base_addr = ((pde64_t*)guest_pde)->pt_base_addr;
341         role.word = 0;
342                 
343         if(hlevel == PT_DIRECTORY_LEVEL) { 
344             if(((pde64_t*)guest_pde)->large_page == 1) {
345                 hugepage_access = (((pde64_2MB_t *) guest_pde)->writable) | (((pde64_2MB_t*)guest_pde)->user_page << 1);
346                 metaphysical = 1;
347                 pt_base_addr = (addr_t) PAGE_BASE_ADDR(BASE_TO_PAGE_ADDR_2MB(((pde64_2MB_t*)guest_pde)->page_base_addr));
348             }   
349             role.hlevels = PT_PAGE_TABLE_LEVEL;
350                 
351         } else if(hlevel == PT32E_ROOT_LEVEL) {
352             if(((pdpe64_t*)guest_pde)->large_page == 1) {
353                 hugepage_access = (((pdpe64_1GB_t *) guest_pde)->writable) | (((pdpe64_1GB_t*)guest_pde)->user_page << 1);
354                 metaphysical = 1;
355                 pt_base_addr = (addr_t) PAGE_BASE_ADDR(BASE_TO_PAGE_ADDR_1GB(((pdpe64_1GB_t*)guest_pde)->page_base_addr));
356             }
357             role.hlevels = PT_DIRECTORY_LEVEL;
358                 
359         } else if(hlevel == PT64_ROOT_LEVEL) {          
360             if(((pdpe64_t*)guest_pde)->large_page == 1) {
361                 hugepage_access = (((pdpe64_1GB_t *) guest_pde)->writable) | (((pdpe64_1GB_t*)guest_pde)->user_page << 1);
362                 metaphysical = 1;
363                 pt_base_addr = (addr_t) PAGE_BASE_ADDR(BASE_TO_PAGE_ADDR_1GB(((pdpe64_1GB_t*)guest_pde)->page_base_addr));
364             }
365             role.hlevels = PT32E_ROOT_LEVEL;
366
367         }
368                         
369         // role.word was cleared above, so role.hlevels set in the branches is preserved
370         role.glevels = PT64_ROOT_LEVEL; //store numeric
371         role.metaphysical = metaphysical;
372         role.hugepage_access = hugepage_access; 
373
374     }
375
376     index = shadow_page_table_hashfn(pt_base_addr) % NUM_SHADOW_PAGES;
377     bucket = &core->shadow_page_hash[index];
378
379     hlist_for_each_entry(shdw_page, node, bucket, hash_link) 
380     if (shdw_page->guest_fn == pt_base_addr  && shdw_page->role.word == role.word ) {
381         PrintDebug(info->vm_info, info, "shadow_page_search_shadow_pde: found\n");
382         shadow_page_remove_shadow_pde(core, shdw_page, (addr_t)shadow_pde);
383         
384     } 
385     return;
386
387 }
388
389 static struct shadow_page_cache_data * shadow_page_lookup_page(struct guest_info *core, addr_t guest_fn, int opt) //purpose of this is write protection 
390 {
391     unsigned index;
392     struct hlist_head * bucket;
393     struct shadow_page_cache_data * page;
394     struct hlist_node * node;
395         
396     PrintDebug(info->vm_info, info, "lookup: guest_fn addr %p\n",(void *)BASE_TO_PAGE_ADDR(guest_fn));
397         
398     index = shadow_page_table_hashfn(guest_fn) % NUM_SHADOW_PAGES;
399     bucket = &core->shadow_page_hash[index];
400     PrintDebug(info->vm_info, info, "lookup: index %d bucket %p\n",index,(void*)bucket);
401
402     hlist_for_each_entry(page, node, bucket, hash_link)
403         if (opt == 0) {
404             PrintDebug(info->vm_info, info, "lookup: page->gfn %p gfn %p metaphysical %d\n",
405                 (void*)BASE_TO_PAGE_ADDR(page->guest_fn),(void*)BASE_TO_PAGE_ADDR(guest_fn),page->role.metaphysical);
406             if (page->guest_fn == guest_fn && !page->role.metaphysical) {
407                 return page;
408             }
409         }
410         else if(page->guest_fn == guest_fn) { 
411             return page; 
412         }
413         
414     return NULL;        
415 }
416
417 static void rmap_remove(struct guest_info * core, addr_t shadow_pte);
418 static void rmap_write_protect(struct guest_info * core, addr_t guest_fn);
419
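/* Central lookup/allocation path: find the shadow page for (guest_fn, role)
 * in the hash table, or take one from the free list, link it into the hash
 * bucket, and write-protect the guest frame so that guest writes to the page
 * table can be intercepted. */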
420 struct shadow_page_cache_data * shadow_page_get_page(struct guest_info *core, 
421                                                       addr_t guest_fn,
422                                                       unsigned level, 
423                                                       int metaphysical,
424                                                       unsigned hugepage_access,
425                                                       addr_t shadow_pde,
426                                                       int force)  // 0: use cache, 1: bypass cache (zap and reallocate), 2: like 0 but without debug output
427 {
428     struct shadow_page_cache_data *page;
429     union shadow_page_role role;
430     unsigned index;
431     struct hlist_head *bucket;
432     struct hlist_node *node;
433     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
434         
435     role.word = 0; 
436     if (mode == REAL || mode == PROTECTED) role.glevels = PT32_ROOT_LEVEL; 
437         //exceptional; long term, the guest paging level should be passed in as an argument
438     else if (mode == PROTECTED_PAE) role.glevels = PT32E_ROOT_LEVEL;
439     else if (mode == LONG || mode == LONG_32_COMPAT) role.glevels = PT64_ROOT_LEVEL;
440     else return NULL;
441         
442         //store numeric
443     role.hlevels = level;
444     role.metaphysical = metaphysical;
445     role.hugepage_access = hugepage_access;
446         
447     index = shadow_page_table_hashfn(guest_fn) % NUM_SHADOW_PAGES;
448     bucket = &core->shadow_page_hash[index];
449
450     if (force != 2) PrintDebug(info->vm_info, info, "get_page: lvl %d idx %d gfn %p role %x\n", level, index, (void *)guest_fn,role.word);
451
452     hlist_for_each_entry(page, node, bucket, hash_link)
453         if (page->guest_fn == guest_fn && page->role.word == role.word) {
454             shadow_page_add_shadow_pde(core, page, shadow_pde); //guest_fn is right there
455             if(force != 2) 
456                 PrintDebug(info->vm_info, info, "get_page: found guest_fn %p, index %d, multi %d, next %p\n", 
457                     (void *)page->guest_fn, index, page->multimapped, (void *)page->hash_link.next);
458             if (force == 0 || force == 2) 
459                 return page;
460             else { 
461                 shadow_zap_page(core,page);
462                 goto new_alloc;
463             }
464         } else {
465             if(force != 2) 
466                 PrintDebug(info->vm_info, info, "get_page: not found guest_fn %p, index %d, multimapped %d, next %p\n", 
467                     (void *)page->guest_fn, index, page->multimapped, (void *)page->hash_link.next);
468         }
469
470     if (force != 2) 
471         PrintDebug(info->vm_info, info, "get_page: not found\n");
472
473 new_alloc:
474
475     page=shadow_alloc_page(core, shadow_pde);
476
477     if (!page) return page; 
478
479     page->guest_fn = guest_fn;
480     page->role=role;
481     page->multimapped = 0;
482     page->shadow_pde = 0;
483         
484     if (force != 2) 
485         PrintDebug(info->vm_info, info, "get_page: hadd h->first %p, n %p, n->next %p\n", 
486             (void *)bucket->first, (void *)&page->hash_link, (void *)page->hash_link.next);
487
488     hlist_add_head(&page->hash_link, bucket);
489     shadow_page_add_shadow_pde(core, page, shadow_pde);
490
491     if (force != 2) PrintDebug(info->vm_info, info, "get_page: hadd h->first %p, n %p, n->next %p\n", 
492         (void *)bucket->first, (void *)&page->hash_link, (void *)page->hash_link.next); 
493
494     if (!metaphysical) rmap_write_protect(core, guest_fn); //in case rmapped guest_fn being allocated as pt or pd
495     if (force != 2) PrintDebug(info->vm_info, info, "get_page: return\n");
496
497     return page;
498
499 }
500
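/* Detach all entries of a shadow page: PTE-level pages drop their reverse
 * mappings, while directory-level pages recursively locate and unhook the
 * child shadow pages via shadow_page_search_shadow_pde(). */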
501 static void shadow_page_unlink_children (struct guest_info * core, struct shadow_page_cache_data * page) {
502     unsigned i;
503
504     uint32_t* shdw32_table;
505     uint32_t* shdw32_entry;
506     uint64_t* shdw64_table;
507     uint64_t* shdw64_entry;
508
509     uint32_t* guest32_table;
510     uint32_t* guest32_entry;
511     uint64_t* guest64_table;
512     uint64_t* guest64_entry;
513
514     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
515
516     if(page->role.hlevels == PT_PAGE_TABLE_LEVEL) {             
517
518         if (mode == PROTECTED) {
519
520             shdw32_table = (uint32_t*) V3_VAddr((void *)(addr_t)CR3_TO_PDE32_PA(page->page_pa));                
521             PrintDebug(info->vm_info, info, "ulink_chil: pte lvl\n");
522
523             for (i = 0; i < PT32_ENT_PER_PAGE; ++i) {
524                 shdw32_entry = (uint32_t*)&(shdw32_table[i]);
525                 if (*shdw32_entry & PT_PRESENT_MASK) {
526                     rmap_remove(core, (addr_t)shdw32_entry);
527                     PrintDebug(info->vm_info, info, "ulink_chil: %d pte: shadow %x\n", i, *shdw32_entry);
528                 }
529                 memset((void *)shdw32_entry, 0, sizeof(uint32_t));
530             }
531             PrintDebug(info->vm_info, info, "ulink_chil: return pte\n");
532             return;     
533                         
534         } else if (mode == LONG_32_COMPAT || mode == LONG) {
535
536             shdw64_table = (uint64_t*) V3_VAddr((void *)(addr_t)CR3_TO_PML4E64_PA(page->page_pa));              
537             PrintDebug(info->vm_info, info, "ulink_chil: pte lvl\n");
538
539             for (i = 0; i < PT_ENT_PER_PAGE; ++i) {                     
540                 shdw64_entry = (uint64_t*)&(shdw64_table[i]);
541                 if (*shdw64_entry & PT_PRESENT_MASK) {
542                     rmap_remove(core, (addr_t)shdw64_entry);
543                     PrintDebug(info->vm_info, info, "ulink_chil: %d pte: shadow %p\n", i, (void*)*((uint64_t*)shdw64_entry));
544                 }
545                 memset((void *)shdw64_entry, 0, sizeof(uint64_t));
546             }
547
548             PrintDebug(info->vm_info, info, "ulink_chil: return pte\n");
549             return;                             
550         }
551     }
552
553     PrintDebug(info->vm_info, info, "ulink_chil: pde lvl\n");
554     if (mode == PROTECTED) {
555                 
556         shdw32_table = (uint32_t*) V3_VAddr((void*)(addr_t)CR3_TO_PDE32_PA(page->page_pa));
557
558         if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(page->guest_fn), (addr_t*)&guest32_table) == -1) {
559             PrintError(info->vm_info, info, "Invalid Guest PDE Address: 0x%p\n",  (void *)BASE_TO_PAGE_ADDR(page->guest_fn));
560             return;
561         } 
562                 
563         for (i = 0; i < PT32_ENT_PER_PAGE; ++i) {
564             int present = 0;
565             shdw32_entry = (uint32_t*)&(shdw32_table[i]);
566             guest32_entry = (uint32_t*)&(guest32_table[i]);
567             present = *shdw32_entry & PT_PRESENT_MASK;
568             if(present) PrintDebug(info->vm_info, info, "ulink_chil: pde %dth: shadow %x\n", i, *((uint32_t*)shdw32_entry));
569             memset((void *)shdw32_entry, 0, sizeof(uint32_t));
570             if (present != 1) continue;
571
572             shadow_page_search_shadow_pde(core, (addr_t)shdw32_entry, (addr_t)guest32_entry, page->role.hlevels);
573         }
574         PrintDebug(info->vm_info, info, "ulink_child: before return at pde level\n");
575         return;
576
577     }else if(mode == LONG_32_COMPAT || mode == LONG)  {
578
579         shdw64_table = (uint64_t*) V3_VAddr((void*)(addr_t)CR3_TO_PML4E64_PA(page->page_pa));
580
581         if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(page->guest_fn), (addr_t*)&guest64_table) == -1) {
582             if(page->role.hlevels == PT_DIRECTORY_LEVEL) 
583                 PrintError(info->vm_info, info, "Invalid Guest PDE Address: 0x%p\n",  (void *)BASE_TO_PAGE_ADDR(page->guest_fn));
584             if(page->role.hlevels == PT32E_ROOT_LEVEL) 
585                 PrintError(info->vm_info, info, "Invalid Guest PDPE Address: 0x%p\n",  (void *)BASE_TO_PAGE_ADDR(page->guest_fn));
586             if(page->role.hlevels == PT64_ROOT_LEVEL) 
587                 PrintError(info->vm_info, info, "Invalid Guest PML4E Address: 0x%p\n",  (void *)BASE_TO_PAGE_ADDR(page->guest_fn));
588             return;     
589         }
590
591         for (i = 0; i < PT_ENT_PER_PAGE; ++i) {
592             int present = 0;
593             shdw64_entry = (uint64_t*)&(shdw64_table[i]);
594             guest64_entry = (uint64_t*)&(guest64_table[i]);
595             present = *shdw64_entry & PT_PRESENT_MASK;
596             if(present) PrintDebug(info->vm_info, info, "ulink_chil: pde: shadow %p\n",(void *)*((uint64_t *)shdw64_entry));
597             memset((void *)shdw64_entry, 0, sizeof(uint64_t));
598             if (present != 1) continue;
599
600             shadow_page_search_shadow_pde(core, (addr_t)shdw64_entry, (addr_t)guest64_entry, page->role.hlevels);
601         }
602         return;         
603
604     }
605     //PrintDebug(info->vm_info, info, "ulink_chil: return pde\n");
606
607 }
608
609 static void shadow_page_put_page(struct guest_info *core, struct shadow_page_cache_data * page, addr_t shadow_pde) { 
610
611         PrintDebug(info->vm_info, info, "put_page: start\n");   
612         shadow_page_remove_shadow_pde(core, page, shadow_pde);
613
614         PrintDebug(info->vm_info, info, "put_page: end\n");
615 }
616
617
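/* Tear down a shadow page: detach every parent PDE pointing at it, unlink
 * its children, then either free it back to the free list or, if it backs
 * the guest's current CR3, keep it on the active list for reuse. */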
618 static void shadow_zap_page(struct guest_info * core, struct shadow_page_cache_data * page) {
619
620     addr_t shadow_pde;
621     addr_t cr3_base_addr = 0;
622     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
623         
624     PrintDebug(info->vm_info, info, "zap: multimapped %d, metaphysical %d\n", page->multimapped, page->role.metaphysical);
625         
626     while (page->multimapped || page->shadow_pde) {
627         if (!page->multimapped) {
628             shadow_pde = page->shadow_pde;              
629         } else {
630             struct pde_chain * chain;
631             chain = container_of(page->shadow_pdes.first, struct pde_chain, link);
632             shadow_pde = chain->shadow_pdes[0];
633         }               
634         shadow_page_put_page(core, page, shadow_pde);
635         PrintDebug(info->vm_info, info, "zap_parent: pde: shadow %p\n",(void *)*((addr_t *)shadow_pde));
636         memset((void *)shadow_pde, 0, sizeof(uint32_t));
637     }
638
639     shadow_page_unlink_children(core, page);
640
641     PrintDebug(info->vm_info, info, "zap: end of unlink\n");
642         
643     if (mode == PROTECTED) {
644         cr3_base_addr =  ((struct cr3_32 *)&(core->shdw_pg_state.guest_cr3))->pdt_base_addr;
645     } else if (mode == LONG_32_COMPAT || mode == LONG) {
646         cr3_base_addr =  ((struct cr3_64 *)&(core->shdw_pg_state.guest_cr3))->pml4t_base_addr;
647     }
648     else return;        
649
650     PrintDebug(info->vm_info, info, "zap: before hlist_del\n");
651     PrintDebug(info->vm_info, info, "zap: page->guest_fn %p\n", (void*) page->guest_fn);
652
653     if (page->guest_fn !=  (addr_t)(cr3_base_addr)) {
654         PrintDebug(info->vm_info, info, "zap: first hlist_del\n");
655
656         hlist_del(&page->hash_link);
657         shadow_free_page(core, page);
658
659     } else {
660         PrintDebug(info->vm_info, info, "zap: second hlist_del\n");
661
662         list_del(&page->link);
663         list_add(&page->link,&core->active_shadow_pages);
664     }           
665
666     PrintDebug(info->vm_info, info, "zap: end hlist_del\n");
667     return;
668 }
669
670 int shadow_zap_hierarchy_32(struct guest_info * core, struct shadow_page_cache_data * page) {
671
672     unsigned i;
673     pde32_t *shadow_pde;
674     pde32_t *shadow_pd;
675     pde32_t *guest_pde;
676     pde32_t *guest_pd;
677         
678     if (page->role.hlevels != 2) return -1;
679
680     shadow_pd = CR3_TO_PDE32_VA(page->page_pa);
681     if (guest_pa_to_host_va(core, BASE_TO_PAGE_ADDR(page->guest_fn), (addr_t*)&guest_pd) == -1) {
682         PrintError(info->vm_info, info, "Invalid Guest PDE Address: 0x%p\n", (void*)BASE_TO_PAGE_ADDR(page->guest_fn));
683         return -1;
684     }
685         
686     for (i=0; i < PT32_ENT_PER_PAGE; ++i) {
687         int present = 0;
688         shadow_pde = (pde32_t*)&(shadow_pd[i]);
689         guest_pde = (pde32_t*)&(guest_pd[i]);
690         present = shadow_pde->present;
691         if (shadow_pde->present) PrintDebug(info->vm_info, info, "ulink_child: pde shadow %x\n", *((uint32_t*)shadow_pde));
692         memset((void*)shadow_pde, 0, sizeof(uint32_t));
693         if (present != 1) continue;
694
695         struct shadow_page_cache_data *shdw_page;
696         unsigned index;
697         struct hlist_head *bucket;
698         struct hlist_node *node;
699         int hugepage_access =0;
700         int metaphysical = 0;
701         union shadow_page_role role;
702         v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
703
704         if (((pde32_t*)guest_pde)->large_page == 1) {
705             hugepage_access = (((pde32_4MB_t*)guest_pde)->writable) | (((pde32_4MB_t*)guest_pde)->user_page << 1);
706             metaphysical = 1;
707         }
708         
709         role.word = 0; 
710         if (mode == REAL || mode == PROTECTED) role.glevels = PT32_ROOT_LEVEL; 
711         //exceptional; long term, the guest paging level should be passed in as an argument
712         else if (mode == PROTECTED_PAE) role.glevels = PT32E_ROOT_LEVEL;
713         else if (mode == LONG || mode == LONG_32_COMPAT) role.glevels = PT64_ROOT_LEVEL;
714         else return -1;
715         
716         role.hlevels = 1;
717         role.metaphysical = metaphysical;
718         role.hugepage_access = hugepage_access;
719
720         index = shadow_page_table_hashfn(guest_pde->pt_base_addr) % NUM_SHADOW_PAGES;
721         bucket = &core->shadow_page_hash[index];
722
723         hlist_for_each_entry(shdw_page, node, bucket, hash_link)
724         if (shdw_page->guest_fn == (guest_pde->pt_base_addr) && (shdw_page->role.word == role.word)) {
725             shadow_zap_page(core, shdw_page);
726         }       
727     }
728
729     shadow_zap_page(core, page);
730     return 0;
731 }
732
733
734 int shadow_unprotect_page(struct guest_info * core, addr_t guest_fn) {
735
736     unsigned index;
737     struct hlist_head * bucket;
738     struct shadow_page_cache_data * page = NULL;
739     struct hlist_node * node;
740     struct hlist_node * n;
741     int r;
742
743     r = 0;
744     index = shadow_page_table_hashfn(guest_fn) % NUM_SHADOW_PAGES;
745     bucket = &core->shadow_page_hash[index];
746     PrintDebug(info->vm_info, info, "unprotect: gfn %p\n",(void *) guest_fn);
747         
748     hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
749     //hlist_for_each_entry(page, node, bucket, hash_link) {
750         if ((page->guest_fn == guest_fn) && !(page->role.metaphysical)) {
751             PrintDebug(info->vm_info, info, "unprotect: match page.gfn %p page.role %x gfn %p\n",(void *) page->guest_fn,page->role.word,(void *)guest_fn);
752             shadow_zap_page(core, page);
753             r = 1;
754         }
755     }
756         
757     PrintDebug(info->vm_info, info, "at shadow_unprotect_page return %d\n",r);
758     return r;
759 }
760
761 /*
762 reverse mapping data structures:
763 if page_private bit zero is zero, then page->private points to the shadow page table entry that points to page address
764 if page_private bit zero is one, then page->private & ~1 points to a struct rmap containing more mappings
765 */
766
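/* In this implementation the per-page "page_private" word lives in
 * core->vm_info.mem_map.base_region.mem_map[], indexed by the host frame
 * number taken from the shadow PTE's page_base_addr field. */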
767 void rmap_add(struct guest_info *core, addr_t shadow_pte) {
768     struct rmap *desc;
769     addr_t page_private = 0;
770     gen_pt_t * shadow_pte_gen;
771     addr_t page_base_addr = 0;
772     addr_t * mem_map;
773     int i;
774     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
775
776     shadow_pte_gen = (gen_pt_t *) shadow_pte;
777
778     if (mode == PROTECTED) {
779         page_base_addr = ((pte32_t *)shadow_pte)->page_base_addr;
780         PrintDebug(info->vm_info, info, "at rmap_add shadow_pte: %x\n", (uint32_t)*((uint32_t*)shadow_pte));
781
782     } else if (mode == LONG_32_COMPAT || mode == LONG) {
783         page_base_addr = ((pte64_t *)shadow_pte)->page_base_addr;
784         PrintDebug(info->vm_info, info, "at rmap_add shadow_pte: %p\n", (void*)*((uint64_t*)shadow_pte));
785
786     }
787     else return;        
788         
789     PrintDebug(info->vm_info, info, "debug rmap: at rmap_add shadow_pte->page_base_addr (%p), shadow_pte_present %d, shadow_pte_writable %d\n", 
790         (void *)BASE_TO_PAGE_ADDR(page_base_addr), (shadow_pte_gen->present), (shadow_pte_gen->writable));
791         
792     if (shadow_pte_gen->present == 0 || shadow_pte_gen->writable == 0)
793         return;
794
795     PrintDebug(info->vm_info, info, "at rmap_add host_fn %p\n", (void *)BASE_TO_PAGE_ADDR(page_base_addr));
796                 
797     mem_map = core->vm_info.mem_map.base_region.mem_map;
798     page_private = mem_map[page_base_addr];
799
800     PrintDebug(info->vm_info, info, "at rmap_add page_private %p\n", (void *)page_private);
801         
802     if (!page_private) {
803         PrintDebug(info->vm_info, info, "at rmap_add initial\n");
804         mem_map[page_base_addr] = (addr_t)shadow_pte;
805         PrintDebug(info->vm_info, info, "rmap_add: shadow_pte %p\n", (void *)shadow_pte);
806
807     } else if (!(page_private & 1)) {
808         PrintDebug(info->vm_info, info, "at rmap_add into multi\n");
809
810         desc = shadow_alloc_rmap(core);
811         desc->shadow_ptes[0] = page_private;
812         desc->shadow_ptes[1] = shadow_pte;
813         mem_map[page_base_addr] = (addr_t)desc | 1;
814         desc->more = NULL;
815         PrintDebug(info->vm_info, info, "rmap_add: desc %p desc|1 %p\n",(void *)desc,(void *)((addr_t)desc |1));
816
817     } else {
818         PrintDebug(info->vm_info, info, "at rmap_add multimap\n");
819         desc = (struct rmap *)(page_private & ~1ul);
820
821         while (desc->more && desc->shadow_ptes[RMAP_EXT-1]) desc = desc->more;
822                 
823         if (desc->shadow_ptes[RMAP_EXT-1]) {
824             desc->more = shadow_alloc_rmap(core);
825             desc = desc->more;
826         }
827
828         for (i = 0; desc->shadow_ptes[i]; ++i) ;
829         desc->shadow_ptes[i] = shadow_pte;              
830     }
831                 
832 }
833
834 static void rmap_desc_remove_entry(struct guest_info *core,
835                                    addr_t * page_private,
836                                    struct rmap *desc,
837                                    int i,
838                                    struct rmap *prev_desc) 
839 {
840     int j;
841
842     for (j = RMAP_EXT - 1; !desc->shadow_ptes[j]  &&  j > i; --j) ;
843     desc->shadow_ptes[i] = desc->shadow_ptes[j];
844     desc->shadow_ptes[j] = 0;
845
846     if (j != 0) {
847         PrintDebug(info->vm_info, info, "rmap_desc_rm: i %d j %d\n",i,j);
848         return;
849     }
850
851     if (!prev_desc && !desc->more) {
852         PrintDebug(info->vm_info, info, "rmap_desc_rm: no more no less\n");
853         *page_private = desc->shadow_ptes[0];
854     } else {            //more should be null
855         if (prev_desc) {
856             PrintDebug(info->vm_info, info, "rmap_desc_rm: no more\n");
857             prev_desc->more = desc->more; 
858         } else {
859             PrintDebug(info->vm_info, info, "rmap_desc_rm: no less\n");
860             *page_private = (addr_t) desc->more | 1;
861         }
862     }
863     shadow_free_rmap(core, desc);
864 }
865
866 static void rmap_remove(struct guest_info * core, addr_t shadow_pte) {
867     struct rmap *desc;
868     struct rmap *prev_desc;
869     addr_t page_private = 0;
870     gen_pt_t * shadow_pte_gen;
871     addr_t page_base_addr = 0;
872     addr_t * mem_map;
873     int i;
874
875     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
876
877     if (mode == PROTECTED) {
878         PrintDebug(info->vm_info, info, "rmap_rm: PROTECTED %d\n", mode);
879         page_base_addr = ((pte32_t *)shadow_pte)->page_base_addr;
880
881     } else if (mode == LONG_32_COMPAT || mode == LONG) {
882         PrintDebug(info->vm_info, info, "rmap_rm: LONG_32_COMPAT/LONG %d\n", mode);
883         page_base_addr = ((pte64_t *)shadow_pte)->page_base_addr;               
884
885     }   else {
886         PrintDebug(info->vm_info, info, "rmap_rm: mode %d\n", mode);
887         return; 
888     }
889     shadow_pte_gen = (gen_pt_t*)shadow_pte;
890
891     if (shadow_pte_gen->present == 0 || shadow_pte_gen->writable == 0) {
892         PrintDebug(info->vm_info, info, "rmap_rm: present %d, write %d, pte %p\n",
893                 shadow_pte_gen->present, shadow_pte_gen->writable,
894                 (void*)*((addr_t*)shadow_pte));
895         return;
896     }
897     PrintDebug(info->vm_info, info, "rmap_rm: shadow_pte->page_base_addr (%p)\n", (void *)BASE_TO_PAGE_ADDR(page_base_addr));
898
899     mem_map = core->vm_info.mem_map.base_region.mem_map;
900     page_private = mem_map[page_base_addr];
901
902     PrintDebug(info->vm_info, info, "rmap_rm: page_private %p page_private&1 %p\n",(void *)page_private,(void*)(page_private&1));
903         
904     if (!page_private) {                
905         PrintDebug(info->vm_info, info, "rmap_rm: no mapping, page_private %p\n",(void *)page_private);
906         
907     } else if (!(page_private & 1)) {   
908         PrintDebug(info->vm_info, info, "rmap_rm: single mapping, page_private %p\n",(void *)page_private);
909         mem_map[page_base_addr] = (addr_t)0;
910
911     } else {
912         PrintDebug(info->vm_info, info, "rmap_rm: multimap, page_private %p\n",(void *)page_private);
913         desc = (struct rmap *)(page_private & ~1ul);
914         prev_desc = NULL;
915         
916         while (desc) {
917             PrintDebug(info->vm_info, info, "rmap_rm: desc loop\n");
918             for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
919             if (desc->shadow_ptes[i] == shadow_pte) {
920                 PrintDebug(info->vm_info, info, "rmap_rm: rmap_desc_remove_entry i %d\n",i);
921                 rmap_desc_remove_entry(core, &mem_map[page_base_addr], desc, i, prev_desc);
922                 return;
923             }
924             prev_desc = desc;
925             desc = desc->more;
926         }
927     }
928 }
929
930 static inline int activate_shadow_pt_32(struct guest_info * core);
931
932 static void rmap_write_protect(struct guest_info * core, addr_t guest_fn) {
933     struct rmap * desc;
934     //pte32_t * shadow_pte;
935     addr_t shadow_pte;
936     addr_t page_private;
937     addr_t host_pa;
938
939     PrintDebug(info->vm_info, info, "rmap_wrprot: gfn %p\n",(void *) guest_fn);
940
941     if (guest_pa_to_host_pa(core, BASE_TO_PAGE_ADDR(guest_fn), &host_pa)!=0) {
942         PrintDebug(info->vm_info, info, "rmap_wrprot: error \n");
943     }
944
945     page_private = core->vm_info.mem_map.base_region.mem_map[PAGE_BASE_ADDR(host_pa)];
946
947     PrintDebug(info->vm_info, info, "rmap_wrprot: host_fn %p\n",(void *)PAGE_BASE_ADDR(host_pa));
948         
949     while(page_private) {
950         PrintDebug(info->vm_info, info, "rmap_wrprot: page_private %p\n", (void*)page_private);
951         if(!(page_private & 1)) {
952             PrintDebug(info->vm_info, info, "rmap_wrprot: reverse desc single\n");
953             shadow_pte = page_private;
954                 
955         } else {
956             desc = (struct rmap *) (page_private & ~1ul);
957             PrintDebug(info->vm_info, info, "rmap_wrprot: reverse desc multimap\n");
958             shadow_pte = desc->shadow_ptes[0];
959         }
960                 
961         PrintDebug(info->vm_info, info, "rmap_wrprot: pg_priv %p, host_fn %p, shdw_pte %p\n",
962                 (void *)page_private, (void *)PAGE_BASE_ADDR(host_pa), (void*)*((uint64_t*)shadow_pte));
963         
964         //CHECKPOINT
965         rmap_remove(core, shadow_pte); 
966
967         //PrintDebug(info->vm_info, info, "rmap_wrprot: shadow_pte->page_base_addr (%p)\n", 
968         //      (void *)BASE_TO_PAGE_ADDR(shadow_pte->page_base_addr));
969
970         ((gen_pt_t *)shadow_pte)->writable = 0;
971         PrintDebug(info->vm_info, info, "rmap_wrprot: %p\n",(void*)*((uint64_t *)shadow_pte));
972                                 
973         page_private = core->vm_info.mem_map.base_region.mem_map[PAGE_BASE_ADDR(host_pa)];
974
975         PrintDebug(info->vm_info, info, "rmap_wrprot: page_private %p\n",(void*)page_private);
976     }   
977
978     PrintDebug(info->vm_info, info, "rmap_wrprot: done\n");
979
980 }
981
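/* Called before emulating a guest write to a write-protected page table
 * page: matching shadow entries are torn down first, and pages that are
 * written repeatedly (the "flood" counter), misaligned, or flagged by
 * 'force' are zapped outright on the assumption that they are no longer
 * being used as page tables. */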
982 void shadow_page_pre_write(struct guest_info * core, addr_t guest_pa, int bytes, int force) {
983 //guest frame number is not guest physical address
984     addr_t guest_fn = PAGE_BASE_ADDR(guest_pa);
985     struct shadow_page_cache_data * page;
986     struct hlist_node *node, *n;
987     struct hlist_head * bucket;
988     unsigned index;
989
990     uint32_t* shdw32_table = NULL;
991     uint32_t* shdw32_entry = NULL;
992     uint64_t* shdw64_table = NULL;
993     uint64_t* shdw64_entry = NULL;
994         
995     unsigned pte_size;
996     unsigned offset = PAGE_OFFSET(guest_pa);
997     unsigned misaligned = 0;
998     int level;
999     int flooded = 0;
1000
1001     v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);
1002
1003     if (guest_fn == core->last_pt_write_guest_fn) {
1004         ++core->last_pt_write_count;
1005         if (core->last_pt_write_count >= 3) flooded = 1;
1006     } else {
1007         core->last_pt_write_guest_fn = guest_fn;
1008         core->last_pt_write_count = 1;
1009     }
1010
1011     PrintDebug(info->vm_info, info, "shdw_pre-write: gpa %p byte %d force %d flood %d last_gfn %p last_cnt %d\n",
1012         (void *)guest_pa,bytes,force,flooded,(void*)core->last_pt_write_guest_fn,core->last_pt_write_count);
1013
1014     index = shadow_page_table_hashfn(guest_fn) % NUM_SHADOW_PAGES;
1015     bucket = &core->shadow_page_hash[index];
1016
1017     PrintDebug(info->vm_info, info, "shdw_pre-write: check point after bucket\n");
1018         
1019     //hlist_for_each_entry_safe(page, node, bucket, hash_link) {
1020     hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
1021
1022         if (page->guest_fn != guest_fn || page->role.metaphysical) continue;
1023
1024         // pte size depends on the guest paging mode: 4 bytes for 32-bit non-PAE, 8 bytes otherwise
1025         pte_size = page->role.glevels == 2 ? 4 : 8;
1026
1027         if (!force) misaligned = (offset ^ (offset + bytes -1)) & ~(pte_size -1);
1028
1029         if (misaligned || flooded || force) {
1030             /*
1031             * Misaligned accesses are too much trouble to fix up;
1032             * they also usually indicate that a page is not being used as a page table
1033             */
1034             PrintDebug(info->vm_info, info, "shdw_pre-write: misaligned\n");
1035             shadow_zap_page(core, page);
1036             continue;
1037         }       
1038
1039         level = page->role.hlevels;             
1040                 
1041         PrintDebug(info->vm_info, info, "shdw_pre-write: found out one page at the level of %d\n", level);
1042         
1043         if (mode == PROTECTED) {
1044             shdw32_table = (uint32_t*)V3_VAddr((void *)(addr_t)BASE_TO_PAGE_ADDR(PAGE_BASE_ADDR(page->page_pa)));
1045             shdw32_entry = (uint32_t*)&(shdw32_table[offset/sizeof(uint32_t)]);
1046
1047             if (*shdw32_entry & PT_PRESENT_MASK) {
1048                 if (level == PT_PAGE_TABLE_LEVEL) {
1049                     PrintDebug(info->vm_info, info, "shdw_pre-write: pte idx %d\n", (unsigned int)(offset/sizeof(uint32_t)));
1050                     rmap_remove(core, (addr_t)shdw32_entry);
1051                     memset((void*)shdw32_entry, 0, sizeof(uint32_t));
1052                 
1053                 } else {
1054                     shadow_page_remove_shadow_pde(core, page, (addr_t)shdw32_entry);
1055                     memset((void*)shdw32_entry, 0, sizeof(uint32_t));                   
1056                 }
1057             }
1058                         
1059         } else if (mode == LONG_32_COMPAT || mode == LONG) {
1060
1061             shdw64_table = (uint64_t*)V3_VAddr((void*)(addr_t)BASE_TO_PAGE_ADDR(PAGE_BASE_ADDR(page->page_pa)));
1062             shdw64_entry = (uint64_t*)&(shdw64_table[offset/sizeof(uint64_t)]);
1063
1064             if (*shdw64_entry & PT_PRESENT_MASK) {
1065                 if (level == PT_PAGE_TABLE_LEVEL) {
1066                     PrintDebug(info->vm_info, info, "shdw_pre-write: pte idx %d\n", (unsigned int)(offset/sizeof(uint64_t)));
1067                     rmap_remove(core, (addr_t)shdw64_entry);
1068                     memset((void*)shdw64_entry, 0, sizeof(uint64_t));
1069                 } else {
1070                     shadow_page_remove_shadow_pde(core, page, (addr_t)shdw64_entry);
1071                     memset((void*)shdw64_entry, 0, sizeof(uint64_t));                   
1072                 }
1073             }
1074         }
1075     }
1076 }
1077
1078 //emulation for synchronization
1079 void shadow_page_post_write(struct guest_info * core, addr_t guest_pa)  {
1080
1081 }
1082
1083 int shadow_unprotect_page_virt(struct guest_info * core, addr_t guest_va) {
1084     addr_t guest_pa;
1085
1086     if (guest_va_to_guest_pa(core, guest_va, &guest_pa) != 0) {
1087         PrintError(info->vm_info, info, "In GVA->GPA: Invalid GVA(%p)->GPA lookup\n", 
1088                 (void *)guest_va);
1089         return -1;
1090     }
1091         
1092     return shadow_unprotect_page(core, PAGE_BASE_ADDR(guest_pa));
1093 }
1094
1095 void shadow_free_some_pages(struct guest_info * core) {
1096     while (core->n_free_shadow_pages < REFILE_PAGES) {
1097         struct shadow_page_cache_data * page;
1098         page = container_of(core->active_shadow_pages.prev,
1099             struct shadow_page_cache_data, link);
1100         shadow_zap_page(core,page);
1101     }           
1102 }
1103
1104 void shadow_free_all_pages(struct guest_info *core) {
1105
1106     struct shadow_page_cache_data * sp, *node;
1107     list_for_each_entry_safe(sp, node, &core->active_shadow_pages, link) {
1108         shadow_zap_page(core , sp);
1109     }
1110 }
1111
1112
1113 static struct shadow_page_cache_data * create_new_shadow_pt(struct guest_info * core);
1114
1115
1116 #include "vmm_shdw_pg_cache_32.h"
1117 #include "vmm_shdw_pg_cache_32pae.h"
1118 #include "vmm_shdw_pg_cache_64.h"
1119
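/* Registration glue: expose this cache as the "VTLB_CACHING" shadow paging
 * implementation. The handlers below dispatch to the mode-specific code from
 * the vmm_shdw_pg_cache_{32,32pae,64}.h headers included above, based on the
 * current guest CPU mode. */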
1120 static int vtlb_caching_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1121
1122     V3_Print(info->vm_info, info, "VTLB Caching initialization\n");
1123     return 0;
1124 }
1125
1126 static int vtlb_caching_deinit(struct v3_vm_info * vm) {
1127     return -1;
1128 }
1129
1130 static int vtlb_caching_local_init(struct guest_info * core) {
1131
1132     V3_Print(info->vm_info, info, "VTLB local initialization\n");
1133
1134     INIT_LIST_HEAD(&core->active_shadow_pages);
1135     INIT_LIST_HEAD(&core->free_pages);
1136
1137     alloc_shadow_pages(core);   
1138
1139     shadow_topup_caches(core);
1140
1141     core->prev_cr3_pdt_base = 0;
1142         
1143     return 0;
1144 }
1145
1146
1147 static int vtlb_caching_activate_shdw_pt(struct guest_info * core) {
1148     switch (v3_get_vm_cpu_mode(core)) {
1149
1150         case PROTECTED:
1151             return activate_shadow_pt_32(core);
1152         case PROTECTED_PAE:
1153             return activate_shadow_pt_32pae(core);
1154         case LONG:
1155         case LONG_32_COMPAT:
1156         case LONG_16_COMPAT:
1157             return activate_shadow_pt_64(core);
1158         default:
1159             PrintError(info->vm_info, info, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
1160             return -1;
1161     }
1162
1163     return 0;
1164 }
1165
1166 static int vtlb_caching_invalidate_shdw_pt(struct guest_info * core) {
1167     return vtlb_caching_activate_shdw_pt(core);
1168 }
1169
1170
1171 static int vtlb_caching_handle_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
1172
1173         switch (v3_get_vm_cpu_mode(core)) {
1174             case PROTECTED:
1175                 return handle_shadow_pagefault_32(core, fault_addr, error_code);
1176                 break;
1177             case PROTECTED_PAE:
1178                 return handle_shadow_pagefault_32pae(core, fault_addr, error_code);
1179             case LONG:
1180             case LONG_32_COMPAT:
1181             case LONG_16_COMPAT:
1182                 return handle_shadow_pagefault_64(core, fault_addr, error_code);
1183                 break;
1184             default:
1185                 PrintError(info->vm_info, info, "Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
1186                 return -1;
1187         }
1188 }
1189
1190
1191 static int vtlb_caching_handle_invlpg(struct guest_info * core, addr_t vaddr) {
1192
1193     switch (v3_get_vm_cpu_mode(core)) {
1194         case PROTECTED:
1195             return handle_shadow_invlpg_32(core, vaddr);
1196         case PROTECTED_PAE:
1197             return handle_shadow_invlpg_32pae(core, vaddr);
1198         case LONG:
1199         case LONG_32_COMPAT:
1200         case LONG_16_COMPAT:
1201             return handle_shadow_invlpg_64(core, vaddr);
1202         default:
1203             PrintError(info->vm_info, info, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
1204             return -1;
1205     }
1206 }
1207
1208 static struct v3_shdw_pg_impl vtlb_caching_impl =  {
1209     .name = "VTLB_CACHING",
1210     .init = vtlb_caching_init,
1211     .deinit = vtlb_caching_deinit,
1212     .local_init = vtlb_caching_local_init,
1213     .handle_pagefault = vtlb_caching_handle_pf,
1214     .handle_invlpg = vtlb_caching_handle_invlpg,
1215     .activate_shdw_pt = vtlb_caching_activate_shdw_pt,
1216     .invalidate_shdw_pt = vtlb_caching_invalidate_shdw_pt
1217 };
1218
1219
1220
1221
1222
1223 register_shdw_pg_impl(&vtlb_caching_impl);
1224
1225 #endif