Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
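For example, to track a release branch (the branch name below is illustrative; run "git branch -r" inside the clone to list the branches that actually exist):

  git checkout --track -b Release-1.2 origin/Release-1.2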


Commit: initial shadow page cache version
File: palacios/src/palacios/mmu/vmm_shdw_pg_cache.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_swapbypass.h>
#include <palacios/vmm_ctrl_regs.h>

#include <palacios/vm_guest.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm_hashtable.h>
#include <palacios/vmm_list.h>

#define DEFAULT_CACHE_SIZE ((32 * 1024 * 1024) / 4096)
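/* i.e. 32 MB worth of 4 KB page-table pages: (32 * 1024 * 1024) / 4096 = 8192 cache entries */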

#define V3_CACHED_PG 0x1

#ifndef CONFIG_DEBUG_SHDW_PG_CACHE
#undef PrintDebug
#define PrintDebug(fmt, ...)
#endif


struct shdw_back_ptr {
    addr_t gva;
    struct shdw_pg_data * pg_data;
    struct list_head back_ptr_node;
};

struct guest_pg_tuple {
    addr_t gpa;
    page_type_t pt_type;
} __attribute__((packed));


struct rmap_entry {
    addr_t gva;
    addr_t gpa;
    page_type_t pt_type;
    struct list_head rmap_node;
};

struct shdw_pg_data {
    struct guest_pg_tuple tuple;

    addr_t hpa;
    void * hva;

    struct list_head back_ptrs;
    struct list_head pg_queue_node;
};


struct cache_core_state {

};


struct cache_vm_state {

    v3_lock_t cache_lock;

    struct hashtable * page_htable;  // (GPA, page type) tuple to shdw_pg_data
    struct hashtable * reverse_map;

    int max_cache_pgs;
    int pgs_in_cache;

    struct list_head pg_queue;

    int pgs_in_free_list;
    struct list_head free_list;
};
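/* Layout summary (comment added for clarity):
 *  - page_htable maps a packed (gpa, pt_type) tuple to its cached shadow page.
 *  - reverse_map maps a guest physical address to the shadow page table
 *    entries that currently map it.
 *  - pg_queue is an LRU queue of in-use shadow pages (head = most recent).
 *  - free_list holds evicted pages for reuse.
 */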


static inline int evict_pt(void * pt, addr_t va, page_type_t pt_type) {

    switch (pt_type) {
        case PAGE_PD32: {
            pde32_t * pde = pt;
            pde[PDE32_INDEX(va)].writable = 1;
            break;
        }
        case PAGE_4MB: {
            pde32_4MB_t * pde = pt;
            pde[PDE32_INDEX(va)].writable = 1;
            break;
        }
        case PAGE_PT32: {
            pte32_t * pte = pt;
            pte[PTE32_INDEX(va)].writable = 1;
            break;
        }
        case PAGE_PML464: {
            pml4e64_t * pml = pt;
            pml[PML4E64_INDEX(va)].writable = 1;
            break;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = pt;
            pdp[PDPE64_INDEX(va)].writable = 1;
            break;
        }
        case PAGE_PD64: {
            pde64_t * pde = pt;
            pde[PDE64_INDEX(va)].writable = 1;
            break;
        }
        case PAGE_PT64: {
            pte64_t * pte = pt;
            pte[PTE64_INDEX(va)].writable = 1;
            break;
        }
        default:
            PrintError("Invalid page type: %d\n", pt_type);
            return -1;
    }

    return 0;
}


static inline int grab_pt(void * pt, addr_t va, page_type_t pt_type) {

    switch (pt_type) {
        case PAGE_PD32: {
            pde32_t * pde = pt;
            pde[PDE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_4MB: {
            pde32_4MB_t * pde = pt;
            pde[PDE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PT32: {
            pte32_t * pte = pt;
            pte[PTE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PML464: {
            pml4e64_t * pml = pt;
            pml[PML4E64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = pt;
            pdp[PDPE64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PD64: {
            pde64_t * pde = pt;
            pde[PDE64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PT64: {
            pte64_t * pte = pt;
            pte[PTE64_INDEX(va)].writable = 0;
            break;
        }
        default:
            PrintError("Invalid page type: %d\n", pt_type);
            return -1;
    }

    return 0;
}
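/* Note (added for clarity): grab_pt() clears the writable bit in the entry of
 * pt that covers va, so guest writes to a cached page table fault into the
 * VMM; evict_pt() restores writability once the cached copy is discarded.
 */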


static int unlink_shdw_pg(struct shdw_pg_data * pg_data) {
    struct shdw_back_ptr * back_ptr = NULL;
    struct shdw_back_ptr * tmp_ptr = NULL;

    PrintError("Unlinking gpa=%p, type=%d\n", (void *)pg_data->tuple.gpa, pg_data->tuple.pt_type);

    list_for_each_entry_safe(back_ptr, tmp_ptr, &(pg_data->back_ptrs), back_ptr_node) {
        struct shdw_pg_data * parent = back_ptr->pg_data;

        evict_pt(parent->hva, back_ptr->gva, parent->tuple.pt_type);
        list_del(&(back_ptr->back_ptr_node));
        V3_Free(back_ptr);
    }

    return 0;
}


static int add_rmap(struct v3_vm_info * vm, struct shdw_pg_data * pg_data, addr_t gpa, addr_t gva) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct list_head * rmap_list = NULL;
    struct rmap_entry * entry = NULL;

    rmap_list = (struct list_head *)v3_htable_search(cache_state->reverse_map, gpa);

    if (rmap_list == NULL) {
        rmap_list = V3_Malloc(sizeof(struct list_head));
        INIT_LIST_HEAD(rmap_list);

        v3_htable_insert(cache_state->reverse_map, gpa, (addr_t)rmap_list);
    }

    entry = V3_Malloc(sizeof(struct rmap_entry));

    entry->gva = gva;
    entry->gpa = pg_data->tuple.gpa;
    entry->pt_type = pg_data->tuple.pt_type;

    list_add(&(entry->rmap_node), rmap_list);

    return 0;
}
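/* Note (added for clarity): each rmap_entry records which cached page table
 * (entry->gpa, entry->pt_type) maps the guest page keyed by gpa, and at which
 * guest virtual address; update_rmap_entries() below walks this list to
 * re-write-protect those entries when the guest page changes.
 */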


static int update_rmap_entries(struct v3_vm_info * vm, addr_t gpa) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct list_head * rmap_list = NULL;
    struct rmap_entry * entry = NULL;
    int i = 0;

    rmap_list = (struct list_head *)v3_htable_search(cache_state->reverse_map, gpa);

    if (rmap_list == NULL) {
        return 0;
    }

    PrintError("Updating rmap entries\n\t");

    list_for_each_entry(entry, rmap_list, rmap_node) {
        struct shdw_pg_data * pg_data = NULL;
        struct guest_pg_tuple tuple = {entry->gpa, entry->pt_type};

        V3_Print("%d \n", i);

        pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);

        if (!pg_data) {
            PrintError("Invalid PTE reference...\n");
            continue;
        }

        if (grab_pt(pg_data->hva, entry->gva, entry->pt_type) == -1) {
            PrintError("Could not invalidate reverse map entry\n");
            return -1;
        }

        i++;
    }

    return 0;
}


static int link_shdw_pg(struct shdw_pg_data * child_pg, struct shdw_pg_data * parent_pg, addr_t gva) {
    struct shdw_back_ptr * back_ptr = V3_Malloc(sizeof(struct shdw_back_ptr));
    memset(back_ptr, 0, sizeof(struct shdw_back_ptr));

    back_ptr->pg_data = parent_pg;
    back_ptr->gva = gva;

    list_add(&(back_ptr->back_ptr_node), &(child_pg->back_ptrs));

    return 0;
}


static struct shdw_pg_data * find_shdw_pt(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;
    struct guest_pg_tuple tuple = {gpa, pt_type};

    pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);

    if (pg_data != NULL) {
        // move pg_data to head of queue, for LRU policy
        list_move(&(pg_data->pg_queue_node), &(cache_state->pg_queue));
    }

    return pg_data;
}


static int evict_shdw_pg(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;

    pg_data = find_shdw_pt(vm, gpa, pt_type);

    PrintError("Evicting GPA: %p, type=%d\n", (void *)gpa, pt_type);

    if (pg_data != NULL) {
        if (unlink_shdw_pg(pg_data) == -1) {
            PrintError("Error unlinking page...\n");
            return -1;
        }

        v3_htable_remove(cache_state->page_htable, (addr_t)&(pg_data->tuple), 0);

        // Move Page to free list
        list_move(&(pg_data->pg_queue_node), &(cache_state->free_list));
        cache_state->pgs_in_free_list++;
        cache_state->pgs_in_cache--;
    }

    return 0;
}


static struct shdw_pg_data * pop_queue_pg(struct v3_vm_info * vm,
                                          struct cache_vm_state * cache_state) {
    struct shdw_pg_data * pg_data = NULL;

    pg_data = list_tail_entry(&(cache_state->pg_queue), struct shdw_pg_data, pg_queue_node);

    if (unlink_shdw_pg(pg_data) == -1) {
        PrintError("Error unlinking cached page\n");
        return NULL;
    }

    v3_htable_remove(cache_state->page_htable, (addr_t)&(pg_data->tuple), 0);
    list_del(&(pg_data->pg_queue_node));

    cache_state->pgs_in_cache--;

    return pg_data;
}
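/* Note (added for clarity): pop_queue_pg() reclaims the tail of pg_queue,
 * i.e. the least recently used shadow page, since find_shdw_pt() moves pages
 * to the head of the queue on every hit.
 */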

static struct shdw_pg_data * create_shdw_pt(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;

    PrintError("Creating shdw page: gpa=%p, type=%d\n", (void *)gpa, pt_type);

    if (cache_state->pgs_in_cache < cache_state->max_cache_pgs) {
        pg_data = V3_Malloc(sizeof(struct shdw_pg_data));

        pg_data->hpa = (addr_t)V3_AllocPages(1);
        pg_data->hva = (void *)V3_VAddr((void *)pg_data->hpa);

    } else if (cache_state->pgs_in_free_list) {
        // pull from free list
        pg_data = list_tail_entry(&(cache_state->free_list), struct shdw_pg_data, pg_queue_node);

        list_del(&(pg_data->pg_queue_node));
        cache_state->pgs_in_free_list--;

    } else {
        // pull from queue
        pg_data = pop_queue_pg(vm, cache_state);
    }

    if (pg_data == NULL) {
        PrintError("Error creating Shadow Page table page\n");
        return NULL;
    }

    memset(pg_data->hva, 0, PAGE_SIZE_4KB);

    pg_data->tuple.gpa = gpa;
    pg_data->tuple.pt_type = pt_type;

    INIT_LIST_HEAD(&(pg_data->back_ptrs));

    v3_htable_insert(cache_state->page_htable, (addr_t)&(pg_data->tuple), (addr_t)pg_data);

    list_add(&(pg_data->pg_queue_node), &(cache_state->pg_queue));
    cache_state->pgs_in_cache++;

    return pg_data;
}
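/* Note (added for clarity): a backing page comes from one of three sources,
 * tried in order: a fresh allocation while the cache is below max_cache_pgs,
 * then the free list, and finally eviction of the LRU page via pop_queue_pg().
 */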


#include "vmm_shdw_pg_cache_32.h"
//#include "vmm_shdw_pg_cache_32pae.h"
//#include "vmm_shdw_pg_cache_64.h"


static uint_t cache_hash_fn(addr_t key) {
    struct guest_pg_tuple * tuple = (struct guest_pg_tuple *)key;

    return v3_hash_buffer((uint8_t *)tuple, sizeof(struct guest_pg_tuple));
}

static int cache_eq_fn(addr_t key1, addr_t key2) {
    struct guest_pg_tuple * tuple1 = (struct guest_pg_tuple *)key1;
    struct guest_pg_tuple * tuple2 = (struct guest_pg_tuple *)key2;

    return ((tuple1->gpa == tuple2->gpa) && (tuple1->pt_type == tuple2->pt_type));
}

static uint_t rmap_hash_fn(addr_t key) {
    return v3_hash_long(key, sizeof(addr_t) * 8);
}

static int rmap_eq_fn(addr_t key1, addr_t key2) {
    return (key1 == key2);
}
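/* Note (added for clarity): page_htable keys are pointers to guest_pg_tuple,
 * hashed byte-wise; the struct is packed so padding bytes cannot make equal
 * tuples hash differently, which also lets lookups pass a pointer to a
 * stack-allocated tuple as the key.  reverse_map keys are plain GPA values,
 * so simple long hashing and equality suffice.
 */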


static int cache_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct v3_shdw_impl_state * vm_state = &(vm->shdw_impl);
    struct cache_vm_state * cache_state = NULL;
    int cache_size = DEFAULT_CACHE_SIZE;
    char * cache_sz_str = v3_cfg_val(cfg, "cache_size");

    if (cache_sz_str != NULL) {
        cache_size = ((atoi(cache_sz_str) * 1024 * 1024) / 4096);
    }

    V3_Print("Shadow Page Cache initialization\n");

    cache_state = V3_Malloc(sizeof(struct cache_vm_state));
    memset(cache_state, 0, sizeof(struct cache_vm_state));

    cache_state->page_htable = v3_create_htable(0, cache_hash_fn, cache_eq_fn);
    cache_state->reverse_map = v3_create_htable(0, rmap_hash_fn, rmap_eq_fn);
    v3_lock_init(&(cache_state->cache_lock));
    INIT_LIST_HEAD(&(cache_state->pg_queue));
    INIT_LIST_HEAD(&(cache_state->free_list));
    cache_state->max_cache_pgs = cache_size;

    vm_state->impl_data = cache_state;

    return 0;
}
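/* Note (added for clarity): the "cache_size" config value is interpreted in
 * MB; e.g. "64" yields (64 * 1024 * 1024) / 4096 = 16384 cached page-table
 * pages.  With no value given, DEFAULT_CACHE_SIZE (32 MB) applies.
 */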


static int cache_deinit(struct v3_vm_info * vm) {
    // deinitialization is not yet implemented, so always report failure
    return -1;
}


static int cache_local_init(struct guest_info * core) {
    //    struct v3_shdw_pg_state * core_state = &(vm->shdw_pg_state);

    return 0;
}

static int cache_activate_shdw_pt(struct guest_info * core) {
    switch (v3_get_vm_cpu_mode(core)) {

        case PROTECTED:
            PrintError("Calling 32 bit cache activation\n");
            return activate_shadow_pt_32(core);
        case PROTECTED_PAE:
            //      return activate_shadow_pt_32pae(core);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //      return activate_shadow_pt_64(core);
        default:
            PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }

    return 0;
}

static int cache_invalidate_shdw_pt(struct guest_info * core) {
    // wipe everything...
    V3_Print("Cache invalidation called\n");

    return cache_activate_shdw_pt(core);
}


static int cache_handle_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {

    switch (v3_get_vm_cpu_mode(core)) {
        case PROTECTED:
            return handle_shadow_pagefault_32(core, fault_addr, error_code);
        case PROTECTED_PAE:
            //      return handle_shadow_pagefault_32pae(core, fault_addr, error_code);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //      return handle_shadow_pagefault_64(core, fault_addr, error_code);
        default:
            PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }
}


static int cache_handle_invlpg(struct guest_info * core, addr_t vaddr) {
    PrintError("INVLPG called for %p\n", (void *)vaddr);

    switch (v3_get_vm_cpu_mode(core)) {
        case PROTECTED:
            return handle_shadow_invlpg_32(core, vaddr);
        case PROTECTED_PAE:
            //    return handle_shadow_invlpg_32pae(core, vaddr);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //    return handle_shadow_invlpg_64(core, vaddr);
        default:
            PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }
}


static struct v3_shdw_pg_impl cache_impl = {
    .name = "SHADOW_CACHE",
    .init = cache_init,
    .deinit = cache_deinit,
    .local_init = cache_local_init,
    .handle_pagefault = cache_handle_pf,
    .handle_invlpg = cache_handle_invlpg,
    .activate_shdw_pt = cache_activate_shdw_pt,
    .invalidate_shdw_pt = cache_invalidate_shdw_pt
};
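/* Note (added for clarity): register_shdw_pg_impl() registers this
 * implementation with Palacios' shadow paging framework, making it selectable
 * by its name, "SHADOW_CACHE".
 */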

register_shdw_pg_impl(&cache_impl);