Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
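For example, assuming the remote has a release branch named "Release-1.2" (use `git branch -r` to list the actual branch names):

  git checkout --track -b Release-1.2 origin/Release-1.2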


palacios/src/palacios/mmu/vmm_shdw_pg_cache.c (commit cf60c8624af05e316179657b493f5a0df3af80fe)
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_swapbypass.h>
#include <palacios/vmm_ctrl_regs.h>

#include <palacios/vm_guest.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm_hashtable.h>
#include <palacios/vmm_list.h>

#define DEFAULT_CACHE_SIZE ((32 * 1024 * 1024) / 4096)
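/* 32 MB worth of 4 KB page-table pages: (32 * 1024 * 1024) / 4096 = 8192 pages. */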

#define V3_CACHED_PG 0x1

#ifndef V3_CONFIG_DEBUG_SHDW_PG_CACHE
#undef PrintDebug
#define PrintDebug(fmt, ...)
#endif

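/* Back pointer from a child shadow page to a parent page table that
 * references it; gva records which entry in the parent maps the child. */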
struct shdw_back_ptr {
    addr_t gva;
    struct shdw_pg_data * pg_data;
    struct list_head back_ptr_node;
};

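/* Hash key for the page cache: a guest physical address plus the type of
 * page table it backs. Packed so the raw bytes can be hashed directly. */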
struct guest_pg_tuple {
    addr_t gpa;
    page_type_t pt_type;
} __attribute__((packed));

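/* Reverse map entry: identifies a shadow page table (by its (gpa, pt_type)
 * tuple) that maps a given guest page, and the GVA of the mapping entry. */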
struct rmap_entry {
    addr_t gva;
    addr_t gpa;
    page_type_t pt_type;
    struct list_head rmap_node;
};

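/* A cached shadow page: its guest identity (tuple), the host physical and
 * virtual addresses of its backing page, the back pointers of parents that
 * reference it, and its node in the LRU queue or free list. */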
struct shdw_pg_data {
    struct guest_pg_tuple tuple;

    addr_t hpa;
    void * hva;

    struct list_head back_ptrs;
    struct list_head pg_queue_node;

};


struct cache_core_state {

};

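/* Per-VM cache state: the (gpa, type) -> page hash table, the reverse map,
 * an LRU queue of in-use pages, a free list of evicted pages, and occupancy
 * counters bounded by max_cache_pgs. */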
struct cache_vm_state {

    v3_lock_t cache_lock;

    struct hashtable * page_htable; // GPA to shdw_pg_data
    struct hashtable * reverse_map;

    int max_cache_pgs;
    int pgs_in_cache;

    struct list_head pg_queue;

    int pgs_in_free_list;
    struct list_head free_list;
};

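/* Clear the present bit of the entry for 'va' in the page table 'pt',
 * dispatching on the page table type to select the correct entry format
 * and index macro. */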
static inline int evict_pt(void * pt, addr_t va, page_type_t pt_type) {

    switch (pt_type) {
        case PAGE_PD32: {
            pde32_t * pde = pt;
            pde[PDE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_4MB: {
            pde32_4MB_t * pde = pt;
            pde[PDE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PT32: {
            pte32_t * pte = pt;
            pte[PTE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PML464: {
            pml4e64_t * pml = pt;
            pml[PML4E64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = pt;
            pdp[PDPE64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PD64: {
            pde64_t * pde = pt;
            pde[PDE64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PT64: {
            pte64_t * pte = pt;
            pte[PTE64_INDEX(va)].present = 0;
            break;
        }
        default:
            PrintError(VM_NONE, VCORE_NONE, "Invalid page type: %d\n", pt_type);
            return -1;
    }

    return 0;
}

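/* Write-protect the entry for 'va' in the page table 'pt' by clearing its
 * writable bit, so that subsequent guest writes to the page will fault. */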
static inline int grab_pt(void * pt, addr_t va, page_type_t pt_type) {

    switch (pt_type) {
        case PAGE_PD32: {
            pde32_t * pde = pt;
            pde[PDE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_4MB: {
            pde32_4MB_t * pde = pt;
            pde[PDE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PT32: {
            pte32_t * pte = pt;
            pte[PTE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PML464: {
            pml4e64_t * pml = pt;
            pml[PML4E64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = pt;
            pdp[PDPE64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PD64: {
            pde64_t * pde = pt;
            pde[PDE64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PT64: {
            pte64_t * pte = pt;
            pte[PTE64_INDEX(va)].writable = 0;
            break;
        }
        default:
            PrintError(VM_NONE, VCORE_NONE, "Invalid page type: %d\n", pt_type);
            return -1;
    }

    return 0;
}

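/* Detach a shadow page from every parent page table that references it:
 * mark each parent entry not-present and free the back pointer records. */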
static int unlink_shdw_pg(struct shdw_pg_data * pg_data) {
    struct shdw_back_ptr * back_ptr = NULL;
    struct shdw_back_ptr * tmp_ptr = NULL;

    PrintError(VM_NONE, VCORE_NONE, "Unlinking gpa=%p, type=%d\n", (void *)pg_data->tuple.gpa, pg_data->tuple.pt_type);

    list_for_each_entry_safe(back_ptr, tmp_ptr, &(pg_data->back_ptrs), back_ptr_node) {
        struct shdw_pg_data * parent = back_ptr->pg_data;

        evict_pt(parent->hva, back_ptr->gva, parent->tuple.pt_type);
        list_del(&(back_ptr->back_ptr_node));
        V3_Free(back_ptr);
    }

    return 0;
}

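/* Record in the reverse map that 'pg_data' maps guest page 'gpa' at virtual
 * address 'gva', creating the per-GPA entry list on first use. */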
static int add_rmap(struct v3_vm_info * vm, struct shdw_pg_data * pg_data, addr_t gpa, addr_t gva) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct list_head * rmap_list = NULL;
    struct rmap_entry * entry = NULL;

    rmap_list = (struct list_head *)v3_htable_search(cache_state->reverse_map, gpa);

    if (rmap_list == NULL) {
        rmap_list = V3_Malloc(sizeof(struct list_head));

        if (!rmap_list) {
            PrintError(vm, VCORE_NONE, "Cannot allocate\n");
            return -1;
        }

        INIT_LIST_HEAD(rmap_list);

        v3_htable_insert(cache_state->reverse_map, gpa, (addr_t)rmap_list);
    }

    entry = V3_Malloc(sizeof(struct rmap_entry));

    if (!entry) {
        PrintError(vm, VCORE_NONE, "Cannot allocate\n");
        return -1;
    }

    entry->gva = gva;
    entry->gpa = pg_data->tuple.gpa;
    entry->pt_type = pg_data->tuple.pt_type;

    list_add(&(entry->rmap_node), rmap_list);

    return 0;
}

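/* Write-protect every shadow page table entry that maps guest page 'gpa',
 * by walking the page's reverse map and calling grab_pt() on each entry. */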
static int update_rmap_entries(struct v3_vm_info * vm, addr_t gpa) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct list_head * rmap_list = NULL;
    struct rmap_entry * entry = NULL;
    int i = 0;

    rmap_list = (struct list_head *)v3_htable_search(cache_state->reverse_map, gpa);

    if (rmap_list == NULL) {
        return 0;
    }

    PrintError(vm, VCORE_NONE, "Updating rmap entries\n\t");

    list_for_each_entry(entry, rmap_list, rmap_node) {
        struct shdw_pg_data * pg_data = NULL;
        struct guest_pg_tuple tuple = {entry->gpa, entry->pt_type};

        V3_Print(vm, VCORE_NONE, "%d \n", i);

        pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);

        if (!pg_data) {
            PrintError(vm, VCORE_NONE, "Invalid PTE reference... Should Delete rmap entry\n");
            continue;
        }

        if (grab_pt(pg_data->hva, entry->gva, entry->pt_type) == -1) {
            PrintError(vm, VCORE_NONE, "Could not invalidate reverse map entry\n");
            return -1;
        }

        i++;
    }

    return 0;
}

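/* Link a child shadow page to a parent page table by adding a back pointer
 * recording the parent and the GVA through which the parent maps it. */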
static int link_shdw_pg(struct shdw_pg_data * child_pg, struct shdw_pg_data * parent_pg, addr_t gva) {
    struct shdw_back_ptr * back_ptr = V3_Malloc(sizeof(struct shdw_back_ptr));

    if (!back_ptr) {
        PrintError(VM_NONE, VCORE_NONE, "Cannot allocate\n");
        return -1;
    }

    memset(back_ptr, 0, sizeof(struct shdw_back_ptr));

    back_ptr->pg_data = parent_pg;
    back_ptr->gva = gva;

    list_add(&(back_ptr->back_ptr_node), &(child_pg->back_ptrs));

    return 0;
}

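/* Look up a cached shadow page by (gpa, type). On a hit the page is moved
 * to the head of the queue, keeping the LRU victim at the tail. */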
static struct shdw_pg_data * find_shdw_pt(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;
    struct guest_pg_tuple tuple = {gpa, pt_type};

    pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);

    if (pg_data != NULL) {
        // move pg_data to head of queue, for LRU policy
        list_move(&(pg_data->pg_queue_node), &(cache_state->pg_queue));
    }

    return pg_data;
}

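/* Evict the shadow page for (gpa, type) if it is cached: unlink it from its
 * parents, remove it from the hash table, and move it to the free list. */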
static int evict_shdw_pg(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;

    pg_data = find_shdw_pt(vm, gpa, pt_type);

    PrintError(vm, VCORE_NONE, "Evicting GPA: %p, type=%d\n", (void *)gpa, pt_type);

    if (pg_data != NULL) {
        if (unlink_shdw_pg(pg_data) == -1) {
            PrintError(vm, VCORE_NONE, "Error unlinking page...\n");
            return -1;
        }

        v3_htable_remove(cache_state->page_htable, (addr_t)&(pg_data->tuple), 0);

        // Move Page to free list
        list_move(&(pg_data->pg_queue_node), &(cache_state->free_list));
        cache_state->pgs_in_free_list++;
        cache_state->pgs_in_cache--;
    }

    return 0;
}

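/* Reclaim the least recently used shadow page from the tail of the queue so
 * its backing memory can be reused for a new page. */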
static struct shdw_pg_data * pop_queue_pg(struct v3_vm_info * vm,
                                          struct cache_vm_state * cache_state) {
    struct shdw_pg_data * pg_data = NULL;

    PrintError(vm, VCORE_NONE, "popping page from queue\n");

    pg_data = list_tail_entry(&(cache_state->pg_queue), struct shdw_pg_data, pg_queue_node);

    if (unlink_shdw_pg(pg_data) == -1) {
        PrintError(vm, VCORE_NONE, "Error unlinking cached page\n");
        return NULL;
    }

    v3_htable_remove(cache_state->page_htable, (addr_t)&(pg_data->tuple), 0);
    list_del(&(pg_data->pg_queue_node));

    cache_state->pgs_in_cache--;

    return pg_data;
}

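/* Obtain a shadow page for (gpa, type): allocate fresh memory while under
 * the cache limit, otherwise recycle from the free list, otherwise reclaim
 * the LRU page. The page is zeroed, hashed, and queued as most recent. */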
static struct shdw_pg_data * create_shdw_pt(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;

    PrintError(vm, VCORE_NONE, "Creating shdw page: gpa=%p, type=%d\n", (void *)gpa, pt_type);

    if (cache_state->pgs_in_cache < cache_state->max_cache_pgs) {
        pg_data = V3_Malloc(sizeof(struct shdw_pg_data));

        if (!pg_data) {
            PrintError(vm, VCORE_NONE, "Cannot allocate\n");
            return NULL;
        }

        pg_data->hpa = (addr_t)V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1,
                                                     V3_ALLOC_PAGES_CONSTRAINT_4GB);

        if (!pg_data->hpa) {
            PrintError(vm, VCORE_NONE, "Cannot allocate page for shadow page table\n");
            V3_Free(pg_data); // don't leak the descriptor if the page allocation fails
            return NULL;
        }

        pg_data->hva = (void *)V3_VAddr((void *)pg_data->hpa);

    } else if (cache_state->pgs_in_free_list) {

        PrintError(vm, VCORE_NONE, "pulling page from free list\n");
        // pull from free list
        pg_data = list_tail_entry(&(cache_state->free_list), struct shdw_pg_data, pg_queue_node);

        list_del(&(pg_data->pg_queue_node));
        cache_state->pgs_in_free_list--;

    } else {
        // pull from queue
        pg_data = pop_queue_pg(vm, cache_state);
    }

    if (pg_data == NULL) {
        PrintError(vm, VCORE_NONE, "Error creating Shadow Page table page\n");
        return NULL;
    }

    memset(pg_data->hva, 0, PAGE_SIZE_4KB);

    pg_data->tuple.gpa = gpa;
    pg_data->tuple.pt_type = pt_type;

    INIT_LIST_HEAD(&(pg_data->back_ptrs));

    v3_htable_insert(cache_state->page_htable, (addr_t)&(pg_data->tuple), (addr_t)pg_data);

    list_add(&(pg_data->pg_queue_node), &(cache_state->pg_queue));
    cache_state->pgs_in_cache++;

    return pg_data;
}


#include "vmm_shdw_pg_cache_32.h"
//#include "vmm_shdw_pg_cache_32pae.h"
//#include "vmm_shdw_pg_cache_64.h"

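/* Hash table callbacks: cache keys hash the packed (gpa, type) tuple by
 * value, while reverse map keys are plain GPAs compared directly. */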
static uint_t cache_hash_fn(addr_t key) {
    struct guest_pg_tuple * tuple = (struct guest_pg_tuple *)key;

    return v3_hash_buffer((uint8_t *)tuple, sizeof(struct guest_pg_tuple));
}

static int cache_eq_fn(addr_t key1, addr_t key2) {
    struct guest_pg_tuple * tuple1 = (struct guest_pg_tuple *)key1;
    struct guest_pg_tuple * tuple2 = (struct guest_pg_tuple *)key2;

    return ((tuple1->gpa == tuple2->gpa) && (tuple1->pt_type == tuple2->pt_type));
}

static uint_t rmap_hash_fn(addr_t key) {
    return v3_hash_long(key, sizeof(addr_t) * 8);
}

static int rmap_eq_fn(addr_t key1, addr_t key2) {
    return (key1 == key2);
}

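/* VM-wide initialization: the optional "cache_size" parameter is taken in MB
 * and converted to a page count, then the hash tables, lock, LRU queue, and
 * free list are set up. */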
static int cache_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct v3_shdw_impl_state * vm_state = &(vm->shdw_impl);
    struct cache_vm_state * cache_state = NULL;
    int cache_size = DEFAULT_CACHE_SIZE;
    char * cache_sz_str = v3_cfg_val(cfg, "cache_size");

    if (cache_sz_str != NULL) {
        cache_size = ((atoi(cache_sz_str) * 1024 * 1024) / 4096);
    }

    V3_Print(vm, VCORE_NONE, "Shadow Page Cache initialization\n");

    cache_state = V3_Malloc(sizeof(struct cache_vm_state));

    if (!cache_state) {
        PrintError(vm, VCORE_NONE, "Cannot allocate\n");
        return -1;
    }

    memset(cache_state, 0, sizeof(struct cache_vm_state));

    cache_state->page_htable = v3_create_htable(0, cache_hash_fn, cache_eq_fn);
    cache_state->reverse_map = v3_create_htable(0, rmap_hash_fn, rmap_eq_fn);
    v3_lock_init(&(cache_state->cache_lock));
    INIT_LIST_HEAD(&(cache_state->pg_queue));
    INIT_LIST_HEAD(&(cache_state->free_list));
    cache_state->max_cache_pgs = cache_size;

    vm_state->impl_data = cache_state;

    return 0;
}


static int cache_deinit(struct v3_vm_info * vm) {
    return -1;
}


static int cache_local_init(struct guest_info * core) {
    //    struct v3_shdw_pg_state * core_state = &(vm->shdw_pg_state);

    return 0;
}

static int cache_activate_shdw_pt(struct guest_info * core) {
    switch (v3_get_vm_cpu_mode(core)) {

        case PROTECTED:
            PrintError(core->vm_info, core, "Calling 32 bit cache activation\n");
            return activate_shadow_pt_32(core);
        case PROTECTED_PAE:
            //      return activate_shadow_pt_32pae(core);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //      return activate_shadow_pt_64(core);
        default:
            PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }

    return 0;
}

static int cache_invalidate_shdw_pt(struct guest_info * core) {
    // wipe everything...
    V3_Print(core->vm_info, core, "Cache invalidation called\n");

    return cache_activate_shdw_pt(core);
}

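/* Page fault entry point: dispatch on the guest CPU mode. Only the 32-bit
 * protected mode handler is currently wired up; the PAE and long mode
 * handlers remain commented out and fall through to the error path. */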
static int cache_handle_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {

    switch (v3_get_vm_cpu_mode(core)) {
        case PROTECTED:
            return handle_shadow_pagefault_32(core, fault_addr, error_code);
        case PROTECTED_PAE:
            //      return handle_shadow_pagefault_32pae(core, fault_addr, error_code);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //      return handle_shadow_pagefault_64(core, fault_addr, error_code);
        default:
            PrintError(core->vm_info, core, "Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }
}


static int cache_handle_invlpg(struct guest_info * core, addr_t vaddr) {
    PrintError(core->vm_info, core, "INVLPG called for %p\n", (void *)vaddr);

    switch (v3_get_vm_cpu_mode(core)) {
        case PROTECTED:
            return handle_shadow_invlpg_32(core, vaddr);
        case PROTECTED_PAE:
            //    return handle_shadow_invlpg_32pae(core, vaddr);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //    return handle_shadow_invlpg_64(core, vaddr);
        default:
            PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }
}

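/* Export this implementation to the shadow paging framework under the name
 * "SHADOW_CACHE" so it can be selected when a VM is configured. */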
static struct v3_shdw_pg_impl cache_impl = {
    .name = "SHADOW_CACHE",
    .init = cache_init,
    .deinit = cache_deinit,
    .local_init = cache_local_init,
    .handle_pagefault = cache_handle_pf,
    .handle_invlpg = cache_handle_invlpg,
    .activate_shdw_pt = cache_activate_shdw_pt,
    .invalidate_shdw_pt = cache_invalidate_shdw_pt
};


register_shdw_pg_impl(&cache_impl);