Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way; an example follows.
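For example, to track a release branch (the branch name Release-1.2 here is only illustrative; substitute a branch that actually exists on the remote):

  git checkout --track -b Release-1.2 origin/Release-1.2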


Commit: Context-based output infrastructure (V3_Print, etc) and modifications to use it
File: palacios/src/palacios/mmu/vmm_shdw_pg_cache.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm_swapbypass.h>
#include <palacios/vmm_ctrl_regs.h>

#include <palacios/vm_guest.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm_hashtable.h>
#include <palacios/vmm_list.h>

#define DEFAULT_CACHE_SIZE ((32 * 1024 * 1024) / 4096)

#define V3_CACHED_PG 0x1

#ifndef V3_CONFIG_DEBUG_SHDW_PG_CACHE
#undef PrintDebug
#define PrintDebug(fmt, ...)
#endif


struct shdw_back_ptr {
    addr_t gva;
    struct shdw_pg_data * pg_data;
    struct list_head back_ptr_node;
};

struct guest_pg_tuple {
    addr_t gpa;
    page_type_t pt_type;
} __attribute__((packed));


struct rmap_entry {
    addr_t gva;
    addr_t gpa;
    page_type_t pt_type;
    struct list_head rmap_node;
};

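/* One cached shadow page table page: identified by its (GPA, page type)
 * tuple, backed by a host page (hpa/hva), and linked both into the LRU
 * queue and to the parent entries that reference it via back pointers. */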
struct shdw_pg_data {
    struct guest_pg_tuple tuple;

    addr_t hpa;
    void * hva;

    struct list_head back_ptrs;
    struct list_head pg_queue_node;
};


struct cache_core_state {

};

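/* VM-wide cache state: a hash table from (GPA, page type) tuples to
 * cached pages, a reverse map from guest page addresses to the shadow
 * entries that map them, an LRU queue of in-use pages, and a free list
 * of evicted pages available for reuse. */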
struct cache_vm_state {

    v3_lock_t cache_lock;

    struct hashtable * page_htable;   // (GPA, page type) tuple to shdw_pg_data
    struct hashtable * reverse_map;   // GPA to list of rmap_entry

    int max_cache_pgs;
    int pgs_in_cache;

    struct list_head pg_queue;        // LRU queue of in-use pages

    int pgs_in_free_list;
    struct list_head free_list;       // evicted pages available for reuse
};


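/* Clear the present bit of the entry that maps va in the given page
 * table, forcing a shadow page fault on the next access through it. */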
static inline int evict_pt(void * pt, addr_t va, page_type_t pt_type) {

    switch (pt_type) {
        case PAGE_PD32: {
            pde32_t * pde = pt;
            pde[PDE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_4MB: {
            pde32_4MB_t * pde = pt;
            pde[PDE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PT32: {
            pte32_t * pte = pt;
            pte[PTE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PML464: {
            pml4e64_t * pml = pt;
            pml[PML4E64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = pt;
            pdp[PDPE64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PD64: {
            pde64_t * pde = pt;
            pde[PDE64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PT64: {
            pte64_t * pte = pt;
            pte[PTE64_INDEX(va)].present = 0;
            break;
        }
        default:
            PrintError(VM_NONE, VCORE_NONE, "Invalid page type: %d\n", pt_type);
            return -1;
    }

    return 0;
}

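/* Clear the writable bit of the entry that maps va, so that guest writes
 * to a cached page table trap into the VMM and the cache can be kept
 * coherent with the guest's page tables. */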
static inline int grab_pt(void * pt, addr_t va, page_type_t pt_type) {

    switch (pt_type) {
        case PAGE_PD32: {
            pde32_t * pde = pt;
            pde[PDE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_4MB: {
            pde32_4MB_t * pde = pt;
            pde[PDE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PT32: {
            pte32_t * pte = pt;
            pte[PTE32_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PML464: {
            pml4e64_t * pml = pt;
            pml[PML4E64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = pt;
            pdp[PDPE64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PD64: {
            pde64_t * pde = pt;
            pde[PDE64_INDEX(va)].writable = 0;
            break;
        }
        case PAGE_PT64: {
            pte64_t * pte = pt;
            pte[PTE64_INDEX(va)].writable = 0;
            break;
        }
        default:
            PrintError(VM_NONE, VCORE_NONE, "Invalid page type: %d\n", pt_type);
            return -1;
    }

    return 0;
}

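/* Detach a cached page from every parent page table entry that points to
 * it, marking those entries not-present and freeing the back pointers. */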
static int unlink_shdw_pg(struct shdw_pg_data * pg_data) {
    struct shdw_back_ptr * back_ptr = NULL;
    struct shdw_back_ptr * tmp_ptr = NULL;

    PrintDebug(VM_NONE, VCORE_NONE, "Unlinking gpa=%p, type=%d\n", (void *)pg_data->tuple.gpa, pg_data->tuple.pt_type);

    list_for_each_entry_safe(back_ptr, tmp_ptr, &(pg_data->back_ptrs), back_ptr_node) {
        struct shdw_pg_data * parent = back_ptr->pg_data;

        evict_pt(parent->hva, back_ptr->gva, parent->tuple.pt_type);
        list_del(&(back_ptr->back_ptr_node));
        V3_Free(back_ptr);
    }

    return 0;
}

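/* Record that the cached shadow page pg_data maps gva to the guest page
 * at gpa. The reverse map lets update_rmap_entries() later find and
 * write-protect every shadow mapping of gpa. */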
static int add_rmap(struct v3_vm_info * vm, struct shdw_pg_data * pg_data, addr_t gpa, addr_t gva) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct list_head * rmap_list = NULL;
    struct rmap_entry * entry = NULL;

    rmap_list = (struct list_head *)v3_htable_search(cache_state->reverse_map, gpa);

    if (rmap_list == NULL) {
        rmap_list = V3_Malloc(sizeof(struct list_head));

        if (!rmap_list) {
            PrintError(vm, VCORE_NONE, "Cannot allocate\n");
            return -1;
        }

        INIT_LIST_HEAD(rmap_list);

        v3_htable_insert(cache_state->reverse_map, gpa, (addr_t)rmap_list);
    }

    entry = V3_Malloc(sizeof(struct rmap_entry));

    if (!entry) {
        PrintError(vm, VCORE_NONE, "Cannot allocate\n");
        return -1;
    }

    entry->gva = gva;
    entry->gpa = pg_data->tuple.gpa;
    entry->pt_type = pg_data->tuple.pt_type;

    list_add(&(entry->rmap_node), rmap_list);

    return 0;
}

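/* Write-protect every shadow page table entry that maps the guest page
 * at gpa, by walking the reverse map list for gpa. */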
static int update_rmap_entries(struct v3_vm_info * vm, addr_t gpa) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct list_head * rmap_list = NULL;
    struct rmap_entry * entry = NULL;
    int i = 0;

    rmap_list = (struct list_head *)v3_htable_search(cache_state->reverse_map, gpa);

    if (rmap_list == NULL) {
        return 0;
    }

    PrintDebug(vm, VCORE_NONE, "Updating rmap entries\n");

    list_for_each_entry(entry, rmap_list, rmap_node) {
        struct shdw_pg_data * pg_data = NULL;
        struct guest_pg_tuple tuple = {entry->gpa, entry->pt_type};

        PrintDebug(vm, VCORE_NONE, "Updating rmap entry %d\n", i);

        pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);

        if (!pg_data) {
            PrintError(vm, VCORE_NONE, "Invalid PTE reference... should delete rmap entry\n");
            continue;
        }

        if (grab_pt(pg_data->hva, entry->gva, entry->pt_type) == -1) {
            PrintError(vm, VCORE_NONE, "Could not invalidate reverse map entry\n");
            return -1;
        }

        i++;
    }

    return 0;
}

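/* Add a back pointer from a child shadow page to the parent page table
 * entry (at gva within parent_pg) that references it, so the child can
 * be unlinked from all parents when it is evicted. */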
static int link_shdw_pg(struct shdw_pg_data * child_pg, struct shdw_pg_data * parent_pg, addr_t gva) {
    struct shdw_back_ptr * back_ptr = V3_Malloc(sizeof(struct shdw_back_ptr));

    if (!back_ptr) {
        PrintError(VM_NONE, VCORE_NONE, "Cannot allocate\n");
        return -1;
    }

    memset(back_ptr, 0, sizeof(struct shdw_back_ptr));

    back_ptr->pg_data = parent_pg;
    back_ptr->gva = gva;

    list_add(&(back_ptr->back_ptr_node), &(child_pg->back_ptrs));

    return 0;
}

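/* Look up a cached shadow page by (GPA, page type). A hit also moves the
 * page to the head of the LRU queue. */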
static struct shdw_pg_data * find_shdw_pt(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;
    struct guest_pg_tuple tuple = {gpa, pt_type};

    pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);

    if (pg_data != NULL) {
        // move pg_data to head of queue, for LRU policy
        list_move(&(pg_data->pg_queue_node), &(cache_state->pg_queue));
    }

    return pg_data;
}

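/* Evict the cached shadow page for (gpa, pt_type), if present: unlink it
 * from all parents, remove it from the hash table, and move it to the
 * free list for reuse. */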
static int evict_shdw_pg(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;

    pg_data = find_shdw_pt(vm, gpa, pt_type);

    PrintDebug(vm, VCORE_NONE, "Evicting GPA: %p, type=%d\n", (void *)gpa, pt_type);

    if (pg_data != NULL) {
        if (unlink_shdw_pg(pg_data) == -1) {
            PrintError(vm, VCORE_NONE, "Error unlinking page...\n");
            return -1;
        }

        v3_htable_remove(cache_state->page_htable, (addr_t)&(pg_data->tuple), 0);

        // Move page to the free list
        list_move(&(pg_data->pg_queue_node), &(cache_state->free_list));
        cache_state->pgs_in_free_list++;
        cache_state->pgs_in_cache--;
    }

    return 0;
}

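/* Reclaim the least recently used page from the tail of the LRU queue.
 * Callers only invoke this when the cache is full and the free list is
 * empty, so the queue should be non-empty here. */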
static struct shdw_pg_data * pop_queue_pg(struct v3_vm_info * vm,
                                          struct cache_vm_state * cache_state) {
    struct shdw_pg_data * pg_data = NULL;

    PrintDebug(vm, VCORE_NONE, "popping page from queue\n");

    pg_data = list_tail_entry(&(cache_state->pg_queue), struct shdw_pg_data, pg_queue_node);

    if (unlink_shdw_pg(pg_data) == -1) {
        PrintError(vm, VCORE_NONE, "Error unlinking cached page\n");
        return NULL;
    }

    v3_htable_remove(cache_state->page_htable, (addr_t)&(pg_data->tuple), 0);
    list_del(&(pg_data->pg_queue_node));

    cache_state->pgs_in_cache--;

    return pg_data;
}

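/* Allocate (or recycle) a shadow page for (gpa, pt_type): take a fresh
 * host page while the cache is below its limit, otherwise reuse a page
 * from the free list or reclaim the LRU page from the queue. */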
static struct shdw_pg_data * create_shdw_pt(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;

    PrintDebug(vm, VCORE_NONE, "Creating shdw page: gpa=%p, type=%d\n", (void *)gpa, pt_type);

    if (cache_state->pgs_in_cache < cache_state->max_cache_pgs) {
        pg_data = V3_Malloc(sizeof(struct shdw_pg_data));

        if (!pg_data) {
            PrintError(vm, VCORE_NONE, "Cannot allocate\n");
            return NULL;
        }

        pg_data->hpa = (addr_t)V3_AllocPages(1);

        if (!pg_data->hpa) {
            PrintError(vm, VCORE_NONE, "Cannot allocate page for shadow page table\n");
            V3_Free(pg_data);
            return NULL;
        }

        pg_data->hva = (void *)V3_VAddr((void *)pg_data->hpa);

    } else if (cache_state->pgs_in_free_list) {

        PrintDebug(vm, VCORE_NONE, "pulling page from free list\n");
        // pull from free list
        pg_data = list_tail_entry(&(cache_state->free_list), struct shdw_pg_data, pg_queue_node);

        list_del(&(pg_data->pg_queue_node));
        cache_state->pgs_in_free_list--;

    } else {
        // pull from queue
        pg_data = pop_queue_pg(vm, cache_state);
    }

    if (pg_data == NULL) {
        PrintError(vm, VCORE_NONE, "Error creating Shadow Page table page\n");
        return NULL;
    }

    memset(pg_data->hva, 0, PAGE_SIZE_4KB);

    pg_data->tuple.gpa = gpa;
    pg_data->tuple.pt_type = pt_type;

    INIT_LIST_HEAD(&(pg_data->back_ptrs));

    v3_htable_insert(cache_state->page_htable, (addr_t)&(pg_data->tuple), (addr_t)pg_data);

    list_add(&(pg_data->pg_queue_node), &(cache_state->pg_queue));
    cache_state->pgs_in_cache++;

    return pg_data;
}


#include "vmm_shdw_pg_cache_32.h"
//#include "vmm_shdw_pg_cache_32pae.h"
//#include "vmm_shdw_pg_cache_64.h"

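/* The page hash table is keyed by the packed (GPA, page type) tuple; the
 * reverse map is keyed directly by guest physical address. */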
static uint_t cache_hash_fn(addr_t key) {
    struct guest_pg_tuple * tuple = (struct guest_pg_tuple *)key;

    return v3_hash_buffer((uint8_t *)tuple, sizeof(struct guest_pg_tuple));
}

static int cache_eq_fn(addr_t key1, addr_t key2) {
    struct guest_pg_tuple * tuple1 = (struct guest_pg_tuple *)key1;
    struct guest_pg_tuple * tuple2 = (struct guest_pg_tuple *)key2;

    return ((tuple1->gpa == tuple2->gpa) && (tuple1->pt_type == tuple2->pt_type));
}

static uint_t rmap_hash_fn(addr_t key) {
    return v3_hash_long(key, sizeof(addr_t) * 8);
}

static int rmap_eq_fn(addr_t key1, addr_t key2) {
    return (key1 == key2);
}

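/* Per-VM initialization: the cache size can be set (in MB) with the
 * "cache_size" config option, and defaults to 32 MB worth of 4 KB pages. */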
static int cache_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct v3_shdw_impl_state * vm_state = &(vm->shdw_impl);
    struct cache_vm_state * cache_state = NULL;
    int cache_size = DEFAULT_CACHE_SIZE;
    char * cache_sz_str = v3_cfg_val(cfg, "cache_size");

    if (cache_sz_str != NULL) {
        cache_size = ((atoi(cache_sz_str) * 1024 * 1024) / 4096);
    }

    V3_Print(vm, VCORE_NONE, "Shadow Page Cache initialization\n");

    cache_state = V3_Malloc(sizeof(struct cache_vm_state));

    if (!cache_state) {
        PrintError(vm, VCORE_NONE, "Cannot allocate\n");
        return -1;
    }

    memset(cache_state, 0, sizeof(struct cache_vm_state));

    cache_state->page_htable = v3_create_htable(0, cache_hash_fn, cache_eq_fn);
    cache_state->reverse_map = v3_create_htable(0, rmap_hash_fn, rmap_eq_fn);
    v3_lock_init(&(cache_state->cache_lock));
    INIT_LIST_HEAD(&(cache_state->pg_queue));
    INIT_LIST_HEAD(&(cache_state->free_list));
    cache_state->max_cache_pgs = cache_size;

    vm_state->impl_data = cache_state;

    return 0;
}


static int cache_deinit(struct v3_vm_info * vm) {
    // deinitialization is not implemented
    return -1;
}


static int cache_local_init(struct guest_info * core) {
    //    struct v3_shdw_pg_state * core_state = &(vm->shdw_pg_state);

    return 0;
}

static int cache_activate_shdw_pt(struct guest_info * core) {
    switch (v3_get_vm_cpu_mode(core)) {

        case PROTECTED:
            PrintDebug(core->vm_info, core, "Calling 32 bit cache activation\n");
            return activate_shadow_pt_32(core);
        case PROTECTED_PAE:
            //      return activate_shadow_pt_32pae(core);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //      return activate_shadow_pt_64(core);
            // PAE and long mode are not implemented yet: fall through to the error case
        default:
            PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }

    return 0;
}

static int cache_invalidate_shdw_pt(struct guest_info * core) {
    // wipe everything...
    V3_Print(core->vm_info, core, "Cache invalidation called\n");

    return cache_activate_shdw_pt(core);
}


static int cache_handle_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {

    switch (v3_get_vm_cpu_mode(core)) {
        case PROTECTED:
            return handle_shadow_pagefault_32(core, fault_addr, error_code);
        case PROTECTED_PAE:
            //      return handle_shadow_pagefault_32pae(core, fault_addr, error_code);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //      return handle_shadow_pagefault_64(core, fault_addr, error_code);
        default:
            PrintError(core->vm_info, core, "Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }
}


static int cache_handle_invlpg(struct guest_info * core, addr_t vaddr) {
    PrintDebug(core->vm_info, core, "INVLPG called for %p\n", (void *)vaddr);

    switch (v3_get_vm_cpu_mode(core)) {
        case PROTECTED:
            return handle_shadow_invlpg_32(core, vaddr);
        case PROTECTED_PAE:
            //    return handle_shadow_invlpg_32pae(core, vaddr);
        case LONG:
        case LONG_32_COMPAT:
        case LONG_16_COMPAT:
            //    return handle_shadow_invlpg_64(core, vaddr);
        default:
            PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
            return -1;
    }
}

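/* Interface structure registered with the shadow paging core; selected
 * at runtime when a VM is configured with the "SHADOW_CACHE"
 * implementation. */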
static struct v3_shdw_pg_impl cache_impl = {
    .name = "SHADOW_CACHE",
    .init = cache_init,
    .deinit = cache_deinit,
    .local_init = cache_local_init,
    .handle_pagefault = cache_handle_pf,
    .handle_invlpg = cache_handle_invlpg,
    .activate_shdw_pt = cache_activate_shdw_pt,
    .invalidate_shdw_pt = cache_invalidate_shdw_pt
};


register_shdw_pg_impl(&cache_impl);