2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm_shadow_paging.h>
21 #include <palacios/vmm_swapbypass.h>
22 #include <palacios/vmm_ctrl_regs.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vm_guest_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/vmm_hashtable.h>
28 #include <palacios/vmm_list.h>
/* Default cache footprint: 32MB worth of 4KB shadow page-table pages. */
#define DEFAULT_CACHE_SIZE ((32 * 1024 * 1024) / 4096)

/* Flag bit marking a page as cached.
 * NOTE(review): not referenced in the visible portion of this file --
 * confirm its use sites before documenting semantics further. */
#define V3_CACHED_PG 0x1

/* Compile debug output away unless the cache debug option is enabled.
 * NOTE(review): the matching #endif is on a line not visible in this view. */
#ifndef V3_CONFIG_DEBUG_SHDW_PG_CACHE

#define PrintDebug(fmt, ...)
/* Back-pointer from a cached shadow page up to a parent shadow page table
 * that references it; unlink_shdw_pg() walks these to evict the parent
 * entries when this page is dropped from the cache.
 * NOTE(review): additional fields (e.g. the gva read by unlink_shdw_pg)
 * and the closing brace are on lines not visible in this view. */
struct shdw_back_ptr {
    struct shdw_pg_data * pg_data;    /* the parent shadow page table */
    struct list_head back_ptr_node;   /* linkage in the child's back_ptrs list */
/* Hash key for the shadow page cache: identifies a guest page table page.
 * Packed so cache_hash_fn() can hash it as a raw byte buffer with no
 * padding holes.  (The field lines -- by usage elsewhere in this file,
 * a gpa and a pt_type -- are not visible in this view.) */
struct guest_pg_tuple {
} __attribute__((packed));
/* NOTE(review): the enclosing struct headers for the fields below
 * (rmap_entry and shdw_pg_data, judging by usage elsewhere in this file)
 * are on lines not visible in this view. */
    struct list_head rmap_node;       /* linkage in a per-GPA reverse-map list */

    struct guest_pg_tuple tuple;      /* cache key: (gpa, pt_type) */

    struct list_head back_ptrs;       /* shdw_back_ptr list: parents mapping this page */
    struct list_head pg_queue_node;   /* linkage in the LRU pg_queue or free_list */
/* Per-core shadow paging state.
 * (No fields are visible in this view of the source.) */
struct cache_core_state {

/* Per-VM cache state.
 * NOTE(review): the counters and lock referenced elsewhere in this file
 * (pgs_in_cache, pgs_in_free_list, max_cache_pgs, cache_lock) are on
 * lines not visible in this view. */
struct cache_vm_state {

    struct hashtable * page_htable; // GPA to shdw_pg_data
    struct hashtable * reverse_map; /* GPA -> list of rmap_entry (pages derived from it) */

    struct list_head pg_queue;      /* LRU queue of in-use shadow pages; head = most recent */

    struct list_head free_list;     /* recycled shadow pages available for reuse */
/*
 * Clear the present bit of the entry mapping 'va' in the page table at
 * host VA 'pt', dispatching on the table's type so the entry is indexed
 * with the correct width (32-bit vs 64-bit paging structures).
 * Returns -1 for an unrecognized page type.
 * NOTE(review): the switch/case framing, the remaining typed-pointer
 * declarations, and the break/return statements are on lines not
 * visible in this view of the source.
 */
static inline int evict_pt(void * pt, addr_t va, page_type_t pt_type) {
    pde[PDE32_INDEX(va)].present = 0;     /* 32-bit page directory entry */
    pde32_4MB_t * pde = pt;               /* 32-bit 4MB large-page PDE */
    pde[PDE32_INDEX(va)].present = 0;
    pte[PTE32_INDEX(va)].present = 0;     /* 32-bit page table entry */
    pml4e64_t * pml = pt;                 /* 64-bit PML4 entry */
    pml[PML4E64_INDEX(va)].present = 0;
    pdp[PDPE64_INDEX(va)].present = 0;    /* 64-bit PDP entry */
    pde[PDE64_INDEX(va)].present = 0;     /* 64-bit PDE */
    pte[PTE64_INDEX(va)].present = 0;     /* 64-bit PTE */
    PrintError("Invalid page type: %d\n", pt_type);
/*
 * Write-protect the entry mapping 'va' in the page table at host VA
 * 'pt' by clearing its writable bit, so guest writes to the backing
 * page fault into the shadow pager (mirror of evict_pt, which clears
 * the present bit instead).  Returns -1 for an unrecognized page type.
 * NOTE(review): the switch/case framing, remaining typed-pointer
 * declarations, and break/return statements are on lines not visible
 * in this view of the source.
 */
static inline int grab_pt(void * pt, addr_t va, page_type_t pt_type) {
    pde[PDE32_INDEX(va)].writable = 0;    /* 32-bit page directory entry */
    pde32_4MB_t * pde = pt;               /* 32-bit 4MB large-page PDE */
    pde[PDE32_INDEX(va)].writable = 0;
    pte[PTE32_INDEX(va)].writable = 0;    /* 32-bit page table entry */
    pml4e64_t * pml = pt;                 /* 64-bit PML4 entry */
    pml[PML4E64_INDEX(va)].writable = 0;
    pdp[PDPE64_INDEX(va)].writable = 0;   /* 64-bit PDP entry */
    pde[PDE64_INDEX(va)].writable = 0;    /* 64-bit PDE */
    pte[PTE64_INDEX(va)].writable = 0;    /* 64-bit PTE */
    PrintError("Invalid page type: %d\n", pt_type);
/*
 * Detach 'pg_data' from every parent shadow page table that points at
 * it: for each recorded back-pointer, clear (evict_pt) the parent's
 * entry for the recorded gva and remove the back-pointer from the list.
 * NOTE(review): the loop-closing brace, freeing of back_ptr, and the
 * return statement are on lines not visible in this view.
 */
static int unlink_shdw_pg(struct shdw_pg_data * pg_data) {
    struct shdw_back_ptr * back_ptr = NULL;
    struct shdw_back_ptr * tmp_ptr = NULL;  /* _safe cursor: back_ptr's node is deleted in-loop */

    /* NOTE(review): PrintError used for what reads as a debug trace;
     * PrintDebug would match the compiled-out macro defined above. */
    PrintError("Unlinking gpa=%p, type=%d\n", (void *)pg_data->tuple.gpa, pg_data->tuple.pt_type);

    list_for_each_entry_safe(back_ptr, tmp_ptr, &(pg_data->back_ptrs), back_ptr_node) {
        struct shdw_pg_data * parent = back_ptr->pg_data;

        /* knock this page's entry out of the parent's table */
        evict_pt(parent->hva, back_ptr->gva, parent->tuple.pt_type);
        list_del(&(back_ptr->back_ptr_node));
/*
 * Record in the VM-wide reverse map that shadow page 'pg_data' holds a
 * translation derived from guest physical page 'gpa' at guest virtual
 * address 'gva'.  The per-GPA list is created lazily on first use.
 * NOTE(review): both V3_Malloc results are used unchecked in the
 * visible lines (NULL deref on OOM); the entry->gva assignment and the
 * return statement are on lines not visible in this view.
 */
static int add_rmap(struct v3_vm_info * vm, struct shdw_pg_data * pg_data, addr_t gpa, addr_t gva) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct list_head * rmap_list = NULL;
    struct rmap_entry * entry = NULL;

    rmap_list = (struct list_head *)v3_htable_search(cache_state->reverse_map, gpa);

    if (rmap_list == NULL) {
        /* first mapping derived from this GPA: create its list head */
        rmap_list = V3_Malloc(sizeof(struct list_head));
        INIT_LIST_HEAD(rmap_list);

        v3_htable_insert(cache_state->reverse_map, gpa, (addr_t)rmap_list);

    entry = V3_Malloc(sizeof(struct rmap_entry));

    entry->gpa = pg_data->tuple.gpa;          /* owning shadow page's cache key */
    entry->pt_type = pg_data->tuple.pt_type;

    list_add(&(entry->rmap_node), rmap_list);
/*
 * Guest physical page 'gpa' has been written: walk its reverse-map list
 * and write-protect (grab_pt) every shadow page-table entry derived
 * from it, so subsequent guest writes fault back into the shadow pager.
 * NOTE(review): the 'i' used in the V3_Print below is declared and
 * incremented on lines not visible in this view; closing braces,
 * early returns, and stale-entry cleanup framing are likewise hidden.
 */
static int update_rmap_entries(struct v3_vm_info * vm, addr_t gpa) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct list_head * rmap_list = NULL;
    struct rmap_entry * entry = NULL;

    rmap_list = (struct list_head *)v3_htable_search(cache_state->reverse_map, gpa);

    /* no shadow entries were derived from this GPA: nothing to do */
    if (rmap_list == NULL) {

    PrintError("Updating rmap entries\n\t");

    list_for_each_entry(entry, rmap_list, rmap_node) {
        struct shdw_pg_data * pg_data = NULL;
        struct guest_pg_tuple tuple = {entry->gpa, entry->pt_type};

        V3_Print("%d \n", i);

        pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);

        /* stale rmap entry: the shadow page was evicted without cleaning
         * up its reverse-map entries */
        PrintError("Invalid PTE reference... Should Delete rmap entry\n");

        if (grab_pt(pg_data->hva, entry->gva, entry->pt_type) == -1) {
            PrintError("Could not invalidate reverse map entry\n");
/*
 * Link child shadow page to the parent shadow page table that maps it
 * at guest virtual address 'gva', by pushing a back-pointer onto the
 * child's back_ptrs list (later consumed by unlink_shdw_pg).
 * NOTE(review): V3_Malloc is used unchecked here (NULL deref on OOM);
 * the back_ptr->gva assignment and the return statement are on lines
 * not visible in this view.
 */
static int link_shdw_pg(struct shdw_pg_data * child_pg, struct shdw_pg_data * parent_pg, addr_t gva) {
    struct shdw_back_ptr * back_ptr = V3_Malloc(sizeof(struct shdw_back_ptr));
    memset(back_ptr, 0, sizeof(struct shdw_back_ptr));

    back_ptr->pg_data = parent_pg;

    list_add(&(back_ptr->back_ptr_node), &(child_pg->back_ptrs));
/*
 * Look up a cached shadow page by its (gpa, pt_type) key.  On a hit the
 * page is promoted to the head of the LRU queue so it survives eviction
 * longest.  Returns NULL on a miss.
 * NOTE(review): the return statement and closing braces are on lines
 * not visible in this view.
 */
static struct shdw_pg_data * find_shdw_pt(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;
    struct guest_pg_tuple tuple = {gpa, pt_type};

    pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);

    if (pg_data != NULL) {
        // move pg_data to head of queue, for LRU policy
        list_move(&(pg_data->pg_queue_node), &(cache_state->pg_queue));
/*
 * Remove the shadow page identified by (gpa, pt_type) from the cache:
 * unlink it from all referencing parents, drop it from the hash table,
 * and park the backing page on the free list for later reuse (the page
 * itself is not freed).  A miss is not an error.
 * NOTE(review): closing braces and return statements are on lines not
 * visible in this view.
 */
static int evict_shdw_pg(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;

    pg_data = find_shdw_pt(vm, gpa, pt_type);

    /* NOTE(review): debug trace emitted via PrintError */
    PrintError("Evicting GPA: %p, type=%d\n", (void *)gpa, pt_type);

    if (pg_data != NULL) {
        if (unlink_shdw_pg(pg_data) == -1) {
            PrintError("Error unlinking page...\n");

        v3_htable_remove(cache_state->page_htable, (addr_t)&(pg_data->tuple), 0);

        // Move Page to free list
        list_move(&(pg_data->pg_queue_node), &(cache_state->free_list));
        cache_state->pgs_in_free_list++;
        cache_state->pgs_in_cache--;
/*
 * Reclaim the least-recently-used shadow page (tail of the LRU queue):
 * unlink it from its parents, remove it from the hash table and the
 * queue, and return its shdw_pg_data for the caller to reuse.
 * NOTE(review): error-path and final return statements are on lines not
 * visible in this view.
 */
static struct shdw_pg_data * pop_queue_pg(struct v3_vm_info * vm,
                                          struct cache_vm_state * cache_state) {
    struct shdw_pg_data * pg_data = NULL;

    PrintError("popping page from queue\n");

    /* tail of pg_queue == least recently used (head is most recent) */
    pg_data = list_tail_entry(&(cache_state->pg_queue), struct shdw_pg_data, pg_queue_node);

    if (unlink_shdw_pg(pg_data) == -1) {
        PrintError("Error unlinking cached page\n");

    v3_htable_remove(cache_state->page_htable, (addr_t)&(pg_data->tuple), 0);
    list_del(&(pg_data->pg_queue_node));

    cache_state->pgs_in_cache--;
/*
 * Obtain a shadow page for (gpa, pt_type).  Allocation policy, in order:
 *   1. below the cache cap -> allocate a fresh host page;
 *   2. free list non-empty -> recycle a previously evicted page;
 *   3. otherwise -> evict the LRU page (pop_queue_pg) and reuse it.
 * The page is then zeroed, keyed, inserted in the hash table, and pushed
 * to the head of the LRU queue.
 * NOTE(review): V3_Malloc/V3_AllocPages results are used unchecked in
 * the visible lines; closing braces, the else framing of case 3, and
 * the return statements are on lines not visible in this view.
 */
static struct shdw_pg_data * create_shdw_pt(struct v3_vm_info * vm, addr_t gpa, page_type_t pt_type) {
    struct cache_vm_state * cache_state = vm->shdw_impl.impl_data;
    struct shdw_pg_data * pg_data = NULL;

    PrintError("Creating shdw page: gpa=%p, type=%d\n", (void *)gpa, pt_type);

    if (cache_state->pgs_in_cache < cache_state->max_cache_pgs) {
        /* still under the cap: allocate brand-new tracking struct + page */
        pg_data = V3_Malloc(sizeof(struct shdw_pg_data));

        pg_data->hpa = (addr_t)V3_AllocPages(1);
        pg_data->hva = (void *)V3_VAddr((void *)pg_data->hpa);

    } else if (cache_state->pgs_in_free_list) {

        PrintError("pulling page from free list\n");
        // pull from free list
        pg_data = list_tail_entry(&(cache_state->free_list), struct shdw_pg_data, pg_queue_node);

        list_del(&(pg_data->pg_queue_node));
        cache_state->pgs_in_free_list--;

        /* cache full and free list empty: evict LRU page and reuse it */
        pg_data = pop_queue_pg(vm, cache_state);

    if (pg_data == NULL) {
        PrintError("Error creating Shadow Page table page\n");

    /* fresh or recycled, the page starts out empty */
    memset(pg_data->hva, 0, PAGE_SIZE_4KB);

    pg_data->tuple.gpa = gpa;
    pg_data->tuple.pt_type = pt_type;

    INIT_LIST_HEAD(&(pg_data->back_ptrs));

    v3_htable_insert(cache_state->page_htable, (addr_t)&(pg_data->tuple), (addr_t)pg_data);

    /* newest page goes to the head of the LRU queue */
    list_add(&(pg_data->pg_queue_node), &(cache_state->pg_queue));
    cache_state->pgs_in_cache++;
412 #include "vmm_shdw_pg_cache_32.h"
413 //#include "vmm_shdw_pg_cache_32pae.h"
414 //#include "vmm_shdw_pg_cache_64.h"
417 static uint_t cache_hash_fn(addr_t key) {
418 struct guest_pg_tuple * tuple = (struct guest_pg_tuple *)key;
420 return v3_hash_buffer((uint8_t *)tuple, sizeof(struct guest_pg_tuple));
423 static int cache_eq_fn(addr_t key1, addr_t key2) {
424 struct guest_pg_tuple * tuple1 = (struct guest_pg_tuple *)key1;
425 struct guest_pg_tuple * tuple2 = (struct guest_pg_tuple *)key2;
427 return ((tuple1->gpa == tuple2->gpa) && (tuple1->pt_type == tuple2->pt_type));
430 static uint_t rmap_hash_fn(addr_t key) {
431 return v3_hash_long(key, sizeof(addr_t) * 8);
434 static int rmap_eq_fn(addr_t key1, addr_t key2) {
435 return (key1 == key2);
439 static int cache_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
440 struct v3_shdw_impl_state * vm_state = &(vm->shdw_impl);
441 struct cache_vm_state * cache_state = NULL;
442 int cache_size = DEFAULT_CACHE_SIZE;
443 char * cache_sz_str = v3_cfg_val(cfg, "cache_size");
445 if (cache_sz_str != NULL) {
446 cache_size = ((atoi(cache_sz_str) * 1024 * 1024) / 4096);
449 V3_Print("Shadow Page Cache initialization\n");
451 cache_state = V3_Malloc(sizeof(struct cache_vm_state));
452 memset(cache_state, 0, sizeof(struct cache_vm_state));
454 cache_state->page_htable = v3_create_htable(0, cache_hash_fn, cache_eq_fn);
455 cache_state->reverse_map = v3_create_htable(0, rmap_hash_fn, rmap_eq_fn);
456 v3_lock_init(&(cache_state->cache_lock));
457 INIT_LIST_HEAD(&(cache_state->pg_queue));
458 INIT_LIST_HEAD(&(cache_state->free_list));
459 cache_state->max_cache_pgs = cache_size;
461 vm_state->impl_data = cache_state;
467 static int cache_deinit(struct v3_vm_info * vm) {
/* Per-core initialization hook.  No per-core state is set up in the
 * visible source (the commented line below hints at a planned use of
 * the per-VM shadow paging state). */
static int cache_local_init(struct guest_info * core) {
    // struct v3_shdw_pg_state * core_state = &(vm->shdw_pg_state);
/*
 * Activate shadow page tables for the core's current CPU mode.  Only
 * the 32-bit protected-mode path is compiled in; the PAE and long-mode
 * variants are stubbed out (see the commented includes above).
 * NOTE(review): the case labels and default framing of this switch are
 * on lines not visible in this view.
 */
static int cache_activate_shdw_pt(struct guest_info * core) {
    switch (v3_get_vm_cpu_mode(core)) {

        PrintError("Calling 32 bit cache activation\n");
        return activate_shadow_pt_32(core);

        // return activate_shadow_pt_32pae(core);

        // return activate_shadow_pt_64(core);

        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
/* Invalidate the shadow context by rebuilding it: re-activating the
 * shadow page tables discards every cached translation in one shot. */
static int cache_invalidate_shdw_pt(struct guest_info * core) {
    V3_Print("Cache invalidation called\n");

    return cache_activate_shdw_pt(core);
}
/*
 * Shadow page-fault dispatch: route the fault to the handler matching
 * the core's current CPU mode.  Only the 32-bit protected-mode handler
 * is compiled in; PAE and long-mode handlers are stubbed out.
 * NOTE(review): the case labels and default framing of this switch are
 * on lines not visible in this view.
 */
static int cache_handle_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {

    switch (v3_get_vm_cpu_mode(core)) {
        return handle_shadow_pagefault_32(core, fault_addr, error_code);

        // return handle_shadow_pagefault_32pae(core, fault_addr, error_code);

        // return handle_shadow_pagefault_64(core, fault_addr, error_code);

        PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
/*
 * Guest INVLPG dispatch: invalidate the shadow mapping for 'vaddr' via
 * the handler matching the core's current CPU mode.  Only the 32-bit
 * protected-mode handler is compiled in.
 * NOTE(review): the case labels and default framing of this switch are
 * on lines not visible in this view; PrintError is used for what reads
 * as a debug trace.
 */
static int cache_handle_invlpg(struct guest_info * core, addr_t vaddr) {
    PrintError("INVLPG called for %p\n", (void *)vaddr);

    switch (v3_get_vm_cpu_mode(core)) {
        return handle_shadow_invlpg_32(core, vaddr);

        // return handle_shadow_invlpg_32pae(core, vaddr);

        // return handle_shadow_invlpg_64(core, vaddr);

        PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
/*
 * Callback table registered with the Palacios shadow paging framework
 * under the name "SHADOW_CACHE".
 * NOTE(review): a .init = cache_init member is expected but is on a
 * line not visible in this view -- confirm it is present, otherwise
 * cache_init is never wired up.
 */
static struct v3_shdw_pg_impl cache_impl = {
    .name = "SHADOW_CACHE",
    .deinit = cache_deinit,
    .local_init = cache_local_init,
    .handle_pagefault = cache_handle_pf,
    .handle_invlpg = cache_handle_invlpg,
    .activate_shdw_pt = cache_activate_shdw_pt,
    .invalidate_shdw_pt = cache_invalidate_shdw_pt
563 register_shdw_pg_impl(&cache_impl);