2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19 #include <palacios/vmm.h>
22 #include <palacios/vmm_sym_swap.h>
23 #include <palacios/vmm_list.h>
24 #include <palacios/vm_guest.h>
26 #ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
27 #include <palacios/vmm_telemetry.h>
// This is a hack and 32 bit linux specific.... need to fix...
// Interpretation of a swapped-out 32-bit Linux PTE: when the present bit is
// clear, Linux reuses the PTE bits to encode (swap device, swap slot).
// NOTE(review): 8 + 23 = 31 bits; the remaining low bit (the cleared present
// bit) is presumably declared on a line not visible in this chunk -- confirm.
uint32_t dev_index : 8;   // selects swap_state->devs[] entry (swap device id)
uint32_t pg_index : 23;   // page slot within that swap device
// Tracks one shadow PTE that currently maps a swapped-out guest page, so the
// mapping can be invalidated when the page is swapped back in (see
// v3_swap_in_notify) or the table is flushed (see v3_swap_flush).
struct shadow_pointer {
    // NOTE(review): field declarations used elsewhere in this file
    // (shadow_pte, guest_pte, pg_index, dev_index) are on lines not visible
    // in this chunk.
    struct list_head node;  // linkage in the per-guest-PTE list stored in shdw_ptr_ht
// Hash function for shdw_ptr_ht; the key is the raw 32-bit swapped-out guest
// PTE value, so only the low 32 bits are folded into the hash.
static uint_t swap_hash_fn(addr_t key) {
    return v3_hash_long(key, 32);
// Equality function for shdw_ptr_ht: two keys match iff the raw swapped-out
// PTE values are identical.
static int swap_eq_fn(addr_t key1, addr_t key2) {
    return (key1 == key2);
// Extract the swap-slot (page) index encoded in a swapped-out guest PTE.
static inline uint32_t get_pg_index(pte32_t * pte) {
    return ((struct swap_pte *)pte)->pg_index;
// Extract the swap-device index encoded in a swapped-out guest PTE.
static inline uint32_t get_dev_index(pte32_t * pte) {
    return ((struct swap_pte *)pte)->dev_index;
#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
// Telemetry callback registered in v3_init_sym_swap: dumps the swap
// subsystem's counters, prefixing each line with the framework-supplied
// header string 'hdr'.  'private_data' is unused (registered as NULL).
static void telemetry_cb(struct guest_info * info, void * private_data, char * hdr) {
    // counters live in the VM-wide state, reached through the faulting core
    struct v3_sym_swap_state * swap_state = &(info->vm_info->swap_state);

    V3_Print("%sSymbiotic Swap:\n", hdr);
    V3_Print("%s\tRead faults=%d\n", hdr, swap_state->read_faults);
    V3_Print("%s\tWrite faults=%d\n", hdr, swap_state->write_faults);
    V3_Print("%s\tMapped Pages=%d\n", hdr, swap_state->mapped_pages);
    V3_Print("%s\tFlushes=%d\n", hdr, swap_state->flushes);
    V3_Print("%s\tlist size=%d\n", hdr, swap_state->list_size);
86 int v3_init_sym_swap(struct v3_vm_info * vm) {
87 struct v3_sym_swap_state * swap_state = &(vm->swap_state);
89 memset(swap_state, 0, sizeof(struct v3_sym_swap_state));
90 swap_state->shdw_ptr_ht = v3_create_htable(0, swap_hash_fn, swap_eq_fn);
92 #ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
93 if (info->enable_telemetry) {
94 v3_add_telemetry_cb(vm, telemetry_cb, NULL);
98 PrintDebug("Initialized Symbiotic Swap\n");
/*
 * Register a swap backend (device driver) in device slot 'dev_index'.
 * 'ops' supplies the backend callbacks (e.g. get_swap_entry) and
 * 'private_data' is the opaque state passed back to those callbacks.
 * NOTE(review): 'dev_index' indexes devs[] unchecked here; the array bound
 * and the function's return statement are on lines not visible in this
 * chunk -- confirm validation exists.
 */
int v3_register_swap_disk(struct v3_vm_info * vm, int dev_index,
                          struct v3_swap_ops * ops, void * private_data) {
    struct v3_sym_swap_state * swap_state = &(vm->swap_state);

    swap_state->devs[dev_index].present = 1;
    swap_state->devs[dev_index].private_data = private_data;
    swap_state->devs[dev_index].ops = ops;
/*
 * Notification that the guest swapped the page at (dev_index, pg_index)
 * back in.  Any shadow PTEs we installed for that swapped-out page are now
 * stale: mark each one not-present so the next guest access refaults, and
 * unlink its tracking entry.
 */
int v3_swap_in_notify(struct v3_vm_info * vm, int pg_index, int dev_index) {
    struct list_head * shdw_ptr_list = NULL;
    struct v3_sym_swap_state * swap_state = &(vm->swap_state);
    struct shadow_pointer * tmp_shdw_ptr = NULL;
    struct shadow_pointer * shdw_ptr = NULL;
    // Reconstruct the swapped-out PTE encoding the guest used for this page;
    // this is the key under which shadow pointers were hashed at map time.
    struct swap_pte guest_pte = {0, dev_index, pg_index};

    // NOTE(review): '*(addr_t *)&(guest_pte)' reads sizeof(addr_t) bytes from
    // a 4-byte struct; on a 64-bit host this reads past the object (UB) and
    // yields a key that cannot match the 32-bit value used at insert time in
    // v3_map_swp_page ('(addr_t)*(uint32_t *)guest_pte') -- confirm and fix.
    shdw_ptr_list = (struct list_head * )v3_htable_search(swap_state->shdw_ptr_ht, *(addr_t *)&(guest_pte));

    // no shadow mappings tracked for this PTE (early return not visible here)
    if (shdw_ptr_list == NULL) {

    // _safe variant: entries are deleted while iterating
    list_for_each_entry_safe(shdw_ptr, tmp_shdw_ptr, shdw_ptr_list, node) {
        if ((shdw_ptr->pg_index == pg_index) &&
            (shdw_ptr->dev_index == dev_index)) {

            // Trigger faults for next shadow access
            shdw_ptr->shadow_pte->present = 0;

            // Delete entry from list
            list_del(&(shdw_ptr->node));
/*
 * Invalidate every shadow PTE installed for swapped-in pages.  Walks the
 * whole shdw_ptr_ht hashtable; for each guest PTE's list, marks each tracked
 * shadow PTE not-present (forcing a refault) and unlinks the entry.
 */
int v3_swap_flush(struct v3_vm_info * vm) {
    struct v3_sym_swap_state * swap_state = &(vm->swap_state);
    struct hashtable_iter * ht_iter = v3_create_htable_iter(swap_state->shdw_ptr_ht);

    //    PrintDebug("Flushing Symbiotic Swap table\n");

#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
    swap_state->flushes++;
    // (matching #endif is on a line not visible in this chunk)

    // iterator allocation failed -- logged; surrounding control flow for this
    // error path is not visible in this chunk
        PrintError("NULL iterator in swap flush!! Probably will crash soon...\n");

    while (ht_iter->entry) {
        struct shadow_pointer * tmp_shdw_ptr = NULL;
        struct shadow_pointer * shdw_ptr = NULL;
        struct list_head * shdw_ptr_list = (struct list_head *)v3_htable_get_iter_value(ht_iter);

        // delete all swapped entries
        // we can leave the list_head structures and reuse them for the next round
        list_for_each_entry_safe(shdw_ptr, tmp_shdw_ptr, shdw_ptr_list, node) {
            if (shdw_ptr == NULL) {
                PrintError("Null shadow pointer in swap flush!! Probably crashing soon...\n");

            // Trigger faults for next shadow access
            shdw_ptr->shadow_pte->present = 0;

            // Delete entry from list
            // NOTE(review): entries are unlinked but no V3_Free(shdw_ptr) is
            // visible in this chunk -- possible leak unless freed on a
            // hidden line; confirm against the full file.
            list_del(&(shdw_ptr->node));

        v3_htable_iter_advance(ht_iter);
/*
 * Ask the symbiotic guest (via symcall SYMCALL_MEM_LOOKUP) for the access
 * permissions of guest virtual address 'vaddr', e.g. whether the page is in
 * the guest page cache or out on a swap disk.  On success *page_perms is
 * filled in by the symcall.
 */
int v3_get_vaddr_perms(struct guest_info * info, addr_t vaddr, pte32_t * guest_pte, pf_error_t * page_perms) {
    // widen the raw 32-bit PTE so it travels through the 64-bit symcall ABI
    uint64_t pte_val = (uint64_t)*(uint32_t *)guest_pte;

    // symcall to check if page is in cache or on swap disk
    if (v3_sym_call3(info, SYMCALL_MEM_LOOKUP, (uint64_t *)&vaddr, (uint64_t *)&pte_val, (uint64_t *)page_perms) == -1) {
        PrintError("Sym call error?? that's weird... \n");

    //    V3_Print("page perms = %x\n", *(uint32_t *)page_perms);
/*
 * Resolve the host address of a swapped-out guest page: decode the swap
 * device and slot from the (swapped-out) guest PTE and ask the registered
 * backend for the slot's backing page.
 */
addr_t v3_get_swapped_pg_addr(struct v3_vm_info * vm, pte32_t * guest_pte) {
    struct v3_sym_swap_state * swap_state = &(vm->swap_state);
    int dev_index = get_dev_index(guest_pte);
    struct v3_swap_dev * swp_dev = &(swap_state->devs[dev_index]);

    // no backend registered in this device slot (error return not visible
    // in this chunk)
    if (! swp_dev->present ) {

    return (addr_t)swp_dev->ops->get_swap_entry(get_pg_index(guest_pte), swp_dev->private_data);
/*
 * Map a swapped-out guest page straight into the shadow page tables and
 * record a back-pointer (keyed by the raw swapped-out guest PTE value) so
 * the mapping can be torn down on swap-in or flush.  Returns the
 * page-aligned host physical frame of the swap page.
 */
addr_t v3_map_swp_page(struct v3_vm_info * vm, pte32_t * shadow_pte, pte32_t * guest_pte, void * swp_page_ptr) {
    struct list_head * shdw_ptr_list = NULL;
    struct v3_sym_swap_state * swap_state = &(vm->swap_state);
    struct shadow_pointer * shdw_ptr = NULL;

    // backend had no page for this slot (error return not visible here)
    if (swp_page_ptr == NULL) {
        //	PrintError("Swapped out page not found on swap device\n");

    // one shadow-pointer list per distinct swapped-out PTE value
    shdw_ptr_list = (struct list_head *)v3_htable_search(swap_state->shdw_ptr_ht, (addr_t)*(uint32_t *)guest_pte);

    if (shdw_ptr_list == NULL) {
        // BUG(review): sizeof(struct list_head *) allocates pointer-size,
        // not sizeof(struct list_head) -- undersized buffer; INIT_LIST_HEAD
        // writes past the allocation.  Should be sizeof(struct list_head).
        shdw_ptr_list = (struct list_head *)V3_Malloc(sizeof(struct list_head *));
        swap_state->list_size++;
        INIT_LIST_HEAD(shdw_ptr_list);
        v3_htable_insert(swap_state->shdw_ptr_ht, (addr_t)*(uint32_t *)guest_pte, (addr_t)shdw_ptr_list);

    shdw_ptr = (struct shadow_pointer *)V3_Malloc(sizeof(struct shadow_pointer));

    if (shdw_ptr == NULL) {
        PrintError("MEMORY LEAK\n");
        // BUG(review): 'info' is not in scope here (this function receives
        // 'vm'); as written this call cannot compile -- confirm against the
        // full file and pass a valid guest_info (or drop the call).
        telemetry_cb(info, NULL, "");

    // record which shadow PTE maps this swapped page, and under which key
    shdw_ptr->shadow_pte = shadow_pte;
    shdw_ptr->guest_pte = *(uint32_t *)guest_pte;
    shdw_ptr->pg_index = get_pg_index(guest_pte);
    shdw_ptr->dev_index = get_dev_index(guest_pte);

    // We don't check for conflicts, because it should not happen...
    list_add(&(shdw_ptr->node), shdw_ptr_list);

    return PAGE_BASE_ADDR((addr_t)V3_PAddr(swp_page_ptr));
/*
 * Check whether a shadow fault hit a page we believe we already mapped in
 * (which should not happen).  Definition continues past the end of this
 * chunk.
 */
int v3_is_mapped_fault(struct guest_info * info, pte32_t * shadow_pte, pte32_t * guest_pte) {
    struct list_head * shdw_ptr_list = NULL;

    // NOTE(review): two problems here -- 'swap_state' has no visible
    // declaration in this scope, and '*(addr_t *)&(guest_pte)' hashes the
    // POINTER value of guest_pte, not the PTE contents used as the key at
    // insert time ('(addr_t)*(uint32_t *)guest_pte' in v3_map_swp_page).
    // Confirm against the full file.
    shdw_ptr_list = (struct list_head * )v3_htable_search(swap_state->shdw_ptr_ht, *(addr_t *)&(guest_pte));

    if (shdw_ptr_list != NULL) {
        PrintError("We faulted on a mapped in page....\n");