#define DEFINE_HASHTABLE_INSERT(fnname, keytype, valuetype) \
- int fnname (struct hashtable * htable, keytype * key, valuetype * value) { \
- return hashtable_insert(htable, key, value); \
+ int fnname (struct hashtable * htable, keytype key, valuetype value) { \
+ return hashtable_insert(htable, (addr_t)key, (addr_t)value); \
}
#define DEFINE_HASHTABLE_SEARCH(fnname, keytype, valuetype) \
- valuetype * fnname (struct hashtable * htable, keytype * key) { \
- return (valuetype *) (hashtable_search(htable, key)); \
+ valuetype * fnname (struct hashtable * htable, keytype key) { \
+ return (valuetype *) (hashtable_search(htable, (addr_t)key)); \
}
-#define DEFINE_HASHTABLE_REMOVE(fnname, keytype, valuetype) \
- valuetype * fnname (struct hashtable * htable, keytype * key) { \
- return (valuetype *) (hashtable_remove(htable, key)); \
+#define DEFINE_HASHTABLE_REMOVE(fnname, keytype, valuetype, free_key) \
+ valuetype * fnname (struct hashtable * htable, keytype key) { \
+ return (valuetype *) (hashtable_remove(htable, (addr_t)key, free_key)); \
}
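/* Usage sketch (illustrative; it mirrors the wrapper instantiations that appear
 * later in this patch). Each macro expands to a typed wrapper around the
 * addr_t-based core functions, e.g.:
 *
 *   DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
 *   DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
 *   DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
 *
 *   // 'cache', 'guest_pte_addr' and 'host_pte_addr' are placeholder names
 *   add_pte_map(cache, guest_pte_addr, host_pte_addr);
 *   if (find_pte_map(cache, guest_pte_addr) != NULL) { ... }
 */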
struct hashtable * create_hashtable(uint_t min_size,
- uint_t (*hashfunction) (void * key),
- int (*key_eq_fn) (void * key1, void * key2));
+ uint_t (*hashfunction) (addr_t key),
+ int (*key_eq_fn) (addr_t key1, addr_t key2));
-void hashtable_destroy(struct hashtable * htable, int free_values);
+void hashtable_destroy(struct hashtable * htable, int free_values, int free_keys);
/*
* returns non-zero for successful insertion
 * This function does not check for repeated insertions with a duplicate key;
 * when the table changes size, the retrieval order of duplicate-key
 * entries is reversed.
* If in doubt, remove before insert.
*/
-int hashtable_insert(struct hashtable * htable, void * key, void * value);
+int hashtable_insert(struct hashtable * htable, addr_t key, addr_t value);
-int hashtable_change(struct hashtable * htable, void * key, void * value);
+int hashtable_change(struct hashtable * htable, addr_t key, addr_t value, int free_value);
// returns the value associated with the key, or NULL if none found
-void * hashtable_search(struct hashtable * htable, void * key);
+addr_t hashtable_search(struct hashtable * htable, addr_t key);
// removes and returns the value associated with the key, or NULL if none found
-void * hashtable_remove(struct hashtable * htable, void * key);
+addr_t hashtable_remove(struct hashtable * htable, addr_t key, int free_key);
uint_t hashtable_count(struct hashtable * htable);
/* key - return the key of the (key,value) pair at the current position */
//extern inline
-void * hashtable_get_iter_key(struct hashtable_iter * iter);
+addr_t hashtable_get_iter_key(struct hashtable_iter * iter);
/* {
return iter->entry->key;
}
*/
/* value - return the value of the (key,value) pair at the current position */
//extern inline
-void * hashtable_get_iter_value(struct hashtable_iter * iter);
+addr_t hashtable_get_iter_value(struct hashtable_iter * iter);
/* {
return iter->entry->value;
}
*/
/* remove - remove the current entry and advance the iterator to the next entry.
 * NB: if you need the value in order to free it, read it before
 * removing. ie: beware memory leaks!
* returns zero if advanced to end of table
*/
-int hashtable_iterator_remove(struct hashtable_iter * iter);
+int hashtable_iterator_remove(struct hashtable_iter * iter, int free_key);
/* search - overwrite the supplied iterator, to point to the entry
* matching the supplied key.
* returns zero if not found. */
-int hashtable_iterator_search(struct hashtable_iter * iter, struct hashtable * htable, void * key);
+int hashtable_iterator_search(struct hashtable_iter * iter, struct hashtable * htable, addr_t key);
/* Page Table Flag Values */
#define PT32_HOOK 0x1
+#define PT32_GUEST_PT 0x2
#endif
#include <palacios/vmm_util.h>
#include <palacios/vmm_paging.h>
+#include <palacios/vmm_hashtable.h>
struct shadow_page_state {
ullong_t shadow_cr3;
+ // Hash table that ties a CR3 value to a hash table pointer for the PT entries
+ struct hashtable * cr3_cache;
+ // Hash table that contains a mapping of guest pte addresses to host pte addresses
+ struct hashtable * cached_ptes;
+ addr_t cached_cr3;
+
};
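/* A minimal sketch of the intended flow (simplified from the CR3-write handler
 * and the fault handlers later in this patch; error handling elided):
 *
 *   int cached = cache_page_tables32(info, CR3_TO_PDE32(*(addr_t *)new_cr3));
 *   if (cached == 0) {
 *       // new guest CR3: build a fresh shadow page table
 *   } else if (cached == 1) {
 *       // pde matches cached_cr3: reuse the existing shadow tables
 *   }
 *
 * The PTE fault handlers then consult find_pte_map(cached_ptes, ...) to spot
 * writes that hit guest page-table pages and clear cached_cr3 when one occurs.
 */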
-
+int cache_page_tables32(struct guest_info * info, addr_t pde);
int init_shadow_page_state(struct guest_info * info);
const uchar_t * vmexit_code_to_str(uint_t exit_code) {
-
switch(exit_code) {
case VMEXIT_CR0_READ:
return VMEXIT_CR0_READ_STR;
-/*static int mem_test_read(addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
- int foo = 20;
+static int mem_test_read(addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
+ int foo = 20;
memcpy(dst, &foo, length);
PrintDebug("Passthrough mem read returning: %d (length=%d)\n", foo + (guest_addr & 0xfff), length);
return length;
- }*/
+}
static int passthrough_mem_read(addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
memcpy(dst, (void*)guest_addr, length);
//
add_shadow_region_passthrough(info, 0x0, 0xa0000, (addr_t)V3_AllocPages(160));
- //add_shadow_region_passthrough(info, 0xa0000, 0xc0000, 0xa0000);
- hook_guest_mem(info, 0xa0000, 0xc0000, passthrough_mem_read, passthrough_mem_write, NULL);
-
+ if (1) {
+ add_shadow_region_passthrough(info, 0xa0000, 0xc0000, 0xa0000);
+ } else {
+ hook_guest_mem(info, 0xa0000, 0xc0000, passthrough_mem_read, passthrough_mem_write, NULL);
+ }
// TEMP
//add_shadow_region_passthrough(info, 0xc0000, 0xc8000, 0xc0000);
}
-
+ if (1) {
add_shadow_region_passthrough(info, 0x100000, 0x1000000, (addr_t)V3_AllocPages(4096));
- /* MEMORY HOOK TEST */
- /* {
-
+ } else {
+ /* MEMORY HOOK TEST */
add_shadow_region_passthrough(info, 0x100000, 0xa00000, (addr_t)V3_AllocPages(2304));
- hook_guest_mem(info, 0xa00000, 0xa01000, mem_test_read, passthrough_mem_write, NULL);
-
+ hook_guest_mem(info, 0xa00000, 0xa01000, mem_test_read, passthrough_mem_write, NULL);
add_shadow_region_passthrough(info, 0xa01000, 0x1000000, (addr_t)V3_AllocPages(1791));
-
}
-*/
+
add_shadow_region_passthrough(info, 0x1000000, 0x8000000, (addr_t)V3_AllocPages(32768));
// test - give linux access to PCI space - PAD
#endif
-// Set to 1 if CR3 reload with same value shall not
-// force a shadow page table flush
-// It makes windows loading MUCH faster.
-// Note that this optimization appears to fail with a 2.6 linux kernel
-#define CR3_RELOAD_OPTIMIZATION 1
-
-
-
-
struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand);
struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.shadow_cr3);
+ int cached = 0;
+
PrintDebug("Old Shadow CR3=%x; Old Guest CR3=%x\n",
*(uint_t*)shadow_cr3, *(uint_t*)guest_cr3);
- if (!CR3_RELOAD_OPTIMIZATION || !CR3_32_SAME_BASE(new_cr3, guest_cr3)) {
+
+ cached = cache_page_tables32(info, CR3_TO_PDE32(*(addr_t *)new_cr3));
+ if (cached == -1) {
+ PrintError("CR3 Cache failed\n");
+ return -1;
+ } else if (cached == 0) {
+
+
addr_t shadow_pt;
shadow_pt = create_new_shadow_pt32();
shadow_cr3->pdt_base_addr = PD32_BASE_ADDR(shadow_pt);
- }
+ } else {
+ PrintDebug("Reusing cached shadow Page table\n");
+ }
shadow_cr3->pwt = new_cr3->pwt;
shadow_cr3->pcd = new_cr3->pcd;
return 0;
}
+
+
+
+
struct hash_entry {
- void * key;
- void * value;
+ addr_t key;
+ addr_t value;
uint_t hash;
struct hash_entry * next;
};
uint_t entry_count;
uint_t load_limit;
uint_t prime_index;
- uint_t (*hash_fn) (void * key);
- int (*eq_fn) (void * key1, void * key2);
+ uint_t (*hash_fn) (addr_t key);
+ int (*eq_fn) (addr_t key1, addr_t key2);
};
-uint_t do_hash(struct hashtable * htable, void * key) {
+uint_t do_hash(struct hashtable * htable, addr_t key) {
/* Aim to protect against poor hash functions by adding logic here
* - logic taken from java 1.4 hashtable source */
uint_t i = htable->hash_fn(key);
/*****************************************************************************/
struct hashtable * create_hashtable(uint_t min_size,
- uint_t (*hash_fn) (void *),
- int (*eq_fn) (void *, void *)) {
+ uint_t (*hash_fn) (addr_t),
+ int (*eq_fn) (addr_t, addr_t)) {
struct hashtable * htable;
uint_t prime_index;
uint_t size = primes[0];
}
/*****************************************************************************/
-int hashtable_insert(struct hashtable * htable, void * key, void * value) {
+int hashtable_insert(struct hashtable * htable, addr_t key, addr_t value) {
/* This method allows duplicate keys - but they shouldn't be used */
uint_t index;
struct hash_entry * new_entry;
-int hashtable_change(struct hashtable * htable, void * key, void * value) {
+int hashtable_change(struct hashtable * htable, addr_t key, addr_t value, int free_value) {
struct hash_entry * tmp_entry;
uint_t hash_value;
uint_t index;
while (tmp_entry != NULL) {
/* Check hash value to short circuit heavier comparison */
if ((hash_value == tmp_entry->hash) && (htable->eq_fn(key, tmp_entry->key))) {
- V3_Free(tmp_entry->value);
- tmp_entry->value = value;
- return -1;
+ if (free_value) {
+ V3_Free((void *)(tmp_entry->value));
+ }
+
+ tmp_entry->value = value;
+ return -1;
}
tmp_entry = tmp_entry->next;
}
/*****************************************************************************/
/* returns value associated with key */
-void * hashtable_search(struct hashtable * htable, void * key) {
+addr_t hashtable_search(struct hashtable * htable, addr_t key) {
struct hash_entry * cursor;
uint_t hash_value;
uint_t index;
cursor = cursor->next;
}
- return NULL;
+ return (addr_t)NULL;
}
/*****************************************************************************/
/* returns value associated with key */
-void * hashtable_remove(struct hashtable * htable, void * key) {
+addr_t hashtable_remove(struct hashtable * htable, addr_t key, int free_key) {
/* TODO: consider compacting the table when the load factor drops enough,
* or provide a 'compact' method. */
struct hash_entry * cursor;
struct hash_entry ** entry_ptr;
- void * value;
+ addr_t value;
uint_t hash_value;
uint_t index;
htable->entry_count--;
value = cursor->value;
- freekey(cursor->key);
+ if (free_key) {
+ freekey((void *)(cursor->key));
+ }
V3_Free(cursor);
return value;
entry_ptr = &(cursor->next);
cursor = cursor->next;
}
- return NULL;
+ return (addr_t)NULL;
}
/*****************************************************************************/
/* destroy */
-void hashtable_destroy(struct hashtable * htable, int free_values) {
+void hashtable_destroy(struct hashtable * htable, int free_values, int free_keys) {
uint_t i;
struct hash_entry * cursor;
struct hash_entry **table = htable->table;
tmp = cursor;
cursor = cursor->next;
- freekey(tmp->key);
- V3_Free(tmp->value);
+ if (free_keys) {
+ freekey((void *)(tmp->key));
+ }
+ V3_Free((void *)(tmp->value));
V3_Free(tmp);
}
}
tmp = cursor;
cursor = cursor->next;
-
- freekey(tmp->key);
+
+ if (free_keys) {
+ freekey((void *)(tmp->key));
+ }
V3_Free(tmp);
}
}
}
-void * hashtable_get_iter_key(struct hashtable_iter * iter) {
+addr_t hashtable_get_iter_key(struct hashtable_iter * iter) {
return iter->entry->key;
}
-void * hashtable_get_iter_value(struct hashtable_iter * iter) {
+addr_t hashtable_get_iter_value(struct hashtable_iter * iter) {
return iter->entry->value;
}
* If you want the value, read it before you remove:
* beware memory leaks if you don't.
* Returns zero if end of iteration. */
-int hashtable_iterator_remove(struct hashtable_iter * iter) {
+int hashtable_iterator_remove(struct hashtable_iter * iter, int free_key) {
struct hash_entry * remember_entry;
struct hash_entry * remember_parent;
int ret;
/* iter->entry is now outside the hashtable */
remember_entry = iter->entry;
iter->htable->entry_count--;
- freekey(remember_entry->key);
-
+ if (free_key) {
+ freekey((void *)(remember_entry->key));
+ }
+
/* Advance the iterator, correcting the parent */
remember_parent = iter->parent;
ret = hashtable_iterator_advance(iter);
/* returns zero if not found */
int hashtable_iterator_search(struct hashtable_iter * iter,
- struct hashtable * htable, void * key) {
+ struct hashtable * htable, addr_t key) {
struct hash_entry * entry;
struct hash_entry * parent;
uint_t hash_value;
#include <palacios/vm_guest_mem.h>
-extern struct vmm_os_hooks * os_hooks;
+
void delete_page_tables_pde32(pde32_t * pde) {
int i;//, j;
}
*/
//PrintDebug("Deleting PTE %d (%x)\n", i, pte);
- os_hooks->free_page(pte);
+ V3_FreePage(pte);
}
}
// PrintDebug("Deleting PDE (%x)\n", pde);
- os_hooks->free_page(pde);
+ V3_FreePage(pde);
}
int i, j;
struct shadow_map * map = &(guest_info->mem_map);
- pde32_t * pde = os_hooks->allocate_pages(1);
+ pde32_t * pde = V3_AllocPages(1);
for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
int pte_present = 0;
- pte32_t * pte = os_hooks->allocate_pages(1);
+ pte32_t * pte = V3_AllocPages(1);
for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
}
if (pte_present == 0) {
- os_hooks->free_page(pte);
+ V3_FreePage(pte);
pde[i].present = 0;
pde[i].writable = 0;
+
+
+DEFINE_HASHTABLE_INSERT(add_cr3_to_cache, addr_t, struct hashtable *);
+DEFINE_HASHTABLE_SEARCH(find_cr3_in_cache, addr_t, struct hashtable *);
+DEFINE_HASHTABLE_REMOVE(del_cr3_from_cache, addr_t, struct hashtable *, 0);
+
+
+DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
+DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
+DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);
+
+
+
+
+static uint_t pte_hash_fn(addr_t key) {
+ return hash_long(key, 32);
+}
+
+static int pte_equals(addr_t key1, addr_t key2) {
+ return (key1 == key2);
+}
+
+static uint_t cr3_hash_fn(addr_t key) {
+ return hash_long(key, 32);
+}
+
+static int cr3_equals(addr_t key1, addr_t key2) {
+ return (key1 == key2);
+}
+
+
static int handle_shadow_pte32_fault(struct guest_info* info,
addr_t fault_addr,
pf_error_t error_code,
state->guest_cr3 = 0;
state->shadow_cr3 = 0;
+
+ state->cr3_cache = create_hashtable(0, &cr3_hash_fn, &cr3_equals);
+
+ state->cached_cr3 = 0;
+ state->cached_ptes = NULL;
+
return 0;
}
+/*
+  For now we'll do something a little more lightweight (see the active
+  cache_page_tables32() below); this fuller per-CR3 cache is kept for reference:
+int cache_page_tables32(struct guest_info * info, addr_t pde) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ addr_t pde_host_addr;
+ pde32_t * tmp_pde;
+ struct hashtable * pte_cache = NULL;
+ int i = 0;
+
+
+ pte_cache = (struct hashtable *)find_cr3_in_cache(state->cr3_cache, pde);
+ if (pte_cache != NULL) {
+ PrintError("CR3 already present in cache\n");
+ state->current_ptes = pte_cache;
+ return 1;
+ } else {
+ PrintError("Creating new CR3 cache entry\n");
+ pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
+ state->current_ptes = pte_cache;
+ add_cr3_to_cache(state->cr3_cache, pde, pte_cache);
+ }
+
+ if (guest_pa_to_host_va(info, pde, &pde_host_addr) == -1) {
+ PrintError("Could not lookup host address of guest PDE\n");
+ return -1;
+ }
+
+ tmp_pde = (pde32_t *)pde_host_addr;
+
+ add_pte_map(pte_cache, pde, pde_host_addr);
+
+
+ for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
+ if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
+ addr_t pte_host_addr;
+
+ if (guest_pa_to_host_va(info, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), &pte_host_addr) == -1) {
+ PrintError("Could not lookup host address of guest PDE\n");
+ return -1;
+ }
+
+ add_pte_map(pte_cache, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), pte_host_addr);
+ }
+ }
+
+
+ return 0;
+}
+*/
+
+int cache_page_tables32(struct guest_info * info, addr_t pde) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ addr_t pde_host_addr;
+ pde32_t * tmp_pde;
+ struct hashtable * pte_cache = NULL;
+ int i = 0;
+
+ if (pde == state->cached_cr3) {
+ return 1;
+ }
+
+ if (state->cached_ptes != NULL) {
+ hashtable_destroy(state->cached_ptes, 0, 0);
+ state->cached_ptes = NULL;
+ }
+
+ state->cached_cr3 = pde;
+
+ pte_cache = create_hashtable(0, &pte_hash_fn, &pte_equals);
+ state->cached_ptes = pte_cache;
+
+ if (guest_pa_to_host_pa(info, pde, &pde_host_addr) == -1) {
+ PrintError("Could not lookup host address of guest PDE\n");
+ return -1;
+ }
+
+ tmp_pde = (pde32_t *)pde_host_addr;
+
+ add_pte_map(pte_cache, pde, pde_host_addr);
+
+
+ for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
+ if ((tmp_pde[i].present) && (tmp_pde[i].large_page == 0)) {
+ addr_t pte_host_addr;
+
+ if (guest_pa_to_host_pa(info, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), &pte_host_addr) == -1) {
+ PrintError("Could not lookup host address of guest page table\n");
+ return -1;
+ }
+
+ add_pte_map(pte_cache, (addr_t)(PDE32_T_ADDR(tmp_pde[i])), pte_host_addr);
+ }
+ }
+
+ return 0;
+
+}
+
int v3_replace_shdw_page32(struct guest_info * info, addr_t location, pte32_t * new_page, pte32_t * old_page) {
}
if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
addr_t shadow_pa = get_shadow_addr(info, guest_fault_pa);
shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);
* Allow everything
*/
shadow_pte->user_page = 1;
- shadow_pte->writable = 1;
+
+ if (find_pte_map(state->cached_ptes, PT32_PAGE_ADDR(guest_fault_pa)) != NULL) {
+ // Check if the entry is a page table...
+ PrintDebug("Marking page as Guest Page Table (large page)\n");
+ shadow_pte->vmm_info = PT32_GUEST_PT;
+ shadow_pte->writable = 0;
+ } else {
+ shadow_pte->writable = 1;
+ }
+
//set according to VMM policy
shadow_pte->write_through = 0;
return -1;
}
}
+ } else if ((shadow_pte_access == PT_WRITE_ERROR) &&
+ (shadow_pte->vmm_info == PT32_GUEST_PT)) {
+
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ PrintDebug("Write operation on Guest Page Table Page (large page)\n");
+ state->cached_cr3 = 0;
+ shadow_pte->writable = 1;
+
} else {
PrintError("Error in large page fault handler...\n");
PrintError("This case should have been handled at the top level handler\n");
// Page Directory Entry marked read-only
// It's a large page and we need to update the dirty bit in the guest
//
+
PrintDebug("Large page write error... Setting dirty bit and returning\n");
((pde32_4MB_t *)guest_pde)->dirty = 1;
shadow_pde->writable = guest_pde->writable;
// else...
if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
addr_t shadow_pa = get_shadow_addr(info, guest_pa);
shadow_pte->page_base_addr = PT32_BASE_ADDR(shadow_pa);
guest_pte->accessed = 1;
+ if (find_pte_map(state->cached_ptes, PT32_PAGE_ADDR(guest_pa)) != NULL) {
+ // Check if the entry is a page table...
+ PrintDebug("Marking page as Guest Page Table (writable=%d)\n", shadow_pte->writable);
+ shadow_pte->vmm_info = PT32_GUEST_PT;
+ }
+
if (guest_pte->dirty == 1) {
shadow_pte->writable = guest_pte->writable;
} else if ((guest_pte->dirty == 0) && (error_code.write == 1)) {
shadow_pte->writable = guest_pte->writable;
guest_pte->dirty = 1;
+
+ if (shadow_pte->vmm_info == PT32_GUEST_PT) {
+ // Well that was quick...
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ PrintDebug("Immediate Write operation on Guest Page Table Page\n");
+ state->cached_cr3 = 0;
+ }
+
} else if ((guest_pte->dirty == 0) && (error_code.write == 0)) {
shadow_pte->writable = 0;
}
+
+
+
} else {
// Page fault handled by hook functions
if (handle_special_page_fault(info, fault_addr, guest_pa, error_code) == -1) {
PrintDebug("Shadow PTE Write Error\n");
guest_pte->dirty = 1;
shadow_pte->writable = guest_pte->writable;
+
+ if (shadow_pte->vmm_info == PT32_GUEST_PT) {
+ struct shadow_page_state * state = &(info->shdw_pg_state);
+ PrintDebug("Write operation on Guest Page Table Page\n");
+ state->cached_cr3 = 0;
+ }
+
return 0;
} else {