#include <palacios/vmm_types.h>
-
-typedef ulong_t addr_t;
+#include <palacios/vmm_paging.h>
struct guest_info;
// from the perspective of the HOST
typedef enum host_region_type {
HOST_REGION_INVALID, // This region is INVALID (this is a return type, to denote errors)
- HOST_REGION_NOTHING, // This region is mapped as not present (always generate page faults)
+  HOST_REGION_HOOK,                   // Region is mapped as not present; every access faults and is dispatched to the registered read/write hooks
HOST_REGION_PHYSICAL_MEMORY, // Region is a section of host memory
HOST_REGION_MEMORY_MAPPED_DEVICE, // Region is allocated for DMA
HOST_REGION_UNALLOCATED, // Region is mapped on demand
#define shadow_mem_type_t host_region_type_t
-typedef struct shadow_region {
+struct shadow_region {
guest_region_type_t guest_type;
addr_t guest_start;
addr_t guest_end;
host_region_type_t host_type;
- union host_addr_t {
- struct physical_addr {
- addr_t host_start;
- } phys_addr;
- // Other addresses, like on disk, etc, would go here
- } host_addr;
+  addr_t host_addr; // Interpreted according to host_type: the host start
+                    // address of the mapping for HOST_REGION_PHYSICAL_MEMORY,
+                    // or a (struct vmm_mem_hook *) for HOST_REGION_HOOK
+
struct shadow_region *next, *prev;
-} shadow_region_t;
+};
struct shadow_map {
uint_t num_regions;
- shadow_region_t * head;
+ struct shadow_region * head;
};
-void init_shadow_region(shadow_region_t * entry,
+void init_shadow_region(struct shadow_region * entry,
addr_t guest_addr_start,
addr_t guest_addr_end,
guest_region_type_t guest_region_type,
host_region_type_t host_region_type);
/*
-void init_shadow_region_physical(shadow_region_t * entry,
+void init_shadow_region_physical(struct shadow_region * entry,
addr_t guest_addr_start,
addr_t guest_addr_end,
guest_region_type_t guest_region_type,
int add_shadow_region_passthrough(struct guest_info * guest_info,
addr_t guest_addr_start,
addr_t guest_addr_end,
- addr_t host_addr_start);
+ addr_t host_addr);
void init_shadow_map(struct shadow_map * map);
void free_shadow_map(struct shadow_map * map);
-shadow_region_t * get_shadow_region_by_addr(struct shadow_map * map, addr_t guest_addr);
+struct shadow_region * get_shadow_region_by_addr(struct shadow_map * map, addr_t guest_addr);
-shadow_region_t * get_shadow_region_by_index(struct shadow_map * map, uint_t index);
+struct shadow_region * get_shadow_region_by_index(struct shadow_map * map, uint_t index);
host_region_type_t lookup_shadow_map_addr(struct shadow_map * map, addr_t guest_addr, addr_t * host_addr);
// Semantics:
// Adding a region that overlaps with an existing region results in undefined
// behavior and will probably fail
-int add_shadow_region(struct shadow_map * map, shadow_region_t * entry);
+int add_shadow_region(struct shadow_map * map, struct shadow_region * entry);
// Semantics:
// Deletions result in splitting
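+// Hypothetical example (not from this change's code): deleting [0x2000, 0x3000)
+// from a region spanning [0x0, 0x10000) splits it into two regions,
+// [0x0, 0x2000) and [0x3000, 0x10000).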
+
+struct vmm_mem_hook {
+ // Called when data is read from a memory page
+ int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data);
+
+ // Called when data is written to a memory page
+ int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data);
+
+ void * priv_data;
+ struct shadow_region * region;
+};
+
+
+
+struct vmm_mem_hook * get_mem_hook(struct guest_info * info, addr_t guest_addr);
+
+int hook_guest_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+ int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
+ int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
+ void * priv_data);
+int unhook_guest_mem(struct guest_info * info, addr_t guest_addr);
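+/* Usage sketch: how a caller might hook a guest-physical range (here the
+ * VGA text buffer at 0xb8000; the handler names and range are illustrative
+ * assumptions, not part of this interface). Handlers return the length
+ * consumed on success, as the passthrough handlers in this change do.
+ *
+ *   static int vga_read(addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
+ *     memcpy(dst, (void *)guest_addr, length);   // backing memory is identity-mapped
+ *     return length;
+ *   }
+ *
+ *   static int vga_write(addr_t guest_addr, void * src, uint_t length, void * priv_data) {
+ *     memcpy((void *)guest_addr, src, length);
+ *     return length;
+ *   }
+ *
+ *   hook_guest_mem(info, 0xb8000, 0xc0000, vga_read, vga_write, NULL);
+ */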
+
+
+
+
+int mem_hook_dispatch(struct guest_info * info, addr_t mem_addr, pf_error_t access_info, struct vmm_mem_hook * hook);
+int handle_special_page_fault(struct guest_info * info, addr_t mem_addr, pf_error_t access_info);
+
#endif
#include <palacios/vmm_types.h>
-
-
-
-#include <palacios/vmm_mem.h>
#include <palacios/vmm_util.h>
/*
#define PTE32_T_ADDR(x) (((x).page_base_addr) << 12)
+/* Page Table Flag Values */
+#define PT32_HOOK 0x1
+
+
#endif
/* PDE 32 bit PAGE STRUCTURES */
+
typedef unsigned long long uint64_t;
typedef long long sint64_t;
typedef unsigned int uint32_t;
typedef int sint32_t;
+typedef ulong_t addr_t;
+
#endif
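+/* Example handlers that pass hooked accesses straight through to memory.
+ * They assume the hooked guest-physical range is identity-mapped in the
+ * host, so the guest address can be used directly as a host pointer.
+ */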
+int passthrough_mem_read(addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
+ memcpy(dst, (void*)guest_addr, length);
+ return length;
+}
+
+int passthrough_mem_write(addr_t guest_addr, void * src, uint_t length, void * priv_data) {
+ memcpy((void*)guest_addr, src, length);
+ return length;
+}
+
+
/* We need a configuration mechanism, so we can wrap this completely inside the VMM code,
* with no pollution into the HOST OS
//
add_shadow_region_passthrough(&vm_info, 0x0, 0xa0000, (addr_t)Allocate_VMM_Pages(160));
- add_shadow_region_passthrough(&vm_info, 0xa0000, 0xc0000, 0xa0000);
+ add_shadow_region_passthrough(&vm_info, 0xa0000, 0xc0000, 0xa0000);
+ //hook_guest_mem(&vm_info, 0xa0000, 0xc0000, passthrough_mem_read, passthrough_mem_write, NULL);
// TEMP
} else if ((instr[index] == cr_access_byte) &&
(instr[index + 1] == clts_byte)) {
// CLTS
-
+ PrintDebug("CLTS unhandled\n");
+ return -1;
} else if ((instr[index] == cr_access_byte) &&
           (instr[index + 1] == mov_to_cr_byte)) {
struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.shadow_cr3);
struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
+    /* A guest CR3 write invalidates the previous translations, so tear
+       down the current shadow page tables before building new ones */
+ delete_page_tables_pde32((pde32_t *)CR3_TO_PDE32(shadow_cr3));
*guest_cr3 = *new_cr3;
shadow_pt = create_new_shadow_pt32(info);
//shadow_pt = setup_shadow_pt32(info, CR3_TO_PDE32(*(addr_t *)new_cr3));
+
/* Copy Various flags */
*shadow_cr3 = *new_cr3;
#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
+#include <palacios/vmm_emulate.h>
-
-void init_shadow_region(shadow_region_t * entry,
+void init_shadow_region(struct shadow_region * entry,
addr_t guest_addr_start,
addr_t guest_addr_end,
guest_region_type_t guest_region_type,
entry->guest_start = guest_addr_start;
entry->guest_end = guest_addr_end;
entry->host_type = host_region_type;
+ entry->host_addr = 0;
entry->next=entry->prev = NULL;
}
int add_shadow_region_passthrough( struct guest_info * guest_info,
addr_t guest_addr_start,
addr_t guest_addr_end,
- addr_t host_addr_start)
+ addr_t host_addr)
{
- shadow_region_t * entry = (shadow_region_t *)V3_Malloc(sizeof(shadow_region_t));
+ struct shadow_region * entry = (struct shadow_region *)V3_Malloc(sizeof(struct shadow_region));
init_shadow_region(entry, guest_addr_start, guest_addr_end,
GUEST_REGION_PHYSICAL_MEMORY, HOST_REGION_PHYSICAL_MEMORY);
- entry->host_addr.phys_addr.host_start = host_addr_start;
+ entry->host_addr = host_addr;
return add_shadow_region(&(guest_info->mem_map), entry);
}
+int hook_guest_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+ int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
+ int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
+ void * priv_data) {
+
+ struct shadow_region * entry = (struct shadow_region *)V3_Malloc(sizeof(struct shadow_region));
+ struct vmm_mem_hook * hook = (struct vmm_mem_hook *)V3_Malloc(sizeof(struct vmm_mem_hook));
+
+ memset(hook, 0, sizeof(struct vmm_mem_hook));
+
+ hook->read = read;
+ hook->write = write;
+ hook->region = entry;
+ hook->priv_data = priv_data;
+
+
+ init_shadow_region(entry, guest_addr_start, guest_addr_end,
+ GUEST_REGION_PHYSICAL_MEMORY, HOST_REGION_HOOK);
+
+ entry->host_addr = (addr_t)hook;
+
+ return add_shadow_region(&(info->mem_map), entry);
+}
+
+
+struct vmm_mem_hook * get_mem_hook(struct guest_info * info, addr_t guest_addr) {
+ struct shadow_region * region = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
+
+ if (region == NULL) {
+ PrintDebug("Could not find shadow region for addr: %x\n", guest_addr);
+ return NULL;
+ }
+
+ return (struct vmm_mem_hook *)(region->host_addr);
+}
+
+
+int mem_hook_dispatch(struct guest_info * info, addr_t mem_addr, pf_error_t access_info, struct vmm_mem_hook * hook) {
+
+ if (access_info.write == 1) {
+ void * src = NULL;
+ uint_t length = 0;
+    PrintDebug("Memory hook write\n");
+    // TODO: the faulting instruction is not decoded yet, so the source
+    // operand and access length are unknown; fail for now, which leaves
+    // the hook->write call below unreachable.
+    return -1;
+
+ if (hook->write(mem_addr, src, length, hook->priv_data) != length) {
+ return -1;
+ }
+ } else {
+    PrintDebug("Memory hook read\n");
+    // Read emulation is likewise unimplemented for now
+    return -1;
+ }
+
+ return -1;
+}
+
+
+int handle_special_page_fault(struct guest_info * info, addr_t mem_addr, pf_error_t access_info) {
+ struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), mem_addr);
+
+ switch (reg->host_type) {
+ case HOST_REGION_HOOK:
+ return mem_hook_dispatch(info, mem_addr, access_info, (struct vmm_mem_hook *)(reg->host_addr));
+ default:
+ return -1;
+ }
+
+ return 0;
+
+}
void free_shadow_map(struct shadow_map * map) {
- shadow_region_t * cursor = map->head;
- shadow_region_t * tmp = NULL;
+ struct shadow_region * cursor = map->head;
+ struct shadow_region * tmp = NULL;
while(cursor) {
tmp = cursor;
int add_shadow_region(struct shadow_map * map,
- shadow_region_t * region)
+ struct shadow_region * region)
{
- shadow_region_t * cursor = map->head;
+ struct shadow_region * cursor = map->head;
PrintDebug("Adding Shadow Region: (0x%x-0x%x)\n", region->guest_start, region->guest_end);
-shadow_region_t *get_shadow_region_by_index(struct shadow_map * map,
+struct shadow_region *get_shadow_region_by_index(struct shadow_map * map,
uint_t index) {
- shadow_region_t * reg = map->head;
+ struct shadow_region * reg = map->head;
uint_t i = 0;
while (reg) {
}
-shadow_region_t * get_shadow_region_by_addr(struct shadow_map * map,
+struct shadow_region * get_shadow_region_by_addr(struct shadow_map * map,
addr_t addr) {
- shadow_region_t * reg = map->head;
+ struct shadow_region * reg = map->head;
while (reg) {
if ((reg->guest_start <= addr) && (reg->guest_end > addr)) {
host_region_type_t get_shadow_addr_type(struct guest_info * info, addr_t guest_addr) {
- shadow_region_t * reg = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
+ struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
if (!reg) {
return HOST_REGION_INVALID;
}
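+// Translates a guest-physical address by offsetting into the region's host
+// mapping. Worked example: for the identity passthrough region
+// [0xa0000, 0xc0000) set up at VM init in this change, guest 0xb8000
+// yields host (0xb8000 - 0xa0000) + 0xa0000 = 0xb8000.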
addr_t get_shadow_addr(struct guest_info * info, addr_t guest_addr) {
- shadow_region_t * reg = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
+ struct shadow_region * reg = get_shadow_region_by_addr(&(info->mem_map), guest_addr);
if (!reg) {
return 0;
} else {
- return (guest_addr - reg->guest_start) + reg->host_addr.phys_addr.host_start;
+ return (guest_addr - reg->guest_start) + reg->host_addr;
}
}
host_region_type_t lookup_shadow_map_addr(struct shadow_map * map, addr_t guest_addr, addr_t * host_addr) {
- shadow_region_t * reg = get_shadow_region_by_addr(map, guest_addr);
+ struct shadow_region * reg = get_shadow_region_by_addr(map, guest_addr);
if (!reg) {
// No mapping exists
} else {
switch (reg->host_type) {
case HOST_REGION_PHYSICAL_MEMORY:
- *host_addr = (guest_addr - reg->guest_start) + reg->host_addr.phys_addr.host_start;
+ *host_addr = (guest_addr - reg->guest_start) + reg->host_addr;
return reg->host_type;
case HOST_REGION_MEMORY_MAPPED_DEVICE:
case HOST_REGION_UNALLOCATED:
void print_shadow_map(struct shadow_map * map) {
- shadow_region_t * cur = map->head;
+ struct shadow_region * cur = map->head;
int i = 0;
PrintDebug("Memory Layout (regions: %d) \n", map->num_regions);
if (cur->host_type == HOST_REGION_PHYSICAL_MEMORY ||
cur->host_type == HOST_REGION_UNALLOCATED ||
cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) {
- PrintDebug("0x%x", cur->host_addr.phys_addr.host_start);
+ PrintDebug("0x%x", cur->host_addr);
}
PrintDebug("(%s)\n",
cur->host_type == HOST_REGION_PHYSICAL_MEMORY ? "HOST_REGION_PHYSICAL_MEMORY" :
cur->host_type == HOST_REGION_UNALLOCATED ? "HOST_REGION_UNALLOCATED" :
- cur->host_type == HOST_REGION_NOTHING ? "HOST_REGION_NOTHING" :
+ cur->host_type == HOST_REGION_HOOK ? "HOST_REGION_HOOK" :
cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE ? "HOST_REGION_MEMORY_MAPPED_DEVICE" :
cur->host_type == HOST_REGION_REMOTE ? "HOST_REGION_REMOTE" :
cur->host_type == HOST_REGION_SWAPPED ? "HOST_REGION_SWAPPED" :
for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
- shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
+ struct shadow_region * region = get_shadow_region_by_addr(map, current_page_addr);
if (!region ||
- (region->host_type == HOST_REGION_NOTHING) ||
+ (region->host_type == HOST_REGION_HOOK) ||
(region->host_type == HOST_REGION_UNALLOCATED) ||
(region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
(region->host_type == HOST_REGION_REMOTE) ||
}
int handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
- if (info->cpu_mode == PROTECTED_PG) {
+
+ switch (info->cpu_mode) {
+ case PROTECTED_PG:
return handle_shadow_pagefault32(info, fault_addr, error_code);
- } else {
+ break;
+ case PROTECTED_PAE_PG:
+ case LONG_PG:
+ // currently not handled
+ return -1;
+ break;
+ case REAL:
+ case PROTECTED:
+ case PROTECTED_PAE:
+ case LONG:
+ // If paging is not turned on we need to handle the special cases
+ return handle_special_page_fault(info, fault_addr, error_code);
+ break;
+ default:
return -1;
}
}
} else {
/*
* Check the Intel manual because we are ignoring Large Page issues here
+ * Also be wary of hooked pages
*/
}
// Page Table Entry Not Present
- if (get_shadow_addr_type(info, guest_pa) == HOST_REGION_INVALID) {
+ host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);
+
+ if (host_page_type == HOST_REGION_INVALID) {
// Inject a machine check in the guest
raise_exception(info, MC_EXCEPTION);
PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_pa);
return 0;
- }
-
- shadow_pa = get_shadow_addr(info, guest_pa);
-
- shadow_pte_entry->page_base_addr = PT32_BASE_ADDR(shadow_pa);
-
- shadow_pte_entry->present = guest_pte_entry->present;
- shadow_pte_entry->user_page = guest_pte_entry->user_page;
-
- //set according to VMM policy
- shadow_pte_entry->write_through = 0;
- shadow_pte_entry->cache_disable = 0;
- shadow_pte_entry->global_page = 0;
- //
-
- guest_pte_entry->accessed = 1;
- if (guest_pte_entry->dirty == 1) {
- shadow_pte_entry->writable = guest_pte_entry->writable;
- } else if ((guest_pte_entry->dirty == 0) && (error_code.write == 1)) {
- shadow_pte_entry->writable = guest_pte_entry->writable;
- guest_pte_entry->dirty = 1;
- } else if ((guest_pte_entry->dirty = 0) && (error_code.write == 0)) {
- shadow_pte_entry->writable = 0;
+ } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
+
+ shadow_pa = get_shadow_addr(info, guest_pa);
+
+ shadow_pte_entry->page_base_addr = PT32_BASE_ADDR(shadow_pa);
+
+ shadow_pte_entry->present = guest_pte_entry->present;
+ shadow_pte_entry->user_page = guest_pte_entry->user_page;
+
+ //set according to VMM policy
+ shadow_pte_entry->write_through = 0;
+ shadow_pte_entry->cache_disable = 0;
+ shadow_pte_entry->global_page = 0;
+ //
+
+ guest_pte_entry->accessed = 1;
+
+    // Dirty-bit emulation: map the page writable only once the guest page
+    // is dirty; a read fault on a clean page gets a read-only mapping so
+    // the next write faults and lets us set the guest dirty bit.
+    if (guest_pte_entry->dirty == 1) {
+      shadow_pte_entry->writable = guest_pte_entry->writable;
+    } else if ((guest_pte_entry->dirty == 0) && (error_code.write == 1)) {
+      shadow_pte_entry->writable = guest_pte_entry->writable;
+      guest_pte_entry->dirty = 1;
+    } else if ((guest_pte_entry->dirty == 0) && (error_code.write == 0)) {
+      shadow_pte_entry->writable = 0;
+    }
+ } else {
+ // Page fault handled by hook functions
+ if (handle_special_page_fault(info, fault_addr, error_code) == -1) {
+ PrintDebug("Special Page fault handler returned error for address: %x\n", fault_addr);
+ return -1;
+ }
}
} else if ((shadow_pte_access == PT_WRITE_ERROR) &&