int handle_svm_io(guest_info_t * info);
+int handle_shadow_paging(guest_info_t * info);
int handle_svm_exit(guest_info_t * info);
};
+typedef enum {SHADOW_PAGING, NESTED_PAGING} vm_page_mode_t;
+
typedef struct guest_info {
ullong_t rip;
ullong_t rsp;
- shadow_paging_state_t shadow_paging_state;
+ shadow_map_t mem_map;
+
+
+ vm_page_mode_t page_mode;
+ shadow_page_state_t shadow_page_state;
+ // nested_paging_t nested_page_state;
- // vmm_mem_list_t mem_list;
- // vmm_mem_layout_t mem_layout;
vmm_io_map_t io_map;
// device_map
struct guest_gprs vm_regs;
- void * page_tables;
void * vmm_data;
} guest_info_t;
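+
+/*
+ * Typical initialization of the paging-related fields, mirroring the setup
+ * sequence in svm.c later in this patch:
+ *
+ *   init_shadow_map(&(vm_info.mem_map));
+ *   init_shadow_page_state(&(vm_info.shadow_page_state));
+ *   vm_info.page_mode = SHADOW_PAGING;
+ */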
uint_t INVD : 1 PACKED;
uint_t PAUSE : 1 PACKED;
uint_t HLT : 1 PACKED;
- uint_t INVPLG : 1 PACKED;
- uint_t INVPLGA : 1 PACKED;
+ uint_t INVLPG : 1 PACKED;
+ uint_t INVLPGA : 1 PACKED;
uint_t IOIO_PROT : 1 PACKED;
uint_t MSR_PROT : 1 PACKED;
uint_t task_switch : 1 PACKED;
void *(*malloc)(uint_t size);
void (*free)(void * addr);
- void *(*physical_to_virtual)(void *addr);
- void *(*virtual_to_physical)(void *addr);
+ void *(*paddr_to_vaddr)(void *addr);
+ void *(*vaddr_to_paddr)(void *addr);
void (*start_kernel_thread)(); // TODO: should take a pointer to the thread entry function
-typedef struct shadow_map_entry {
+typedef struct shadow_region {
guest_region_type_t guest_type;
addr_t guest_start;
addr_t guest_end;
union host_addr_t {
struct physical_addr {
addr_t host_start;
- addr_t host_end;
} phys_addr;
// Other addresses, like on disk, etc, would go here
} host_addr;
- struct shadow_map_entry *next, *prev;
-} shadow_map_entry_t;
+ struct shadow_region *next, *prev;
+} shadow_region_t;
typedef struct shadow_map {
uint_t num_regions;
- shadow_map_entry_t * head;
+ shadow_region_t * head;
} shadow_map_t;
-void init_shadow_map_entry(shadow_map_entry_t *entry,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- guest_region_type_t guest_region_type,
- host_region_type_t host_region_type);
+void init_shadow_region(shadow_region_t * entry,
+ addr_t guest_addr_start,
+ addr_t guest_addr_end,
+ guest_region_type_t guest_region_type,
+ host_region_type_t host_region_type);
-void init_shadow_map_entry_physical(shadow_map_entry_t *entry,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- guest_region_type_t guest_region_type,
- addr_t host_addr_start,
- addr_t host_addr_end,
- host_region_type_t host_region_type);
+void init_shadow_region_physical(shadow_region_t * entry,
+ addr_t guest_addr_start,
+ addr_t guest_addr_end,
+ guest_region_type_t guest_region_type,
+ addr_t host_addr_start,
+ host_region_type_t host_region_type);
-void init_shadow_map(shadow_map_t *map);
-void free_shadow_map(shadow_map_t *map);
+void init_shadow_map(shadow_map_t * map);
+void free_shadow_map(shadow_map_t * map);
-shadow_map_entry_t * get_shadow_map_region_by_addr(shadow_map_t *map, addr_t guest_addr);
+shadow_region_t * get_shadow_region_by_addr(shadow_map_t * map, addr_t guest_addr);
-shadow_map_entry_t * get_shadow_map_region_by_index(shadow_map_t * map, uint_t index);
+shadow_region_t * get_shadow_region_by_index(shadow_map_t * map, uint_t index);
-int map_guest_physical_to_host_physical(shadow_map_entry_t *entry,
- addr_t guest_addr,
- addr_t *host_addr);
+int guest_paddr_to_host_paddr(shadow_region_t * entry,
+ addr_t guest_addr,
+ addr_t * host_addr);
// Semantics:
// Adding a region that overlaps with an existing region is undefined
// and will probably fail
-int add_shadow_map_region(shadow_map_t * map, shadow_map_entry_t *entry);
+int add_shadow_region(shadow_map_t * map, shadow_region_t * entry);
// Semantics:
// Deletions result in splitting
-int delete_shadow_map_region(shadow_map_t *map,
+int delete_shadow_region(shadow_map_t * map,
addr_t guest_start,
addr_t guest_end);
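+
+/*
+ * A minimal usage sketch of this API (error handling elided, addresses
+ * illustrative), assuming a Malloc() like the one used in svm.c: build a
+ * map with one region backed by host physical memory, then translate a
+ * guest physical address through it.
+ *
+ *   shadow_map_t map;
+ *   shadow_region_t * ent = Malloc(sizeof(shadow_region_t));
+ *   addr_t host_addr;
+ *
+ *   init_shadow_map(&map);
+ *   init_shadow_region_physical(ent, 0, 0x100000, GUEST_REGION_PHYSICAL_MEMORY,
+ *                               0x100000, HOST_REGION_PHYSICAL_MEMORY);
+ *   add_shadow_region(&map, ent);
+ *
+ *   ent = get_shadow_region_by_addr(&map, 0xb8000);
+ *   if (ent && (guest_paddr_to_host_paddr(ent, 0xb8000, &host_addr) == 0)) {
+ *     // host_addr == 0x1b8000: offset 0xb8000 into the host backing region
+ *   }
+ */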
#define PAGE_OFFSET(x) ((((uint_t)x) & 0xfff))
#define PAGE_ALIGNED_ADDR(x) (((uint_t) (x)) >> 12)
+
#ifndef PAGE_ADDR
#define PAGE_ADDR(x) (PAGE_ALIGNED_ADDR(x) << 12)
#endif
#define PAGE_POWER 12
+#define CR3_TO_PDE(cr3) (((ulong_t)cr3) & 0xfffff000)
+#define CR3_TO_PDPTRE(cr3) (((ulong_t)cr3) & 0xffffffe0)
+#define CR3_TO_PML4E(cr3) (((ullong_t)cr3) & 0x000ffffffffff000)
+
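+// Worked example: a (non-PAE) CR3 value of 0x00123018 carries flag bits in
+// its low 12 bits; CR3_TO_PDE(0x00123018) masks them off, giving the page
+// directory base 0x00123000.
+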
#define VM_WRITE 1
#define VM_USER 2
#define VM_NOCACHE 8
#define VM_EXEC 0
-#define GUEST_PAGE 0x0
-#define SHARED_PAGE 0x1
-
typedef struct pde {
uint_t present : 1;
uint_t flags : 4;
-typedef enum { PDE32 } page_directory_type_t;
+typedef enum { PDE32 } paging_mode_t;
-typedef struct shadow_paging_state {
+typedef struct shadow_page_state {
+
// these two reflect the top-level page directory
// of the guest page table
- page_directory_type_t guest_page_directory_type;
- void *guest_page_directory; // points to guest's current page table
+ paging_mode_t guest_mode;
+ reg_ex_t guest_cr3; // points to guest's current page table
- // This reflects the guest physical to host physical mapping
- shadow_map_t shadow_map;
+ // Should this be here?
+ reg_ex_t guest_cr0;
// these two reflect the top-level page directory
// the shadow page table
- page_directory_type_t shadow_page_directory_type;
- void *shadow_page_directory;
-
-} shadow_paging_state_t;
+ paging_mode_t shadow_mode;
+ reg_ex_t shadow_cr3;
+} shadow_page_state_t;
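+
+// reg_ex_t (defined elsewhere) is used as a 64-bit register overlay: .r_reg
+// gives the full value, .e_reg.low and .e_reg.high the 32-bit halves, e.g.:
+//
+//   state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;  // 32-bit view
+//   guest_state->cr3 = state->shadow_cr3.r_reg;        // full-width view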
-int init_shadow_paging_state(shadow_paging_state_t *state);
-// This function will cause the shadow page table to be deleted
-// and rewritten to reflect the guest page table and the shadow map
-int wholesale_update_shadow_paging_state(shadow_paging_state_t *state);
-//void free_guest_page_tables(vmm_pde_t * pde);
+int init_shadow_page_state(shadow_page_state_t * state);
-//generate_shadow_
+// This function will cause the shadow page table to be deleted
+// and rewritten to reflect the guest page table and the shadow map
+int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map);
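+
+// A sketch of the intended call sequence (assuming the guest's CR3 value has
+// already been captured, e.g. on a CR3-write exit):
+//
+//   state->guest_cr3.r_reg = value_written_by_guest; /* hypothetical */
+//   if (wholesale_update_shadow_page_state(state, mem_map) == -1) {
+//     /* the guest mapped physical memory it does not own; panic */
+//   }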
-//vmm_pde_t * generate_guest_page_tables(shadow_map_t * map, vmm_mem_list_t * list);
-//pml4e64_t * generate_guest_page_tables_64(shadow_map_t * map, vmm_mem_list_t * list);
+vmm_pde_t * create_passthrough_pde32_pts(shadow_map_t * map);
+//void free_guest_page_tables(vmm_pde_t * pde);
void PrintDebugPageTables(vmm_pde_t * pde);
-
-
#endif
void * VMM_Malloc(uint_t size);
void VMM_Free(void * addr);
-void * Identity(void *addr) { return addr; };
+void * Identity(void *addr);
#endif
* Copyright (c) 2001,2003,2004 David H. Hovemeyer <daveho@cs.umd.edu>
* Copyright (c) 2003, Jeffrey K. Hollingsworth <hollings@cs.umd.edu>
* Copyright (c) 2004, Iulian Neamtiu <neamtiu@cs.umd.edu>
- * $Revision: 1.21 $
+ * $Revision: 1.22 $
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "COPYING".
os_hooks.free_page = &Free_VMM_Page;
os_hooks.malloc = &VMM_Malloc;
os_hooks.free = &VMM_Free;
- os_hooks.virtual_to_physical=&Identity;
- os_hooks.physical_to_virtual=&Identity;
-
+ os_hooks.vaddr_to_paddr = &Identity;
+ os_hooks.paddr_to_vaddr = &Identity;
// DumpGDT();
Init_VMM(&os_hooks, &vmm_ops);
-
- init_shadow_paging_state(&(vm_info.shadow_paging_state));
-
+ init_shadow_map(&(vm_info.mem_map));
+ init_shadow_page_state(&(vm_info.shadow_page_state));
+ vm_info.page_mode = SHADOW_PAGING;
init_vmm_io_map(&(vm_info.io_map));
if (0) {
// add_shared_mem_range(&(vm_info.mem_layout), 0, 0x800000, 0x10000);
- //add_shared_mem_range(&(vm_info.mem_layout), 0, 0x1000000, 0);
+ // add_shared_mem_range(&(vm_info.mem_layout), 0, 0x1000000, 0);
rip = (ulong_t)(void*)&BuzzVM;
// rip -= 0x10000;
//add_shared_mem_range(&(vm_info.mem_layout), 0x0, 0x1000, 0x100000);
// add_shared_mem_range(&(vm_info.mem_layout), 0x0, 0x100000, 0x0);
- shadow_map_entry_t *ent = Malloc(sizeof(shadow_map_entry_t));;
- init_shadow_map_entry_physical(ent,0,0x100000,GUEST_REGION_PHYSICAL_MEMORY,
- 0,0x100000,HOST_REGION_PHYSICAL_MEMORY);
- add_shadow_map_region(&(vm_info.shadow_paging_state.shadow_map),ent);
+ shadow_region_t * ent = Malloc(sizeof(shadow_region_t));
+ init_shadow_region_physical(ent, 0, 0x100000, GUEST_REGION_PHYSICAL_MEMORY,
+                             0x100000, HOST_REGION_PHYSICAL_MEMORY);
+ add_shadow_region(&(vm_info.mem_map), ent);
hook_io_port(&(vm_info.io_map), 0x61, &IO_Read, &IO_Write);
/*
info->vmm_data = (void*)Allocate_VMCB();
- PrintDebug("Generating Guest nested page tables\n");
- // print_mem_list(&(info->mem_list));
- //print_mem_layout(&(info->mem_layout));
- info->page_tables = NULL;
+ //PrintDebug("Generating Guest nested page tables\n");
+ // info->page_tables = NULL;
//info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
//info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
//PrintDebugPageTables(info->page_tables);
*bitmap |= 1 << (port % 8);
}
- memset((uchar_t*)io_port_bitmap, 0xff, PAGE_SIZE * 2);
+ // memset((uchar_t*)io_port_bitmap, 0xff, PAGE_SIZE * 2);
//PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
ctrl_area->instrs.instrs.IOIO_PROT = 1;
ctrl_area->instrs.instrs.INTR = 1;
// also determine if CPU supports nested paging
- if (vm_info.page_tables) {
- // if (0) {
+
+ if (vm_info.page_mode == SHADOW_PAGING) {
+ PrintDebug("Creating initial shadow page table\n");
+ vm_info.shadow_page_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);
+ PrintDebug("Created\n");
+
+ guest_state->cr3 = vm_info.shadow_page_state.shadow_cr3.r_reg;
+
+ ctrl_area->cr_reads.crs.cr3 = 1;
+ ctrl_area->cr_writes.crs.cr3 = 1;
+ ctrl_area->cr_reads.crs.cr0 = 1;
+ ctrl_area->cr_writes.crs.cr0 = 1;
+
+ ctrl_area->instrs.instrs.INVLPG = 1;
+ ctrl_area->instrs.instrs.INVLPGA = 1;
+
+
+ guest_state->g_pat = 0x7040600070406ULL;
+
+ guest_state->cr0 |= 0x80000000;
+ } else if (vm_info.page_mode == NESTED_PAGING) {
// Flush the TLB on entries/exits
//ctrl_area->TLB_CONTROL = 1;
// Set the Nested Page Table pointer
// ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
- ctrl_area->N_CR3 = 0;
- guest_state->cr3 = (addr_t)(vm_info.page_tables);
+ // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
// ctrl_area->N_CR3 = Get_CR3();
// guest_state->cr3 |= (Get_CR3() & 0xfffff000);
- guest_state->g_pat = 0x7040600070406ULL;
-
- //PrintDebug("Set Nested CR3: lo: 0x%x hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
- guest_state->cr0 |= 0x80000000;
+ // guest_state->g_pat = 0x7040600070406ULL;
}
+
}
ctrl_area->instrs.instrs.INTR = 1;
- // also determine if CPU supports nested paging
- if (vm_info.page_tables) {
- // if (0) {
+
+
+ if (vm_info.page_mode == SHADOW_PAGING) {
+ PrintDebug("Creating initial shadow page table\n");
+ vm_info.shadow_page_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);
+ PrintDebug("Created\n");
+
+ guest_state->cr3 = vm_info.shadow_page_state.shadow_cr3.r_reg;
+
+ ctrl_area->cr_reads.crs.cr3 = 1;
+ ctrl_area->cr_writes.crs.cr3 = 1;
+ ctrl_area->cr_reads.crs.cr0 = 1;
+ ctrl_area->cr_writes.crs.cr0 = 1;
+
+ ctrl_area->instrs.instrs.INVLPG = 1;
+ ctrl_area->instrs.instrs.INVLPGA = 1;
+ ctrl_area->instrs.instrs.CR0 = 1;
+
+
+
+ guest_state->g_pat = 0x7040600070406ULL;
+
+ guest_state->cr0 |= 0x80000000;
+ } else if (vm_info.page_mode == NESTED_PAGING) {
// Flush the TLB on entries/exits
//ctrl_area->TLB_CONTROL = 1;
// Set the Nested Page Table pointer
// ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
- ctrl_area->N_CR3 = 0;
- guest_state->cr3 = (addr_t)(vm_info.page_tables);
+ // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
// ctrl_area->N_CR3 = Get_CR3();
// guest_state->cr3 |= (Get_CR3() & 0xfffff000);
- guest_state->g_pat = 0x7040600070406ULL;
-
- //PrintDebug("Set Nested CR3: lo: 0x%x hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
- guest_state->cr0 |= 0x80000000;
+ // guest_state->g_pat = 0x7040600070406ULL;
}
}
-
+
// also determine if CPU supports nested paging
+ /*
if (vm_info.page_tables) {
// if (0) {
// Flush the TLB on entries/exits
// Enable Paging
// guest_state->cr0 |= 0x80000000;
}
-
+ */
}
PrintDebug("io_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
if (exit_code == VMEXIT_IOIO) {
handle_svm_io(info);
+ } else if (( (exit_code == VMEXIT_CR3_READ) ||
+ (exit_code == VMEXIT_CR3_WRITE) ||
+ (exit_code == VMEXIT_INVLPG) ||
+ (exit_code == VMEXIT_INVLPGA) ||
+ (exit_code == VMEXIT_EXCP14)) &&
+ (info->page_mode == SHADOW_PAGING)) {
+ handle_shadow_paging(info);
}
return 0;
}
+
+
+int handle_shadow_paging(guest_info_t * info) {
+ vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
+ // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+
+ if (guest_ctrl->exit_code == VMEXIT_CR3_READ) {
+
+ }
+
+ return 0;
+}
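+
+// A plausible next step for the CR3-write case, not yet wired in above:
+// capture the value the guest tried to load into guest_cr3, then rebuild
+// the shadow table against the guest's memory map:
+//
+//   info->shadow_page_state.guest_cr3.r_reg = /* value written by guest */;
+//   wholesale_update_shadow_page_state(&(info->shadow_page_state),
+//                                      &(info->mem_map));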
PrintDebug("\tINVD: %d\n", ctrl_area->instrs.instrs.INVD);
PrintDebug("\tPAUSE: %d\n", ctrl_area->instrs.instrs.PAUSE);
PrintDebug("\tHLT: %d\n", ctrl_area->instrs.instrs.HLT);
- PrintDebug("\tINVPLG: %d\n", ctrl_area->instrs.instrs.INVPLG);
- PrintDebug("\tINVPLGA: %d\n", ctrl_area->instrs.instrs.INVPLGA);
+ PrintDebug("\tINVLPG: %d\n", ctrl_area->instrs.instrs.INVLPG);
+ PrintDebug("\tINVLPGA: %d\n", ctrl_area->instrs.instrs.INVLPGA);
PrintDebug("\tIOIO_PROT: %d\n", ctrl_area->instrs.instrs.IOIO_PROT);
PrintDebug("\tMSR_PROT: %d\n", ctrl_area->instrs.instrs.MSR_PROT);
PrintDebug("\ttask_switch: %d\n", ctrl_area->instrs.instrs.task_switch);
extern struct vmm_os_hooks * os_hooks;
-void init_shadow_map_entry(shadow_map_entry_t *entry,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- guest_region_type_t guest_region_type,
- host_region_type_t host_region_type)
+void init_shadow_region(shadow_region_t * entry,
+ addr_t guest_addr_start,
+ addr_t guest_addr_end,
+ guest_region_type_t guest_region_type,
+ host_region_type_t host_region_type)
{
- entry->guest_type=guest_region_type;
- entry->guest_start=guest_addr_start;
- entry->guest_end=guest_addr_end;
- entry->host_type=host_region_type;
- entry->next=entry->prev=NULL;
+ entry->guest_type = guest_region_type;
+ entry->guest_start = guest_addr_start;
+ entry->guest_end = guest_addr_end;
+ entry->host_type = host_region_type;
+ entry->next = entry->prev = NULL;
}
-void init_shadow_map_entry_physical(shadow_map_entry_t *entry,
- addr_t guest_addr_start,
- addr_t guest_addr_end,
- guest_region_type_t guest_region_type,
- addr_t host_addr_start,
- addr_t host_addr_end,
- host_region_type_t host_region_type)
+void init_shadow_region_physical(shadow_region_t * entry,
+ addr_t guest_addr_start,
+ addr_t guest_addr_end,
+ guest_region_type_t guest_region_type,
+ addr_t host_addr_start,
+ host_region_type_t host_region_type)
{
- init_shadow_map_entry(entry,guest_addr_start,guest_addr_end,guest_region_type,host_region_type);
- entry->host_addr.phys_addr.host_start=host_addr_start;
- entry->host_addr.phys_addr.host_end=host_addr_end;
+ init_shadow_region(entry, guest_addr_start, guest_addr_end, guest_region_type, host_region_type);
+ entry->host_addr.phys_addr.host_start = host_addr_start;
+
}
-void init_shadow_map(shadow_map_t * map)
-{
+void init_shadow_map(shadow_map_t * map) {
map->num_regions = 0;
map->head = NULL;
void free_shadow_map(shadow_map_t * map) {
- shadow_map_entry_t * cursor = map->head;
- shadow_map_entry_t * tmp = NULL;
+ shadow_region_t * cursor = map->head;
+ shadow_region_t * tmp = NULL;
while(cursor) {
tmp = cursor;
}
VMMFree(map);
-
}
* we don't allow overlaps; we could probably allow overlapping regions
* of the same type... but I'll let someone else deal with that
*/
-int add_shadow_map_region(shadow_map_t * map,
- shadow_map_entry_t * region)
+int add_shadow_region(shadow_map_t * map,
+ shadow_region_t * region)
{
- shadow_map_entry_t * cursor = map->head;
+ shadow_region_t * cursor = map->head;
if ((!cursor) || (cursor->guest_start >= region->guest_end)) {
region->prev = NULL;
}
-int delete_shadow_map_region(shadow_map_t *map,
- addr_t guest_start,
- addr_t guest_end)
-{
+int delete_shadow_region(shadow_map_t * map,
+ addr_t guest_start,
+ addr_t guest_end) {
return -1;
}
-shadow_map_entry_t *get_shadow_map_region_by_index(shadow_map_t * map,
- uint_t index)
-{
- shadow_map_entry_t * reg = map->head;
+shadow_region_t *get_shadow_region_by_index(shadow_map_t * map,
+ uint_t index) {
+ shadow_region_t * reg = map->head;
uint_t i = 0;
while (reg) {
- if (i==index) {
+ if (i == index) {
return reg;
}
- reg=reg->next;
+ reg = reg->next;
i++;
}
return NULL;
}
-shadow_map_entry_t * get_shadow_map_region_by_addr(shadow_map_t *map,
- addr_t addr)
-{
- shadow_map_entry_t * reg = map->head;
-
+shadow_region_t * get_shadow_region_by_addr(shadow_map_t * map,
+ addr_t addr) {
+ shadow_region_t * reg = map->head;
while (reg) {
if ((reg->guest_start <= addr) && (reg->guest_end > addr)) {
-int map_guest_physical_to_host_physical(shadow_map_entry_t *entry,
- addr_t guest_addr,
- addr_t *host_addr)
-{
- if (!(guest_addr>=entry->guest_start && guest_addr<entry->guest_end)) {
+int guest_paddr_to_host_paddr(shadow_region_t * entry,
+ addr_t guest_addr,
+ addr_t * host_addr) {
+
+ if (!((guest_addr >= entry->guest_start) &&
+ (guest_addr < entry->guest_end))) {
return -1;
}
case HOST_REGION_PHYSICAL_MEMORY:
case HOST_REGION_MEMORY_MAPPED_DEVICE:
case HOST_REGION_UNALLOCATED:
- *host_addr=(guest_addr-entry->guest_start) + entry->host_addr.phys_addr.host_start;
+ *host_addr = (guest_addr-entry->guest_start) + entry->host_addr.phys_addr.host_start;
return 0;
break;
default:
}
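+
+// Worked example: for a region with guest_start 0x100000 backed at host
+// physical 0x500000, guest_addr 0x123456 translates to
+// (0x123456 - 0x100000) + 0x500000 == 0x523456.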
-void print_shadow_map(shadow_map_t *map) {
- shadow_map_entry_t * cur = map->head;
+void print_shadow_map(shadow_map_t * map) {
+ shadow_region_t * cur = map->head;
int i = 0;
PrintDebug("Memory Layout (regions: %d) \n", map->num_regions);
while (cur) {
- PrintDebug("%d: 0x%x - 0x%x (%s) -> ", i, cur->guest_start, cur->guest_end -1,
+ PrintDebug("%d: 0x%x - 0x%x (%s) -> ", i, cur->guest_start, cur->guest_end - 1,
cur->guest_type == GUEST_REGION_PHYSICAL_MEMORY ? "GUEST_REGION_PHYSICAL_MEMORY" :
cur->guest_type == GUEST_REGION_NOTHING ? "GUEST_REGION_NOTHING" :
cur->guest_type == GUEST_REGION_MEMORY_MAPPED_DEVICE ? "GUEST_REGION_MEMORY_MAPPED_DEVICE" :
- "UNKNOWN");
- if (cur->host_type==HOST_REGION_PHYSICAL_MEMORY ||
- cur->host_type==HOST_REGION_UNALLOCATED ||
- cur->host_type==HOST_REGION_MEMORY_MAPPED_DEVICE) {
- PrintDebug("0x%x - 0x%x ", cur->host_addr.phys_addr.host_start, cur->host_addr.phys_addr.host_end);
+ "UNKNOWN");
+ if (cur->host_type == HOST_REGION_PHYSICAL_MEMORY ||
+ cur->host_type == HOST_REGION_UNALLOCATED ||
+ cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) {
+ PrintDebug("0x%x", cur->host_addr.phys_addr.host_start);
}
PrintDebug("(%s)\n",
cur->host_type == HOST_REGION_PHYSICAL_MEMORY ? "HOST_REGION_PHYSICAL_MEMORY" :
}
-int mem_layout_add_test_1(vmm_mem_layout_t *layout) {
+int mem_layout_add_test_1(vmm_mem_layout_t * layout) {
uint_t start = 0;
void delete_page_tables_pde32(vmm_pde_t * pde) {
int i, j;
- if (pde==NULL) {
- return ;
+ if (pde == NULL) {
+ return;
}
for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
- if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
- os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
+ if (pte[j].present) {
+ os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
}
}
}
-int init_shadow_paging_state(shadow_paging_state_t *state)
-{
- state->guest_page_directory_type=state->shadow_page_directory_type=PDE32;
+int init_shadow_page_state(shadow_page_state_t * state) {
+ state->guest_mode = PDE32;
+ state->shadow_mode = PDE32;
- state->guest_page_directory=state->shadow_page_directory=NULL;
+ state->guest_cr3.r_reg = 0;
+ state->shadow_cr3.r_reg = 0;
- init_shadow_map(&(state->shadow_map));
return 0;
}
-int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
-{
+int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map) {
unsigned i, j;
- vmm_pde_t *cur_guest_pde, *cur_shadow_pde;
- vmm_pte_t *cur_guest_pte, *cur_shadow_pte;
+ vmm_pde_t * guest_pde;
+ vmm_pde_t * shadow_pde;
+
// For now, we'll only work with PDE32
- if (state->guest_page_directory_type!=PDE32) {
+ if (state->guest_mode != PDE32) {
return -1;
}
+
+
- cur_shadow_pde=(vmm_pde_t*)(state->shadow_page_directory);
-
- cur_guest_pde = (vmm_pde_t*)(os_hooks->physical_to_virtual(state->guest_page_directory));
+ shadow_pde = (vmm_pde_t *)(CR3_TO_PDE(state->shadow_cr3.e_reg.low));
+ guest_pde = (vmm_pde_t *)(os_hooks->paddr_to_vaddr((void*)CR3_TO_PDE(state->guest_cr3.e_reg.low)));
// Delete the current page table
- delete_page_tables_pde32(cur_shadow_pde);
+ delete_page_tables_pde32(shadow_pde);
+
+ shadow_pde = os_hooks->allocate_pages(1);
+
- cur_shadow_pde = os_hooks->allocate_pages(1);
+ state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
- state->shadow_page_directory = cur_shadow_pde;
- state->shadow_page_directory_type=PDE32;
+ state->shadow_mode = PDE32;
- for (i=0;i<MAX_PAGE_DIR_ENTRIES;i++) {
- cur_shadow_pde[i] = cur_guest_pde[i];
+ for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
+ shadow_pde[i] = guest_pde[i];
+
// The shadow can be identical to the guest if it's not present
- if (!cur_shadow_pde[i].present) {
+ if (!shadow_pde[i].present) {
continue;
}
- if (cur_shadow_pde[i].large_pages) {
+
+ if (shadow_pde[i].large_pages) {
// large page - just map it through shadow map to generate its physical location
- addr_t guest_addr = PAGE_ADDR(cur_shadow_pde[i].pt_base_addr);
+ addr_t guest_addr = ((addr_t)shadow_pde[i].pt_base_addr) << PAGE_POWER;
addr_t host_addr;
- shadow_map_entry_t *ent;
+ shadow_region_t * ent;
- ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
if (!ent) {
// FIXME Panic here - guest is trying to map to physical memory
case HOST_REGION_PHYSICAL_MEMORY:
// points into currently allocated physical memory, so we just
// set up the shadow to point to the mapped location
- if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
+ if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
// Panic here
return -1;
}
- cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+
+ shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
// FIXME set vmm_info bits here
break;
case HOST_REGION_UNALLOCATED:
// points to physical memory that is *allowed* but that we
// have not yet allocated. We mark as not present and set a
// bit to remind us to allocate it later
- cur_shadow_pde[i].present=0;
+ shadow_pde[i].present = 0;
// FIXME Set vminfo bits here so that we know that we will be
// allocating it later
break;
// points to physical memory that is NOT ALLOWED.
// We will mark it as not present and set a bit to remind
// us that it's bad later and insert a GPF then
- cur_shadow_pde[i].present=0;
+ shadow_pde[i].present = 0;
break;
case HOST_REGION_MEMORY_MAPPED_DEVICE:
case HOST_REGION_REMOTE:
break;
}
} else {
- addr_t host_addr;
+ vmm_pte_t * guest_pte;
+ vmm_pte_t * shadow_pte;
addr_t guest_addr;
+ addr_t guest_pte_host_addr;
+ shadow_region_t * ent;
// small page - set PDE and follow down to the child table
- cur_shadow_pde[i] = cur_guest_pde[i];
-
- // Allocate a new second level page table for the shadow
- cur_shadow_pte = os_hooks->allocate_pages(1);
+ shadow_pde[i] = guest_pde[i];
- // make our first level page table in teh shadow point to it
- cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(cur_shadow_pte);
+ guest_addr = ((addr_t)guest_pde[i].pt_base_addr) << PAGE_POWER;
- shadow_map_entry_t *ent;
-
- guest_addr=PAGE_ADDR(cur_guest_pde[i].pt_base_addr);
+ // Allocate a new second level page table for the shadow
+ shadow_pte = os_hooks->allocate_pages(1);
- ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+ // make our first level page table in the shadow point to it
+ shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
- if (!ent) {
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
+
+
+ /* JRL: This is bad.... */
+ // For now the guest Page Table must always be mapped to host physical memory
+ /* If we swap out a page table or if it isn't present for some reason, this turns real ugly */
+
+ if ((!ent) || (ent->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
// FIXME Panic here - guest is trying to map to physical memory
// it does not own in any way!
return -1;
}
// Address of the relevant second level page table in the guest
- if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
+ if (guest_paddr_to_host_paddr(ent, guest_addr, &guest_pte_host_addr)) {
// Panic here
return -1;
}
- // host_addr now contains the host physical address for the guest's 2nd level page table
+
+ // guest_pte_host_addr now contains the host physical address of the guest's 2nd level page table
// Now we transform it to the relevant virtual address
- cur_guest_pte = os_hooks->physical_to_virtual((void*)host_addr);
+ guest_pte = os_hooks->paddr_to_vaddr((void *)guest_pte_host_addr);
// Now we walk through the second level guest page table
// and clone it into the shadow
- for (j=0;j<MAX_PAGE_TABLE_ENTRIES;j++) {
- cur_shadow_pte[j] = cur_guest_pte[j];
+ for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
+ shadow_pte[j] = guest_pte[j];
- addr_t guest_addr = PAGE_ADDR(cur_shadow_pte[j].page_base_addr);
+ addr_t guest_addr = ((addr_t)shadow_pte[j].page_base_addr) << PAGE_POWER;
- shadow_map_entry_t *ent;
+ shadow_region_t * ent;
- ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
if (!ent) {
// FIXME Panic here - guest is trying to map to physical memory
switch (ent->host_type) {
case HOST_REGION_PHYSICAL_MEMORY:
- // points into currently allocated physical memory, so we just
- // set up the shadow to point to the mapped location
- if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
- // Panic here
- return -1;
+ {
+ addr_t host_addr;
+
+ // points into currently allocated physical memory, so we just
+ // set up the shadow to point to the mapped location
+ if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
+ // Panic here
+ return -1;
+ }
+
+ shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+ // FIXME set vmm_info bits here
+ break;
}
- cur_shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
- // FIXME set vmm_info bits here
- break;
case HOST_REGION_UNALLOCATED:
// points to physical memory that is *allowed* but that we
// have not yet allocated. We mark as not present and set a
// bit to remind us to allocate it later
- cur_shadow_pte[j].present=0;
+ shadow_pte[j].present = 0;
// FIXME Set vminfo bits here so that we know that we will be
// allocating it later
break;
// points to physical memory that is NOT ALLOWED.
// We will mark it as not present and set a bit to remind
// us that it's bad later and insert a GPF then
- cur_shadow_pte[j].present=0;
+ shadow_pte[j].present = 0;
break;
case HOST_REGION_MEMORY_MAPPED_DEVICE:
case HOST_REGION_REMOTE:
-#if 0
+
-/* We generate a page table to correspond to a given memory layout
- * pulling pages from the mem_list when necessary
- * If there are any gaps in the layout, we add them as unmapped pages
- */
+/* We generate a passthrough page table that maps the guest's physical
+ * address space one-to-one through the shadow map.
+ * Any gaps or unbacked regions in the map become not-present pages.
+ */
-vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
+vmm_pde_t * create_passthrough_pde32_pts(shadow_map_t * map) {
ullong_t current_page_addr = 0;
- uint_t layout_index = 0;
- uint_t list_index = 0;
- ullong_t layout_addr = 0;
int i, j;
- uint_t num_entries = layout->num_pages; // The number of pages left in the layout
-
-
vmm_pde_t * pde = os_hooks->allocate_pages(1);
for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
- if (num_entries == 0) {
+ int pte_present = 0;
+ vmm_pte_t * pte = os_hooks->allocate_pages(1);
+
+
+ for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
+ shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
+
+ if (!region ||
+ (region->host_type == HOST_REGION_NOTHING) ||
+ (region->host_type == HOST_REGION_UNALLOCATED) ||
+ (region->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) ||
+ (region->host_type == HOST_REGION_REMOTE) ||
+ (region->host_type == HOST_REGION_SWAPPED)) {
+ pte[j].present = 0;
+ pte[j].flags = 0;
+ pte[j].accessed = 0;
+ pte[j].dirty = 0;
+ pte[j].pte_attr = 0;
+ pte[j].global_page = 0;
+ pte[j].vmm_info = 0;
+ pte[j].page_base_addr = 0;
+ } else {
+ addr_t host_addr;
+ pte[j].present = 1;
+ pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
+
+ pte[j].accessed = 0;
+ pte[j].dirty = 0;
+ pte[j].pte_attr = 0;
+ pte[j].global_page = 0;
+ pte[j].vmm_info = 0;
+
+ if (guest_paddr_to_host_paddr(region, current_page_addr, &host_addr) == -1) {
+ // BIG ERROR
+ // PANIC
+ return NULL;
+ }
+
+ pte[j].page_base_addr = host_addr >> 12;
+
+ pte_present = 1;
+ }
+
+ current_page_addr += PAGE_SIZE;
+ }
+
+ if (pte_present == 0) {
+ VMMFree(pte);
+
pde[i].present = 0;
pde[i].flags = 0;
pde[i].accessed = 0;
pde[i].vmm_info = 0;
pde[i].pt_base_addr = 0;
} else {
- vmm_pte_t * pte = os_hooks->allocate_pages(1);
-
pde[i].present = 1;
pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
pde[i].accessed = 0;
pde[i].large_pages = 0;
pde[i].global_page = 0;
pde[i].vmm_info = 0;
- pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);
-
-
-
- for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
- layout_addr = get_mem_layout_addr(layout, layout_index);
-
- if ((current_page_addr < layout_addr) || (num_entries == 0)) {
- // We have a gap in the layout, fill with unmapped page
- pte[j].present = 0;
- pte[j].flags = 0;
- pte[j].accessed = 0;
- pte[j].dirty = 0;
- pte[j].pte_attr = 0;
- pte[j].global_page = 0;
- pte[j].vmm_info = 0;
- pte[j].page_base_addr = 0;
-
- current_page_addr += PAGE_SIZE;
- } else if (current_page_addr == layout_addr) {
- // Set up the Table entry to map correctly to the layout region
- layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);
-
- if (page_region->type == UNMAPPED) {
- pte[j].present = 0;
- pte[j].flags = 0;
- } else {
- pte[j].present = 1;
- pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
- }
-
- pte[j].accessed = 0;
- pte[j].dirty = 0;
- pte[j].pte_attr = 0;
- pte[j].global_page = 0;
- pte[j].vmm_info = 0;
-
- if (page_region->type == UNMAPPED) {
- pte[j].page_base_addr = 0;
- } else if (page_region->type == SHARED) {
- addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);
-
- pte[j].page_base_addr = host_addr >> 12;
- pte[j].vmm_info = SHARED_PAGE;
- } else if (page_region->type == GUEST) {
- addr_t list_addr = get_mem_list_addr(list, list_index++);
-
- if (list_addr == -1) {
- // error
- // cleanup...
- free_guest_page_tables(pde);
- return NULL;
- }
- PrintDebug("Adding guest page (%x)\n", list_addr);
- pte[j].page_base_addr = list_addr >> 12;
-
- // Reset this when we move over to dynamic page allocation
- // pte[j].vmm_info = GUEST_PAGE;
- pte[j].vmm_info = SHARED_PAGE;
- }
-
- num_entries--;
- current_page_addr += PAGE_SIZE;
- layout_index++;
- } else {
- // error
- PrintDebug("Error creating page table...\n");
- // cleanup
- free_guest_page_tables(pde);
- return NULL;
- }
- }
+ pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(pte);
}
+
}
return pde;
}
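+
+// Usage, as in svm.c above: generate the passthrough tables and install the
+// page-aligned base as the initial shadow CR3:
+//
+//   vm_info.shadow_page_state.shadow_cr3.e_reg.low |=
+//       ((addr_t)create_passthrough_pde32_pts(&(vm_info.mem_map)) & ~0xfff);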
-#endif
+
#include <geekos/serial.h>
+
+void * Identity(void * addr) { return addr; }
+
void * Allocate_VMM_Pages(int num_pages) {
void * start_page = Alloc_Page();
SerialPrint("Allocating Page: %x (%d of %d)\n",start_page, 1, num_pages);