extern struct vmm_os_hooks * os_hooks;
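+
+// Free a PDE32 page table hierarchy: release every guest page marked
+// with GUEST_PAGE in vmm_info, then each second-level page table, and
+// finally the page directory itself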
+void delete_page_tables_pde32(vmm_pde_t * pde) {
+ int i, j;
+
+ if (pde == NULL) {
+ return;
+ }
+
+ for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
+ if (pde[i].present) {
+ vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
+
+ for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
+ if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
+ os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
+ }
+ }
+
+ os_hooks->free_page(pte);
+ }
+ }
+
+ os_hooks->free_page(pde);
+}
+
+
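+// Initialize an empty shadow paging state: both directories are PDE32
+// and start out unallocated, and the shadow map starts out empty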
+int init_shadow_paging_state(shadow_paging_state_t *state)
+{
+ state->guest_page_directory_type = state->shadow_page_directory_type = PDE32;
+
+ state->guest_page_directory = state->shadow_page_directory = NULL;
+
+ init_shadow_map(&(state->shadow_map));
+ return 0;
+}
+
+
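+// Discard the current shadow page tables and rebuild them by walking
+// the guest's page tables, mapping each guest physical address to its
+// host physical address through the shadow map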
+int wholesale_update_shadow_paging_state(shadow_paging_state_t *state)
+{
+ unsigned i, j;
+ vmm_pde_t *cur_guest_pde, *cur_shadow_pde;
+ vmm_pte_t *cur_guest_pte, *cur_shadow_pte;
+
+ // For now, we'll only work with PDE32
+ if (state->guest_page_directory_type != PDE32) {
+ return -1;
+ }
+
+ cur_shadow_pde = (vmm_pde_t *)(state->shadow_page_directory);
+
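+ // The guest's directory is given as a physical address, so translate
+ // it to a virtual address before we walk it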
+ cur_guest_pde = (vmm_pde_t*)(os_hooks->physical_to_virtual(state->guest_page_directory));
+
+ // Delete the current shadow page tables so we can rebuild from scratch
+ delete_page_tables_pde32(cur_shadow_pde);
+
+ cur_shadow_pde = os_hooks->allocate_pages(1);
+
+ if (!cur_shadow_pde) {
+ // Panic here - could not allocate the new shadow page directory
+ return -1;
+ }
+
+ state->shadow_page_directory = cur_shadow_pde;
+ state->shadow_page_directory_type = PDE32;
+
+
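+ // Walk every top-level directory entry, cloning the guest's entry
+ // into the shadow and rewriting guest physical addresses as we go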
+ for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
+ cur_shadow_pde[i] = cur_guest_pde[i];
+ // The shadow can be identical to the guest if it's not present
+ if (!cur_shadow_pde[i].present) {
+ continue;
+ }
+ if (cur_shadow_pde[i].large_pages) {
+ // Large page - translate it through the shadow map to find its host physical location
+ addr_t guest_addr = PAGE_ADDR(cur_shadow_pde[i].pt_base_addr);
+ addr_t host_addr;
+ shadow_map_entry_t *ent;
+
+ ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+
+ if (!ent) {
+ // FIXME Panic here - guest is trying to map to physical memory
+ // it does not own in any way!
+ return -1;
+ }
+
+ // FIXME Bounds check here to see if it's trying to trick us
+
+ switch (ent->host_type) {
+ case HOST_REGION_PHYSICAL_MEMORY:
+ // points into currently allocated physical memory, so we just
+ // set up the shadow to point to the mapped location
+ if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
+ // Panic here
+ return -1;
+ }
+ cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+ // FIXME set vmm_info bits here
+ break;
+ case HOST_REGION_UNALLOCATED:
+ // points to physical memory that is *allowed* but that we
+ // have not yet allocated. We mark as not present and set a
+ // bit to remind us to allocate it later
+ cur_shadow_pde[i].present = 0;
+ // FIXME Set vmm_info bits here so that we know that we will be
+ // allocating it later
+ break;
+ case HOST_REGION_NOTHING:
+ // points to physical memory that is NOT ALLOWED.
+ // We mark it as not present and set a bit to remind us
+ // that it's bad, so we can inject a GPF when it is touched
+ cur_shadow_pde[i].present = 0;
+ break;
+ case HOST_REGION_MEMORY_MAPPED_DEVICE:
+ case HOST_REGION_REMOTE:
+ case HOST_REGION_SWAPPED:
+ default:
+ // Panic. Currently unhandled
+ return -1;
+ break;
+ }
+ } else {
+ addr_t host_addr;
+ addr_t guest_addr;
+
+ // Small page - the PDE was already copied from the guest above,
+ // so we just follow it down to the child table
+
+ // Allocate a new second-level page table for the shadow
+ cur_shadow_pte = os_hooks->allocate_pages(1);
+
+ if (!cur_shadow_pte) {
+ // Panic here - could not allocate a shadow page table
+ return -1;
+ }
+
+ // Make our first-level page table in the shadow point to it
+ cur_shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(cur_shadow_pte);
+
+ shadow_map_entry_t *ent;
+
+ guest_addr = PAGE_ADDR(cur_guest_pde[i].pt_base_addr);
+
+ ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+
+ if (!ent) {
+ // FIXME Panic here - guest is trying to map to physical memory
+ // it does not own in any way!
+ return -1;
+ }
+
+ // Map the guest physical address of the guest's second-level page table
+ if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
+ // Panic here
+ return -1;
+ }
+ // host_addr now contains the host physical address for the guest's 2nd level page table
+
+ // Now we convert it to a virtual address we can dereference
+ cur_guest_pte = os_hooks->physical_to_virtual((void*)host_addr);
+ // Now we walk through the second level guest page table
+ // and clone it into the shadow
+ for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
+ cur_shadow_pte[j] = cur_guest_pte[j];
+
+ addr_t guest_addr = PAGE_ADDR(cur_shadow_pte[j].page_base_addr);
+
+ shadow_map_entry_t *ent;
+
+ ent = get_shadow_map_region_by_addr(&(state->shadow_map),guest_addr);
+
+ if (!ent) {
+ // FIXME Panic here - guest is trying to map to physical memory
+ // it does not own in any way!
+ return -1;
+ }
+
+ switch (ent->host_type) {
+ case HOST_REGION_PHYSICAL_MEMORY:
+ // points into currently allocated physical memory, so we just
+ // set up the shadow to point to the mapped location
+ if (map_guest_physical_to_host_physical(ent,guest_addr,&host_addr)) {
+ // Panic here
+ return -1;
+ }
+ cur_shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+ // FIXME set vmm_info bits here
+ break;
+ case HOST_REGION_UNALLOCATED:
+ // points to physical memory that is *allowed* but that we
+ // have not yet allocated. We mark as not present and set a
+ // bit to remind us to allocate it later
+ cur_shadow_pte[j].present = 0;
+ // FIXME Set vmm_info bits here so that we know that we will be
+ // allocating it later
+ break;
+ case HOST_REGION_NOTHING:
+ // points to physical memory that is NOT ALLOWED.
+ // We mark it as not present and set a bit to remind us
+ // that it's bad, so we can inject a GPF when it is touched
+ cur_shadow_pte[j].present = 0;
+ break;
+ case HOST_REGION_MEMORY_MAPPED_DEVICE:
+ case HOST_REGION_REMOTE:
+ case HOST_REGION_SWAPPED:
+ default:
+ // Panic. Currently unhandled
+ return -1;
+ break;
+ }
+ }
+ }
+ }
+ return 0;
+}
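+
+#if 0
+/* A minimal usage sketch, not part of this patch: the wrapper names
+ * below (setup_guest_shadow_paging, teardown_guest_shadow_paging) are
+ * hypothetical, and how the shadow map and guest directory get
+ * populated is elided. It only shows the intended call order of the
+ * functions above. */
+int setup_guest_shadow_paging(shadow_paging_state_t *state)
+{
+ init_shadow_paging_state(state);
+
+ // ... populate state->shadow_map and set state->guest_page_directory ...
+
+ // Clone the guest's page tables into a fresh shadow hierarchy
+ return wholesale_update_shadow_paging_state(state);
+}
+
+void teardown_guest_shadow_paging(shadow_paging_state_t *state)
+{
+ // Frees the shadow tables and any guest pages marked GUEST_PAGE;
+ // safe to call even if the shadow directory is still NULL
+ delete_page_tables_pde32((vmm_pde_t *)(state->shadow_page_directory));
+ state->shadow_page_directory = NULL;
+}
+#endif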
+
+#if 0
/* We generate a page table to correspond to a given memory layout
* pulling pages from the mem_list when necessary
* If there are any gaps in the layout, we add them as unmapped pages
return pde;
}
+#endif
-void free_guest_page_tables(vmm_pde_t * pde) {
- int i, j;
-
-
- for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
- if (pde[i].present) {
- vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
-
- for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
- if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
- os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
- }
- }
-
- os_hooks->free_page(pte);
- }
- }
-
- os_hooks->free_page(pde);
-}
-
+#if 0
pml4e64_t * generate_guest_page_tables_64(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
pml4e64_t * pml = os_hooks->allocate_pages(1);
}
return pml;
}
+
+#endif