#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vm_guest.h>
+#include <palacios/vmm_ctrl_regs.h>
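+// vmm_ctrl_regs.h provides the cr3_32_PAE and cr4_32 bitfield layouts
+// used below when activating the PAE passthrough tables.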
#ifndef V3_CONFIG_DEBUG_NESTED_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
void * page = 0;
void *temp;
- temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1,
- core->shdw_pg_mode==SHADOW_PAGING ? V3_ALLOC_PAGES_CONSTRAINT_4GB : 0);
- if (!temp) {
+ temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0); // no constraints
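+ // PAE table entries are 64 bits wide, so table pages no longer have to sit
+ // below 4GB the way 32-bit non-PAE shadow page tables did.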
+
+ if (!temp) {
PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
return 0;
}

page = V3_VAddr(temp);
memset(page, 0, PAGE_SIZE_4KB);

return (addr_t)page;
}
switch(mode) {
case REAL:
case PROTECTED:
- delete_page_tables_32((pde32_t *)V3_VAddr((void *)(core->direct_map_pt)));
- break;
+ // Intentional fallthrough here
+ // There are *only* PAE tables
case PROTECTED_PAE:
case LONG:
case LONG_32_COMPAT:
    // Long mode will only use 32PAE page tables...
    delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
    break;
// For now... But we need to change this....
// As soon as shadow paging becomes active the passthrough tables are hosed
// So this will cause chaos if it is called at that time
-
- info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
- //PrintError(info->vm_info, info, "Activate Passthrough Page tables not implemented\n");
+ struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
+ struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
+ addr_t shadow_pt_addr = *(addr_t*)&(info->direct_map_pt);
+ // Passthrough PTs will only be PAE page tables.
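+ // CR3 in PAE mode holds the 32-byte-aligned PDPT physical base in bits 31:5,
+ // hence the shift by 5.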
+ shadow_cr3->pdpt_base_addr = shadow_pt_addr >> 5;
+ shadow_cr4->pae = 1;
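+ // CR4.PAE must be set for the hardware to walk these 3-level tables.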
+ PrintDebug(info->vm_info, info, "Activated Passthrough Page tables\n");
return 0;
}
switch(mode) {
case REAL:
case PROTECTED:
- return handle_passthrough_pagefault_32(info, fault_addr, error_code);
-
+ // Note intentional fallthrough here
+ // There are only PAE page tables now
case PROTECTED_PAE:
case LONG:
case LONG_32_COMPAT:
    // Long mode will only use 32PAE page tables...
    return handle_passthrough_pagefault_32pae(info, fault_addr, error_code);
switch(mode) {
case REAL:
case PROTECTED:
- return invalidate_addr_32(info, inv_addr);
-
+ // Intentional fallthrough - there
+ // are only PAE page tables now
case PROTECTED_PAE:
case LONG:
case LONG_32_COMPAT:
    return invalidate_addr_32pae(info, inv_addr);
switch(mode) {
case REAL:
case PROTECTED:
- return invalidate_addr_32_range(info, inv_addr_start, inv_addr_end);
-
+ // Intentional fallthrough
+ // There are only PAE PTs now
case PROTECTED_PAE:
case LONG:
case LONG_32_COMPAT:
    return invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end);
int pdpe_index = PDPE32PAE_INDEX(fault_addr);
int pde_index = PDE32PAE_INDEX(fault_addr);
int pte_index = PTE32PAE_INDEX(fault_addr);
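+ // A 32PAE virtual address splits into a 2-bit PDPT index, 9-bit PDE index,
+ // 9-bit PTE index, and 12-bit page offset.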
+
struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->vcpu_id, fault_addr);
if (region == NULL) {
    PrintError(info->vm_info, info, "Invalid region in passthrough page fault 32PAE, addr=%p\n", (void *)fault_addr);
    return -1;
}
+ PrintDebug(info->vm_info, info, "Direct Paging 32PAE page fault handler=%p\n", (void *)fault_addr);
+
// Lookup the correct PDPE address based on the PAGING MODE
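+ // Under shadow paging the passthrough root was installed into the shadow CR3
+ // by v3_activate_passthrough_pt(); under nested paging it lives in direct_map_pt.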
if (info->shdw_pg_mode == SHADOW_PAGING) {
pdpe = CR3_TO_PDPE32PAE_VA(info->ctrl_regs.cr3);
} else {
pdpe = CR3_TO_PDPE32PAE_VA(info->direct_map_pt);
}
-
+
+ PrintDebug(info->vm_info, info, "Top level pdpe error pdp address=%p\n", (void *)pdpe);
// Fix up the PDPE entry
if (pdpe[pdpe_index].present == 0) {
pde = (pde32pae_t *)create_generic_pt_page(info);
-
+ PrintDebug(info->vm_info, info, "Creating a new pd page=%p\n", (void *)pde);
pdpe[pdpe_index].present = 1;
// Set default PDPE Flags...
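+ // (32PAE PDPTEs have no writable/user bits -- those are reserved -- so only
+ // present and the base address are set.)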
pdpe[pdpe_index].pd_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pde));
} else {
pde = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
}
-
+ PrintDebug(info->vm_info, info, "Handling pde error pd base address =%p\n", (void *)pde);
// Fix up the PDE entry
if (pde[pde_index].present == 0) {
pte = (pte32pae_t *)create_generic_pt_page(info);
-
+ PrintDebug(info->vm_info, info, "Creating a new pt page=%p\n", (void *)pte);
pde[pde_index].present = 1;
pde[pde_index].writable = 1;
pde[pde_index].user_page = 1;
    pde[pde_index].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
} else {
    pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
}
-
+ PrintDebug(info->vm_info, info, "Handling pte error pt base address=%p\n", (void *)pte);
// Fix up the PTE entry
if (pte[pte_index].present == 0) {
    pte[pte_index].user_page = 1;

    if ((region->flags.alloced == 1) && (region->flags.read == 1)) {
        // Full access
        pte[pte_index].present = 1;
        pte[pte_index].writable = (region->flags.write == 1);

        if (v3_gpa_to_hpa(info, fault_addr, &host_addr) == -1) {
            return -1;
        }

        pte[pte_index].page_base_addr = PAGE_BASE_ADDR(host_addr);
+       PrintDebug(info->vm_info, info, "PTE mapped to=%p\n", (void *)host_addr);
+       PrintDebug(info->vm_info, info, "PTE is=%llx\n", *(uint64_t *)&(pte[pte_index]));
} else {
return region->unhandled(info, fault_addr, fault_addr, region, error_code);
}
} else {
return region->unhandled(info, fault_addr, fault_addr, region, error_code);
}
+
+ PrintDebug(info->vm_info, info, "Handler ends with fault address=%p\n", (void *)fault_addr);
return 0;
}