};
+void PrintV3Segments(struct v3_segments * segs);
+void PrintV3CtrlRegs(struct v3_ctrl_regs * regs);
int (*start_guest)(struct guest_info * info);
// int (*stop_vm)(uint_t vm_id);
-
-
+ int (*has_nested_paging)();
};
ullong_t guest_cr3; // points to guest's current page table
// Should this be here??
- reg_ex_t guest_cr0;
+ ullong_t guest_cr0;
// these two reflect the top-level page directory
// of the shadow page table
//
// 2 1.44 MB floppy drives
//
-#if 1
+#if 0
nvram_state->mem_state[NVRAM_REG_FLOPPY_TYPE]= 0x44;
#else
nvram_state->mem_state[NVRAM_REG_FLOPPY_TYPE] = 0x00;
/* MOVE THIS TO AN INIT GUEST ROUTINE */
- init_shadow_map(&(vm_info.mem_map));
- init_shadow_page_state(&(vm_info.shdw_pg_state));
+
+
v3_init_time(&(vm_info.time_state));
- vm_info.shdw_pg_mode = SHADOW_PAGING;
+ init_shadow_map(&(vm_info.mem_map));
+
+ if ((vmm_ops).has_nested_paging()) {
+ vm_info.shdw_pg_mode = NESTED_PAGING;
+ } else {
+ init_shadow_page_state(&(vm_info.shdw_pg_state));
+ vm_info.shdw_pg_mode = SHADOW_PAGING;
+ }
vm_info.cpu_mode = REAL;
vm_info.mem_mode = PHYSICAL_MEM;
{0x08, 0x0f, GENERIC_PRINT_AND_PASSTHROUGH}, // DMA 1 misc registers (csr, req, smask,mode,clearff,reset,enable,mmask)
{0xd0, 0xde, GENERIC_PRINT_AND_PASSTHROUGH}, // DMA 2 misc registers
*/
+ {0x08, 0x0f, GENERIC_PRINT_AND_IGNORE},
+ {0x81, 0x8F, GENERIC_PRINT_AND_IGNORE},
+ {0xd0, 0xdf, GENERIC_PRINT_AND_IGNORE},
+
+
{0x3f8, 0x3f8+7, GENERIC_PRINT_AND_IGNORE}, // COM 1
{0x2f8, 0x2f8+7, GENERIC_PRINT_AND_IGNORE}, // COM 2
{0x3e8, 0x3e8+7, GENERIC_PRINT_AND_IGNORE}, // COM 2
{0x2e8, 0x2e8+7, GENERIC_PRINT_AND_IGNORE}, // COM 2
-
- {0x170, 0x178, GENERIC_PRINT_AND_PASSTHROUGH}, // IDE 1
- {0x376, 0x377, GENERIC_PRINT_AND_PASSTHROUGH}, // IDE 1
- {0x1f0, 0x1f8, GENERIC_PRINT_AND_PASSTHROUGH}, // IDE 0
- {0x3f6, 0x3f7, GENERIC_PRINT_AND_PASSTHROUGH}, // IDE 0
/*
- {0x3f0, 0x3f2, GENERIC_PRINT_AND_PASSTHROUGH}, // Primary floppy controller (base,statusa/statusb,DOR)
- {0x3f4, 0x3f5, GENERIC_PRINT_AND_PASSTHROUGH}, // Primary floppy controller (mainstat/datarate,data)
- {0x3f7, 0x3f7, GENERIC_PRINT_AND_PASSTHROUGH}, // Primary floppy controller (DIR)
- {0x370, 0x372, GENERIC_PRINT_AND_PASSTHROUGH}, // Secondary floppy controller (base,statusa/statusb,DOR)
- {0x374, 0x375, GENERIC_PRINT_AND_PASSTHROUGH}, // Secondary floppy controller (mainstat/datarate,data)
- {0x377, 0x377, GENERIC_PRINT_AND_PASSTHROUGH}, // Secondary floppy controller (DIR)
- {0x378, 0x400, GENERIC_PRINT_AND_PASSTHROUGH}
+ {0x170, 0x178, GENERIC_PRINT_AND_PASSTHROUGH}, // IDE 1
+ {0x376, 0x377, GENERIC_PRINT_AND_PASSTHROUGH}, // IDE 1
+ {0x1f0, 0x1f8, GENERIC_PRINT_AND_PASSTHROUGH}, // IDE 0
+ {0x3f6, 0x3f7, GENERIC_PRINT_AND_PASSTHROUGH}, // IDE 0
*/
+ {0x3f0, 0x3f2, GENERIC_PRINT_AND_IGNORE}, // Primary floppy controller (base,statusa/statusb,DOR)
+ {0x3f4, 0x3f5, GENERIC_PRINT_AND_IGNORE}, // Primary floppy controller (mainstat/datarate,data)
+ {0x3f7, 0x3f7, GENERIC_PRINT_AND_IGNORE}, // Primary floppy controller (DIR)
+ {0x370, 0x372, GENERIC_PRINT_AND_IGNORE}, // Secondary floppy controller (base,statusa/statusb,DOR)
+ {0x374, 0x375, GENERIC_PRINT_AND_IGNORE}, // Secondary floppy controller (mainstat/datarate,data)
+ {0x377, 0x377, GENERIC_PRINT_AND_IGNORE}, // Secondary floppy controller (DIR)
+
+ // {0x378, 0x400, GENERIC_PRINT_AND_IGNORE}
+
};
- struct vm_device * generic = create_generic(range,4, // THIS NUMBER IS CRITICAL
+ struct vm_device * generic = create_generic(range,13, // THIS NUMBER IS CRITICAL
+
NULL,0,NULL,0);
#endif
extern void DisableInts();
+extern void EnableInts();
if (vm_info.shdw_pg_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
+ vm_info.shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
PrintDebug("Created\n");
guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
} else if (vm_info.shdw_pg_mode == NESTED_PAGING) {
// Flush the TLB on entries/exits
-
+ ctrl_area->TLB_CONTROL = 1;
// Enable Nested Paging
- //ctrl_area->NP_ENABLE = 1;
+ ctrl_area->NP_ENABLE = 1;
- //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
+ PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
- // Set the Nested Page Table pointer
- // ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
- // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
+ // Set the Nested Page Table pointer
+ ctrl_area->N_CR3 = ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
// ctrl_area->N_CR3 = Get_CR3();
// guest_state->cr3 |= (Get_CR3() & 0xfffff000);
- // guest_state->g_pat = 0x7040600070406ULL;
+ guest_state->g_pat = 0x7040600070406ULL;
}
ullong_t tmp_tsc;
+ EnableInts();
CLGI();
PrintDebug("SVM Entry to rip=%x...\n", info->rip);
rdtscll(info->time_state.cached_host_tsc);
guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
+ PrintDebug("Launching\n");
safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
rdtscll(tmp_tsc);
PrintDebug("RIP Linear: %x\n", linear_addr);
+ PrintV3Segments(&(info->segments));
+ PrintV3CtrlRegs(&(info->ctrl_regs));
if (info->mem_mode == PHYSICAL_MEM) {
}
+// Query the CPU (via CPUID Fn8000_000A EDX) for AMD SVM nested paging
+// (NP / RVI) support.  Returns 1 if nested paging is available, 0 if not.
+// Wired up as vmm_ops->has_nested_paging in Init_SVM so the generic VMM
+// layer can choose NESTED_PAGING vs. SHADOW_PAGING at guest-init time.
+int has_svm_nested_paging() {
+  uint32_t ret;
+
+  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
+
+  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
+
+  // Test the NP feature bit; zero means the hardware cannot do nested paging.
+  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
+    PrintDebug("SVM Nested Paging not supported\n");
+    return 0;
+  } else {
+    PrintDebug("SVM Nested Paging supported\n");
+    return 1;
+  }
+
+}
+
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
-
+  // Export the nested-paging capability probe so callers (guest init)
+  // can pick NESTED_PAGING when the hardware supports it.
+  vmm_ops->has_nested_paging = &has_svm_nested_paging;
  return;
}
SVM_SUCCESS equ 0x00000000
EXPORT DisableInts
+EXPORT EnableInts
EXPORT exit_test
cli
ret
+align 8
+; Re-enable maskable interrupts on the host (sti).  Counterpart to the
+; existing DisableInts (cli) routine exported above.
+EnableInts:
+	sti
+	ret
align 8
#include <palacios/vm_guest.h>
+#include <palacios/vmm.h>
+
+// Debug dump of all guest segment registers: selector, base, and limit
+// for CS/DS/ES/FS/GS/SS/LDTR/GDTR/IDTR/TR, one PrintDebug line each.
+// NOTE(review): this casts struct v3_segments to an array of
+// struct v3_segment and indexes it in the order listed below -- assumes
+// the struct declares its members contiguously in exactly that order;
+// TODO confirm against the v3_segments definition in vm_guest.h.
+// NOTE(review): base is printed with %x, which truncates if the base
+// field is wider than 32 bits -- confirm the field width.
+void PrintV3Segments(struct v3_segments * segs) {
+  int i = 0;
+  struct v3_segment * seg_ptr = (struct v3_segment *)segs;
+
+  // NULL-terminated name table drives the iteration count.
+  char *seg_names[] = {"CS", "DS" , "ES", "FS", "GS", "SS" , "LDTR", "GDTR", "IDTR", "TR", NULL};
+  PrintDebug("Segments\n");
+
+  for (i = 0; seg_names[i] != NULL; i++) {
+
+    PrintDebug("\t%s: Sel=%x, base=%x, limit=%x\n", seg_names[i], seg_ptr[i].selector, seg_ptr[i].base, seg_ptr[i].limit);
+
+  }
+
+}
+
+
+// Debug dump of the guest control registers (CR0/CR2/CR3/CR4/CR8/FLAGS),
+// one PrintDebug line each.
+// NOTE(review): treats struct v3_ctrl_regs as a flat array of v3_reg_t in
+// the order listed below -- assumes the struct declares its members
+// contiguously in that order; TODO confirm against vm_guest.h.
+// NOTE(review): %x prints only the low 32 bits if v3_reg_t is 64-bit
+// (the header says "32 bit Ctrl Regs") -- confirm.
+void PrintV3CtrlRegs(struct v3_ctrl_regs * regs) {
+  int i = 0;
+  v3_reg_t * reg_ptr = (v3_reg_t *)regs;
+  // NULL-terminated name table drives the iteration count.
+  char * reg_names[] = {"CR0", "CR2", "CR3", "CR4", "CR8", "FLAGS", NULL};
+
+  PrintDebug("32 bit Ctrl Regs:\n");
+
+  for (i = 0; reg_names[i] != NULL; i++) {
+    PrintDebug("\t%s=0x%x\n", reg_names[i], reg_ptr[i]);
+  }
+}
int guest_va_to_guest_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa) {
- if (guest_info->shdw_pg_mode == SHADOW_PAGING) {
- if (guest_info->mem_mode == PHYSICAL_MEM) {
- // guest virtual address is the same as the physical
- *guest_pa = guest_va;
- return 0;
- }
-
-
+ if (guest_info->mem_mode == PHYSICAL_MEM) {
+ // guest virtual address is the same as the physical
+ *guest_pa = guest_va;
+ return 0;
+ }
+
+ if (guest_info->shdw_pg_mode == SHADOW_PAGING) {
// Guest Is in Paged mode
switch (guest_info->cpu_mode) {
case PROTECTED:
} else if (guest_info->shdw_pg_mode == NESTED_PAGING) {
// Fill in
-
+ return -1;
} else {
return -1;
}
*(char*)shadow_cr0 &= 0xf0;
*(char*)shadow_cr0 |= new_cr0_val;
+
PrintDebug("New CR0=%x, New Shadow CR0=%x\n", *real_cr0, *shadow_cr0);
} else {
PrintDebug("Old CR0=%x\n", *real_cr0);
PrintDebug("Old CR0=%x, Old Shadow CR0=%x\n", *real_cr0, *shadow_cr0);
*real_cr0 = *new_cr0;
real_cr0->pg = 1;
+ real_cr0->et = 1;
*shadow_cr0 = *new_cr0;
+ shadow_cr0->et = 1;
PrintDebug("New CR0=%x, New Shadow CR0=%x\n", *real_cr0, *shadow_cr0);
} else {
index += 2;
+ PrintDebug("MovToCR0 instr:\n");
+ PrintTraceMemDump(instr, 15);
+ PrintDebug("EAX=%x\n", *(uint_t*)&(info->vm_regs.rax));
addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);
new_cr0 = (struct cr0_32 *)first_operand;
-
+ PrintDebug("first operand=%x\n", *(uint_t *)first_operand);
if (info->shdw_pg_mode == SHADOW_PAGING) {
struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
*shadow_cr0 = *new_cr0;
*real_cr0 = *new_cr0;
+ shadow_cr0->et = 1;
+ real_cr0->et = 1;
//
// Activate Shadow Paging
} else if (new_cr0->pe == 0) {
info->cpu_mode = REAL;
+ info->mem_mode = PHYSICAL_MEM;
+ PrintDebug("Entering Real Mode\n");
+
+ PrintV3CtrlRegs(&(info->ctrl_regs));
+ // reinstate the identity mapped paged tables
+ // But keep the shadow tables around to handle TLB issues.... UGH...
+ //info->shdw_pg_state.shadow_cr3 &= 0x00000fff;
+ //info->shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(info) & ~0xfff);
+
+ //info->ctrl_regs.cr3 = info->shdw_pg_state.shadow_cr3;
+ info->ctrl_regs.cr3 = ((addr_t)create_passthrough_pde32_pts(info) & ~0xfff);
+
*shadow_cr0 = *new_cr0;
*real_cr0 = *new_cr0;
real_cr0->pg = 1;
+ shadow_cr0->et = 1;
+ real_cr0->et = 1;
+
}
if (info->mem_mode == PHYSICAL_MEM) {
virt_cr0->pg = 0; // clear the pg bit because guest doesn't think it's on
}
+
+ PrintDebug("real CR0: %x\n", *(uint_t*)real_cr0);
+ PrintDebug("returned CR0: %x\n", *(uint_t*)virt_cr0);
+
} else {
*virt_cr0 = *real_cr0;
int handle_cr3_read(struct guest_info * info) {
- if (info->cpu_mode == PROTECTED ) {
+
+ if (info->cpu_mode == REAL) {
+ // what does this mean???
+
+ /*
+
+ addr_t host_addr;
+ addr_t linear_addr = 0;
+
+
+
+ linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
+
+
+ PrintDebug("RIP Linear: %x\n", linear_addr);
+ PrintV3Segments(&(info->segments));
+
+
+ if (info->mem_mode == PHYSICAL_MEM) {
+ guest_pa_to_host_pa(info, linear_addr, &host_addr);
+ } else if (info->mem_mode == VIRTUAL_MEM) {
+ guest_va_to_host_pa(info, linear_addr, &host_addr);
+ }
+
+
+ pt32_lookup((pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3), , addr_t * paddr);
+ */
+
+
+ return -1;
+ } else if (info->cpu_mode == PROTECTED) {
+
int index = 0;
int ret;
char instr[15];
%include "vmm_symbol.asm"
EXPORT DisableInts
+EXPORT EnableInts
EXPORT GetGDTR
EXPORT GetIDTR
align 8
+; Re-enable maskable interrupts (sti); counterpart to DisableInts.
+; Duplicate of the EnableInts stub added to the other asm stub file.
+EnableInts:
+	sti
+	ret
+
+align 8
GetGDTR:
push ebp
mov ebp, esp
the reason for the fault was that the page is not present in the shadow,
_THEN_ we have to map the shadow page in and reexecute, this will generate
a permissions fault which is _THEN_ valid to send to the guest
+ _UNLESS_ both the guest and shadow have marked the page as not present
whew...
*/
if ((guest_pde_access != PT_ACCESS_OK) &&
- ( (shadow_pde_access != PT_ENTRY_NOT_PRESENT) &&
- (guest_pde_access != PT_ENTRY_NOT_PRESENT))) { // aka (guest permission error)
+ (
+ ( (shadow_pde_access != PT_ENTRY_NOT_PRESENT) &&
+ (guest_pde_access != PT_ENTRY_NOT_PRESENT)) // aka (guest permission error)
+ ||
+ ( (shadow_pde_access == PT_ENTRY_NOT_PRESENT) &&
+ (guest_pde_access == PT_ENTRY_NOT_PRESENT)))) {
// inject page fault to the guest (Guest PDE fault)
info->ctrl_regs.cr2 = fault_addr;
// this probably shouldn't ever happen
PrintDebug("Unknown Error occurred\n");
PrintDebug("Manual Says to inject page fault into guest\n");
- //return -1; Huh? It's a successful handling of the fault...
+
+
return 0;
+
}
//PrintDebugPageTables(shadow_pd);
// Check the shadow page permissions
shadow_pte_access = can_access_pte32(shadow_pt, fault_addr, error_code);
+
+ PrintDebug("Guest PTE: (access=%d)\n\t", guest_pte_access);
+ PrintPTE32(fault_addr, guest_pte);
+ PrintDebug("Shadow PTE: (access=%d)\n\t", shadow_pte_access);
+ PrintPTE32(fault_addr, shadow_pte);
/* This should be redone,
but basically the reasoning is that there can be multiple reasons for a page fault:
If there is a permissions failure for a page present in the guest _BUT_
the reason for the fault was that the page is not present in the shadow,
_THEN_ we have to map the shadow page in and reexecute, this will generate
a permissions fault which is _THEN_ valid to send to the guest
+ _UNLESS_ both the guest and shadow have marked the page as not present
whew...
*/
if ((guest_pte_access != PT_ACCESS_OK) &&
- ((shadow_pte_access != PT_ENTRY_NOT_PRESENT) &&
- (guest_pte_access != PT_ENTRY_NOT_PRESENT))) { // aka (guest permission error)
+ (
+ ((shadow_pte_access != PT_ENTRY_NOT_PRESENT) &&
+ (guest_pte_access != PT_ENTRY_NOT_PRESENT)) // aka (guest permission error)
+ ||
+ ((shadow_pte_access == PT_ENTRY_NOT_PRESENT) &&
+ (guest_pte_access == PT_ENTRY_NOT_PRESENT)))) {
// Inject page fault into the guest
info->ctrl_regs.cr2 = fault_addr;