#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>
+#ifdef V3_CONFIG_MEM_TRACK
+#include <palacios/vmm_mem_track.h>
+#endif
+
#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
PrintDebug(VM_NONE, VCORE_NONE, "Allocating page\n");
- temp = V3_AllocPages(1);
+ temp = V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
if (!temp) {
PrintError(VM_NONE, VCORE_NONE, "Cannot allocate VMCS\n");
return -1;
// Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
- vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);
+ vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE );
- core->ctrl_regs.cr3 = core->direct_map_pt;
+ v3_activate_passthrough_pt(core);
// vmx_state->pinbased_ctrls |= NMI_EXIT;
/* Add CR exits */
vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
vmx_state->pri_proc_ctrls.cr3_str_exit = 1;
-
+
+ // Note that we intercept cr4.pae writes
+ // and we have cr4 read-shadowed to the shadow pager's cr4
+
vmx_state->pri_proc_ctrls.invlpg_exit = 1;
/* Add page fault exits */
- if (v3_init_ept(core, &hw_info) == -1) {
+ if (v3_init_nested_paging_core(core, &hw_info) == -1) {
PrintError(core->vm_info, core, "Error initializing EPT\n");
return -1;
}
((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->ne = 1;
((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->cd = 0;
- if (v3_init_ept(core, &hw_info) == -1) {
+ if (v3_init_nested_paging_core(core, &hw_info) == -1) {
PrintError(core->vm_info, core, "Error initializing EPT\n");
return -1;
}
return -1;
}
- vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1);
+ vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
if (vmx_state->msr_area_paddr == (addr_t)NULL) {
PrintError(core->vm_info, core, "could not allocate msr load/store area\n");
struct cr0_32 * shadow_cr0;
addr_t vmcs_page_paddr; //HPA
- vmcs_page_paddr = (addr_t) V3_AllocPages(1);
+ vmcs_page_paddr = (addr_t) V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
if (!vmcs_page_paddr) {
PrintError(core->vm_info, core, "Could not allocate space for a vmcs in VMX\n");
// Conditionally yield the CPU if the timeslice has expired
v3_schedule(info);
+#ifdef V3_CONFIG_MEM_TRACK
+ v3_mem_track_entry(info);
+#endif
+
// Update timer devices late after being in the VM so that as much
// of the time in the VM is accounted for as possible. Also do it before
// updating IRQ entry state so that any interrupts the timers raise get
check_vmcs_write(VMCS_PREEMPT_TIMER, preempt_window);
}
-
+
+ V3_FP_ENTRY_RESTORE(info);
{
uint64_t entry_tsc = 0;
info->num_exits++;
+ V3_FP_EXIT_SAVE(info);
+
/* If we have the preemption time, then use it to get more accurate guest time */
if (vmx_info->pin_ctrls.active_preempt_timer) {
uint32_t cycles_left = 0;
v3_handle_timeouts(info, guest_cycles);
}
+#ifdef V3_CONFIG_MEM_TRACK
+ v3_mem_track_exit(info);
+#endif
+
return 0;
}
PrintDebug(info->vm_info, info, "Starting VMX core %u\n", info->vcpu_id);
+#ifdef V3_CONFIG_HVM
+ if (v3_setup_hvm_vm_for_boot(vm)) {
+ PrintError(vm, VCORE_NONE, "HVM setup for boot failed\n");
+ return -1;
+ }
+#endif
+
while (1) {
if (info->core_run_state == CORE_STOPPED) {
if (info->vcpu_id == 0) {
} else {
PrintDebug(info->vm_info, info, "VMX core %u: Waiting for core initialization\n", info->vcpu_id);
+
+ V3_NO_WORK(info);
while (info->core_run_state == CORE_STOPPED) {
// The VM was stopped before this core was initialized.
return 0;
}
-
- v3_yield(info,-1);
+
+ V3_STILL_NO_WORK(info);
//PrintDebug(info->vm_info, info, "VMX core %u: still waiting for INIT\n",info->vcpu_id);
}
-
+
+ V3_HAVE_WORK_AGAIN(info);
+
PrintDebug(info->vm_info, info, "VMX core %u initialized\n", info->vcpu_id);
// We'll be paranoid about race conditions here