&v3_handle_vm_cr_read,
&v3_handle_vm_cr_write,
core);
+
+
+ {
+#define INT_PENDING_AMD_MSR 0xc0010055
+
+ // Hook the fast syscall MSRs with NULL read/write handlers
+ // (passthrough — see the note on passthrough reads below).
+ v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
+
+ // SYSENTER MSRs: same NULL-handler (passthrough) hooks.
+ v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);
+
+
+ // Segment base MSRs: same NULL-handler (passthrough) hooks.
+ v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
+
+ // Passthrough read operations are ok.
+ // Guest writes to the AMD interrupt-pending MSR are routed to the
+ // unhandled-write handler instead of being passed through.
+ v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, v3_msr_unhandled_write, NULL);
+ }
}
#ifdef V3_CONFIG_CHECKPOINT
int v3_svm_save_core(struct guest_info * core, void * ctx){
+ // Checkpoint the CPL as its own named field; the matching
+ // v3_svm_load_core restores it directly rather than extracting
+ // it from the saved VMCB image.
+ v3_chkpt_save_8(ctx, "cpl", &(core->cpl));
+ // NOTE(review): the return values of both save calls are ignored
+ // here — confirm checkpoint write failures are surfaced elsewhere.
v3_chkpt_save(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data);
return 0;
}
-int v3_svm_load_core(struct guest_info * core, void * chkpt_ctx){
- struct cr0_32 * shadow_cr0;
- vmcb_saved_state_t * guest_state;
- vmcb_ctrl_t * guest_ctrl;
-
-
+int v3_svm_load_core(struct guest_info * core, void * ctx){
+
+ // Restore the CPL saved as a separate field by v3_svm_save_core.
+ // NOTE(review): the v3_chkpt_load_8 return value is ignored, unlike
+ // the vmcb_data load below — confirm a missing/failed "cpl" read is
+ // safe to ignore on restore.
+ v3_chkpt_load_8(ctx, "cpl", &(core->cpl));
+ // Reload the entire VMCB as one raw page.
- if (v3_chkpt_load(chkpt_ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1){
+ if (v3_chkpt_load(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
return -1;
}
+ // NOTE(review): the removed code below used to re-derive rip/rsp/rax,
+ // the control/debug registers, and the segment state from the loaded
+ // VMCB, and re-activated shadow or passthrough page tables. Verify
+ // that this architectural state is now restored elsewhere in the
+ // checkpoint path before dropping it here.
- guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(core->vmm_data));
- guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t *)(core->vmm_data));
-
-
- core->rip = guest_state->rip;
- core->vm_regs.rsp = guest_state->rsp;
- core->vm_regs.rax = guest_state->rax;
-
- core->cpl = guest_state->cpl;
-
- core->ctrl_regs.cr0 = guest_state->cr0;
- core->ctrl_regs.cr2 = guest_state->cr2;
- core->ctrl_regs.cr4 = guest_state->cr4;
- core->dbg_regs.dr6 = guest_state->dr6;
- core->dbg_regs.dr7 = guest_state->dr7;
- core->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
- core->ctrl_regs.rflags = guest_state->rflags;
- core->ctrl_regs.efer = guest_state->efer;
-
-
- shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);
-
-
- if (core->shdw_pg_mode == SHADOW_PAGING) {
- if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
- if (v3_activate_shadow_pt(core) == -1) {
- PrintError("Failed to activate shadow page tables\n");
- return -1;
- }
- } else {
- if (v3_activate_passthrough_pt(core) == -1) {
- PrintError("Failed to activate passthrough page tables\n");
- return -1;
- }
- }
- }
-
-
- v3_get_vmcb_segments((vmcb_t *)(core->vmm_data), &(core->segments));
return 0;
}
#endif
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
+ // Signed local to stage the host->guest TSC offset before it is
+ // written into the VMCB control area below.
+ sint64_t tsc_offset;
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
#endif
v3_time_enter_vm(info);
+ // Stage the computed offset in the signed local, then store it into
+ // the VMCB. NOTE(review): presumably this is for explicit signed
+ // typing/debuggability — confirm no functional change is intended.
- guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
+ tsc_offset = v3_tsc_host_offset(&info->time_state);
+ guest_ctrl->TSC_OFFSET = tsc_offset;
//V3_Print("Calling v3_svm_launch\n");
if (info->vcpu_id == 0) {
info->core_run_state = CORE_RUNNING;
+ // NOTE(review): core 0 no longer flips the VM-wide run_state to
+ // VM_RUNNING here — confirm that transition now happens elsewhere.
- info->vm_info->run_state = VM_RUNNING;
} else {
PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);
while (info->core_run_state == CORE_STOPPED) {
+
+ // Exit the spin-wait if the whole VM was torn down before this
+ // secondary core ever got initialized; otherwise it would spin
+ // here forever.
+ if (info->vm_info->run_state == VM_STOPPED) {
+ // The VM was stopped before this core was initialized.
+ return 0;
+ }
+
v3_yield(info);
//PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
}
PrintDebug("SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id);
+
+ // We'll be paranoid about race conditions here
+ v3_wait_at_barrier(info);
}
PrintDebug("SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",