#include <palacios/vmm_rbtree.h>
#include <palacios/vmm_barrier.h>
+#ifdef V3_CONFIG_CHECKPOINT
+#include <palacios/vmm_checkpoint.h>
+#endif
#include <palacios/vmm_direct_paging.h>
&v3_handle_vm_cr_read,
&v3_handle_vm_cr_write,
core);
+
+
+ {
+#define INT_PENDING_AMD_MSR 0xc0010055
+
+ v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);
+
+ v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);
+
+
+ v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
+ v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
+
+ // Passthrough read operations are ok.
+ v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, v3_msr_unhandled_write, NULL);
+ }
}
}
+#ifdef V3_CONFIG_CHECKPOINT
+int v3_svm_save_core(struct guest_info * core, void * ctx){
+
+    /* Save this core's SVM state to the checkpoint context:
+     * the guest CPL and the raw VMCB page (control + save-state areas).
+     * Propagate failures so the caller can abort the checkpoint, matching
+     * the error handling in v3_svm_load_core.
+     */
+    if (v3_chkpt_save_8(ctx, "cpl", &(core->cpl)) == -1) {
+	return -1;
+    }
+
+    if (v3_chkpt_save(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
+	return -1;
+    }
+
+    return 0;
+}
+
+int v3_svm_load_core(struct guest_info * core, void * ctx){
+
+    /* Restore this core's SVM state from the checkpoint context.
+     * Check the CPL load as well — previously only the vmcb_data load was
+     * checked, so a failed CPL restore would silently continue with a
+     * stale privilege level.
+     */
+    if (v3_chkpt_load_8(ctx, "cpl", &(core->cpl)) == -1) {
+	return -1;
+    }
+
+    /* vmcb_data is the full VMCB page (control + save-state areas). */
+    if (v3_chkpt_load(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
+	return -1;
+    }
+
+    return 0;
+}
+#endif
+
static int update_irq_exit_state(struct guest_info * info) {
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
+ sint64_t tsc_offset;
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
#endif
v3_time_enter_vm(info);
- guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
-
- if(info->core_move_state == CORE_MOVE_PENDING) {
- v3_stgi();
-
- if(V3_MOVE_THREAD_TO_CPU(info->target_pcpu_id, info->core_thread) != 0){
- PrintError("Failed to move Vcore %d to CPU %d\n",
- info->vcpu_id,
- info->target_pcpu_id);
- } else {
- info->pcpu_id = info->target_pcpu_id;
- V3_Print("Core move done, vcore %d is running on CPU %d now\n",
- info->vcpu_id,
- V3_Get_CPU());
- }
-
- info->core_move_state = CORE_MOVE_DONE;
+ tsc_offset = v3_tsc_host_offset(&info->time_state);
+ guest_ctrl->TSC_OFFSET = tsc_offset;
- /* disable global interrupts,
- * NOTE now it is being running on a different CPU
- */
- v3_clgi();
- }
-
-
//V3_Print("Calling v3_svm_launch\n");
if (info->vcpu_id == 0) {
info->core_run_state = CORE_RUNNING;
- info->vm_info->run_state = VM_RUNNING;
} else {
PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);
while (info->core_run_state == CORE_STOPPED) {
+
+ if (info->vm_info->run_state == VM_STOPPED) {
+ // The VM was stopped before this core was initialized.
+ return 0;
+ }
+
v3_yield(info);
//PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
}
PrintDebug("SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id);
+
+ // We'll be paranoid about race conditions here
+ v3_wait_at_barrier(info);
}
PrintDebug("SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",