void v3_deinit_svm_cpu(int cpu_id);
+
int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class);
int v3_deinit_svm_vmcb(struct guest_info * core);
int v3_svm_enter(struct guest_info * info);
int v3_start_svm_guest(struct guest_info *info);
+int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip);
+
+
+
#endif
int v3_vm_enter(struct guest_info * info);
+int v3_reset_vm_core(struct guest_info * core, addr_t rip);
#endif /*!__V3VEE__ */
void v3_deinit_vmx_cpu(int cpu_id);
int v3_start_vmx_guest(struct guest_info* info);
+int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip);
int v3_vmx_enter(struct guest_info * info);
int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class);
break;
}
- // Write the RIP, CS, and descriptor
- // assume the rest is already good to go
- //
- // vector VV -> rip at 0
- // CS = VV00
- // This means we start executing at linear address VV000
- //
- // So the selector needs to be VV00
- // and the base needs to be VV000
- //
- dst_core->rip = 0;
- dst_core->segments.cs.selector = vector << 8;
- dst_core->segments.cs.limit = 0xffff;
- dst_core->segments.cs.base = vector << 12;
+ v3_reset_vm_core(dst_core, vector);
PrintDebug(" SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
vector, dst_core->segments.cs.selector, dst_core->vcpu_id);
break;
}
+
+ case APIC_EXTINT_DELIVERY: // EXTINT
+ /* Two possible things to do here:
+ * 1. Ignore the IPI and assume the 8259a (PIC) will handle it
+ * 2. Add 32 to the vector and inject it...
+ * We probably just want to do 1 here, and assume the raise_irq() will hit the 8259a.
+ */
+ return 0;
+
case APIC_SMI_DELIVERY:
case APIC_RES1_DELIVERY: // reserved
case APIC_NMI_DELIVERY:
- case APIC_EXTINT_DELIVERY: // ExtInt
default:
PrintError("IPI %d delivery is unsupported\n", del_mode);
return -1;
+
/*
* This file is part of the Palacios Virtual Machine Monitor developed
* by the V3VEE Project with funding from the United States National
- if ((info->num_exits % 5000) == 0) {
+/*
+ if ((info->num_exits % 50000) == 0) {
V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
v3_print_guest_state(info);
}
-
+*/
}
+int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {
+    // init vmcb_bios
+
+    // Reset an SVM core to the real-mode state a SIPI expects.
+    // NOTE(review): despite its name, the 'rip' parameter carries the SIPI
+    // vector VV (an 8-bit page number), not a linear address -- the shift
+    // math below only makes sense for a vector; confirm at call sites.
+
+    // Write the RIP, CS, and descriptor
+    // assume the rest is already good to go
+    //
+    // vector VV -> rip at 0
+    // CS = VV00
+    // This means we start executing at linear address VV000
+    //
+    // So the selector needs to be VV00
+    // and the base needs to be VV000
+    //
+    core->rip = 0;
+    core->segments.cs.selector = rip << 8;
+    core->segments.cs.limit = 0xffff;
+    core->segments.cs.base = rip << 12;
+
+    // Success; caller (v3_reset_vm_core) propagates this return code.
+    return 0;
+}
+
+
+
+
+
/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
}
+
void v3_init_svm_cpu(int cpu_id) {
reg_ex_t msr;
extern v3_cpu_arch_t v3_cpu_types[];
+
static void init_cpu(void * arg) {
uint32_t cpu_id = (uint32_t)(addr_t)arg;
}
+
+
static int start_core(void * p)
{
struct guest_info * core = (struct guest_info *)p;
}
+// Architecture-neutral core reset: dispatch to the SVM or VMX reset routine
+// based on the virtualization capability of the physical CPU this core is
+// bound to.  'rip' is forwarded unchanged (it carries the SIPI vector --
+// see the per-arch implementations).  Returns 0 on success, -1 if the host
+// CPU has no supported virtualization extensions.
+int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
+
+    switch (v3_cpu_types[core->pcpu_id]) {
+#ifdef V3_CONFIG_SVM
+	case V3_SVM_CPU:
+	case V3_SVM_REV3_CPU:
+	    PrintDebug("Resetting SVM Guest CPU %d\n", core->vcpu_id);
+	    return v3_reset_svm_vm_core(core, rip);
+#endif
+#ifdef V3_CONFIG_VMX
+	case V3_VMX_CPU:
+	case V3_VMX_EPT_CPU:
+	case V3_VMX_EPT_UG_CPU:
+	    PrintDebug("Resetting VMX Guest CPU %d\n", core->vcpu_id);
+	    return v3_reset_vmx_vm_core(core, rip);
+#endif
+	case V3_INVALID_CPU:
+	default:
+	    PrintError("CPU has no virtualization Extensions\n");
+	    break;
+    }
+
+    // Fell through: unknown/unsupported CPU type.
+    return -1;
+}
+
+
+
int v3_stop_vm(struct v3_vm_info * vm) {
}
+// Reset a VMX core for a SIPI.  Renamed from v3_reset_vmx_cpu: the header
+// declares v3_reset_vmx_vm_core and v3_reset_vm_core calls it by that name,
+// so the original definition would fail to link.
+// 'rip' carries the SIPI vector VV (not a linear address).
+// Returns 0 on success.
+int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip) {
+    // init vmcs bios
+
+    if ((core->shdw_pg_mode == NESTED_PAGING) &&
+	(v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
+	// Unrestricted-guest EPT: real mode is directly supported, so set
+	// up CS:IP exactly as hardware would after a SIPI:
+	// vector VV -> CS selector VV00, CS base VV000, IP 0.
+	core->rip = 0;
+	core->segments.cs.selector = rip << 8;
+	core->segments.cs.limit = 0xffff;
+	core->segments.cs.base = rip << 12;
+    } else {
+	// No unrestricted guest: hand the vector (and the vcpu id) to the
+	// emulated startup path via registers instead.
+	core->vm_regs.rdx = core->vcpu_id;
+	core->vm_regs.rbx = rip;
+    }
+    return 0;
+}