} else
#endif
{
- PrintError("CPU has no virtualizationExtensions\n");
+ PrintError("CPU has no virtualization Extensions\n");
}
}
+// Per-CPU teardown stub, the counterpart of the init path above.  The whole
+// block is compiled out with #if 0: the real v3_deinit_*_cpu() calls are
+// still commented out below, so enabling it would only print capability
+// messages.  `arg` smuggles the CPU id through a void* (see the
+// commented-out cast) and is unused while the body is stubbed.
+#if 0
+static void deinit_cpu(void * arg) {
+// uint32_t cpu_id = (uint32_t)(addr_t)arg;
+
+ #ifdef CONFIG_SVM
+ if (v3_is_svm_capable()) {
+ PrintDebug("Machine is SVM Capable\n");
+ //v3_deinit_svm_cpu(cpu_id);
+
+ } else
+#endif
+#ifdef CONFIG_VMX
+ if (v3_is_vmx_capable()) {
+ PrintDebug("Machine is VMX Capable\n");
+ //v3_deinit_vmx_cpu(cpu_id);
+
+ } else
+#endif
+ {
+ PrintError("CPU has no virtualization Extensions\n");
+ }
+}
+#endif
// NOTE(review): patch fragment -- the unified-diff context between hunks is
// elided here, so Init_V3's body is only partially visible and the brace
// structure below is not literal.  The visible hunk renames the device-
// registry call to the capitalized V3_ public-API prefix, making it
// consistent with V3_init_shdw_paging() on the following line.
void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
}
// Register all the possible device types
- v3_init_devices();
+ V3_init_devices();
// Register all shadow paging handlers
V3_init_shdw_paging();
}
+// Global VMM shutdown: tears down the device and shadow-paging registries
+// that Init_V3() set up.  The #if 0 region is a sketch of module and
+// per-CPU teardown (symmod, instrumentation, vnet, deinit_cpu) that is not
+// yet live.
+// NOTE(review): inside the disabled region, `hooks` is referenced but is
+// not a parameter of Shutdown_V3 (presumably meant to be the saved
+// os_hooks global -- confirm), and deinit_cpu(i) passes an int where the
+// stubbed deinit_cpu() takes a void*; both need fixing before the region
+// can be enabled.
+void Shutdown_V3() {
+ // int i;
+
+ V3_deinit_devices();
+ V3_deinit_shdw_paging();
+
+#if 0
+
+#ifdef CONFIG_SYMMOD
+ V3_deinit_symmod();
+#endif
+
+#ifdef CONFIG_INSTRUMENT_VMM
+ v3_deinit_instrumentation();
+#endif
+
+#ifdef CONFIG_VNET
+ v3_deinit_vnet();
+#endif
+
+#ifdef CONFIG_MULTITHREAD_OS
+ if ((hooks) && (hooks->call_on_cpu)) {
+ for (i = 0; i < CONFIG_MAX_CPUS; i++) {
+ if (v3_cpu_types[i] != V3_INVALID_CPU) {
+ deinit_cpu(i);
+ }
+ }
+ }
+#else
+ deinit_cpu(0);
+#endif
+
+#endif
+
+}
+
+
+
+
// Return the recorded virtualization capability (SVM/VMX/invalid) for the
// given CPU, as stored in the v3_cpu_types table.
// NOTE(review): cpu_id is not bounds-checked against the table -- callers
// must pass a valid id.
v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
return v3_cpu_types[cpu_id];
}
// For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
+// Single-threaded OS embeddings only ever run on core 0, so cap MAX_CORES
+// at 1 there; multithreaded hosts keep the legacy 32-core mask width.
+#ifdef CONFIG_MULTITHREAD_OS
#define MAX_CORES 32
+#else
+#define MAX_CORES 1
+#endif
// NOTE(review): patch fragment -- unified-diff context lines are elided, so
// this span fuses the head of v3_start_vm() with what appears to be the
// tail of v3_stop_vm(); the brace structure is not literal.
// v3_start_vm: launches vcore threads for the VM on CPUs selected by
// cpu_mask, failing with -1 when the VM requests more cores than are
// available (the hunk below expands that error message with the actual
// vm->num_cores and avail_cores values).  The MULTITHREAD_OS-only loop
// spawns vcores 1..num_cores-1 on other physical cores; vcore 0 runs on
// the caller's core.  The stop-path tail below busy-waits (with v3_yield)
// until every core reaches CORE_STOPPED before returning.
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
uint32_t i;
+#ifdef CONFIG_MULTITHREAD_OS
int vcore_id = 0;
+#endif
uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
uint32_t avail_cores = 0;
}
if (vm->num_cores > avail_cores) {
- PrintError("Attempted to start a VM with too many cores (MAX=%d)\n", MAX_CORES);
+ PrintError("Attempted to start a VM with too many cores (vm->num_cores = %d, avail_cores = %d, MAX=%d)\n", vm->num_cores, avail_cores, MAX_CORES);
return -1;
}
+#ifdef CONFIG_MULTITHREAD_OS
// spawn off new threads, for other cores
for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) {
int major = i / 8;
vcore_id++;
}
+#endif
sprintf(vm->cores[0].exec_name, "%s", vm->name);
vm->run_state = VM_STOPPED;
-
// force exit all cores via a cross call/IPI
- // Wait for all cores to enter CORE_STOPPED state
+ while (1) {
+ int i = 0;
+ int still_running = 0;
+
+ for (i = 0; i < vm->num_cores; i++) {
+ if (vm->cores[i].core_run_state != CORE_STOPPED) {
+ still_running = 1;
+ }
+ }
+
+ if (still_running == 0) {
+ break;
+ }
+
+ V3_Print("Yielding\n");
+
+ v3_yield(NULL);
+ }
+
+ V3_Print("VM stopped. Returning\n");
+
+ return 0;
+}
+
+// Free all resources owned by a VM: its attached devices, each vcore's
+// state, the VM-internal bookkeeping, and its parsed configuration, then
+// the v3_vm_info struct itself via V3_Free().  Always returns 0.
+// NOTE(review): presumably the VM must already be stopped (see the
+// CORE_STOPPED wait above) -- `vm` is dangling after this returns.
+int v3_free_vm(struct v3_vm_info * vm) {
+ int i = 0;
// deinitialize guest (free memory, etc...)
+ v3_free_vm_devices(vm);
+
+ // free cores
+ for (i = 0; i < vm->num_cores; i++) {
+ v3_free_core(&(vm->cores[i]));
+ }
+
+ // free vm
+ v3_free_vm_internal(vm);
+
+ v3_free_config(vm);
+
+ V3_Free(vm);
+
return 0;
}