X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm.c;h=dca1018e2ac731a6e2eef6f43fc119b461144bc1;hb=08acd192b161e69bc47414ee615e674d7736dd9c;hp=3123030a99e5fd97c046d891754f24ad926b50b0;hpb=b580d9fca65a7b6f84eaebb57cad09bc6e941dfd;p=palacios-OLD.git

diff --git a/palacios/src/palacios/vmm.c b/palacios/src/palacios/vmm.c
index 3123030..dca1018 100644
--- a/palacios/src/palacios/vmm.c
+++ b/palacios/src/palacios/vmm.c
@@ -21,7 +21,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -63,7 +62,34 @@ static void init_cpu(void * arg) {
     } else
 #endif
     {
-	PrintError("CPU has no virtualizationExtensions\n");
+	PrintError("CPU has no virtualization Extensions\n");
+    }
+}
+
+
+static void deinit_cpu(void * arg) {
+    uint32_t cpu_id = (uint32_t)(addr_t)arg;
+
+
+    switch (v3_cpu_types[cpu_id]) {
+    #ifdef CONFIG_SVM
+    case V3_SVM_CPU:
+    case V3_SVM_REV3_CPU:
+	PrintDebug("Machine is SVM Capable\n");
+	v3_deinit_svm_cpu(cpu_id);
+	break;
+#endif
+#ifdef CONFIG_VMX
+    case V3_VMX_CPU:
+    case V3_VMX_EPT_CPU:
+	PrintDebug("Machine is VMX Capable\n");
+	v3_deinit_vmx_cpu(cpu_id);
+	break;
+#endif
+    case V3_INVALID_CPU:
+    default:
+	PrintError("CPU has no virtualization Extensions\n");
+	break;
+    }
 }
@@ -82,7 +108,7 @@ void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
     }
 
     // Register all the possible device types
-    v3_init_devices();
+    V3_init_devices();
 
     // Register all shadow paging handlers
     V3_init_shdw_paging();
@@ -92,10 +118,6 @@ void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
     V3_init_symmod();
 #endif
 
-#ifdef CONFIG_INSTRUMENT_VMM
-    v3_init_instrumentation();
-#endif
-
 #ifdef CONFIG_VNET
     v3_init_vnet();
 
@@ -118,13 +140,43 @@ void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
 }
 
+void Shutdown_V3() {
+    int i;
+
+    V3_deinit_devices();
+    V3_deinit_shdw_paging();
+
+#ifdef CONFIG_SYMMOD
+    V3_deinit_symmod();
+#endif
+
+
+#ifdef CONFIG_VNET
+    v3_deinit_vnet();
+#endif
+
+#ifdef CONFIG_MULTITHREAD_OS
+    if ((os_hooks) && (os_hooks->call_on_cpu)) {
+	for (i = 0; i < CONFIG_MAX_CPUS; i++) {
+	    if (v3_cpu_types[i] != V3_INVALID_CPU) {
+		deinit_cpu((void *)(addr_t)i);
+	    }
+	}
+    }
+#else
+    deinit_cpu(0);
+#endif
+
+}
+
+
 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
     return v3_cpu_types[cpu_id];
 }
 
 
 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
-    struct v3_vm_info * vm = v3_config_guest(cfg);
+    struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
 
     V3_Print("CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
@@ -134,8 +186,6 @@ struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
 	return NULL;
     }
 
-
-
     if (name == NULL) {
 	name = "[V3_VM]";
     } else if (strlen(name) >= 128) {
@@ -145,8 +195,6 @@ struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
     memset(vm->name, 0, 128);
     strncpy(vm->name, name, 127);
 
-    vm->host_priv_data = priv_data;
-
     return vm;
 }
 
@@ -184,12 +232,18 @@ static int start_core(void * p)
 
     // For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
+#ifdef CONFIG_MULTITHREAD_OS
 #define MAX_CORES 32
+#else
+#define MAX_CORES 1
+#endif
 
-static int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
+int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
     uint32_t i;
+#ifdef CONFIG_MULTITHREAD_OS
     int vcore_id = 0;
+#endif
     uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
     uint32_t avail_cores = 0;
@@ -212,11 +266,12 @@ static int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
     }
 
     if (vm->num_cores > avail_cores) {
-	PrintError("Attempted to start a VM with too many cores (MAX=%d)\n", MAX_CORES);
+	PrintError("Attempted to start a VM with too many cores (vm->num_cores = %d, avail_cores = %d, MAX=%d)\n", vm->num_cores, avail_cores, MAX_CORES);
 	return -1;
     }
 
+#ifdef CONFIG_MULTITHREAD_OS
     // spawn off new threads, for other cores
     for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) {
 	int major = i / 8;
@@ -224,6 +279,7 @@ static int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 	void * core_thread = NULL;
 	struct guest_info * core = &(vm->cores[vcore_id]);
 
+	/* This assumes that the core 0 thread has been mapped to physical core 0 */
 	if (i == V3_Get_CPU()) {
 	    // We skip the local CPU, because it is reserved for vcore 0
 	    continue;
@@ -253,6 +309,7 @@ static int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
 
 	vcore_id++;
     }
+#endif
 
     sprintf(vm->cores[0].exec_name, "%s", vm->name);
@@ -273,13 +330,51 @@ int v3_stop_vm(struct v3_vm_info * vm) {
 
     vm->run_state = VM_STOPPED;
 
-    // force exit all cores via a cross call/IPI
-    // Wait for all cores to enter CORE_STOPPED state
+    while (1) {
+	int i = 0;
+	int still_running = 0;
+
+	for (i = 0; i < vm->num_cores; i++) {
+	    if (vm->cores[i].core_run_state != CORE_STOPPED) {
+		still_running = 1;
+	    }
+	}
+	if (still_running == 0) {
+	    break;
+	}
+
+	V3_Print("Yielding\n");
+
+	v3_yield(NULL);
+    }
+
+    V3_Print("VM stopped. Returning\n");
+
+    return 0;
+}
+
+
+int v3_free_vm(struct v3_vm_info * vm) {
+    int i = 0;
     // deinitialize guest (free memory, etc...)
+    v3_free_vm_devices(vm);
+
+    // free cores
+    for (i = 0; i < vm->num_cores; i++) {
+	v3_free_core(&(vm->cores[i]));
+    }
+
+    // free vm
+    v3_free_vm_internal(vm);
+
+    v3_free_config(vm);
+
+    V3_Free(vm);
+
     return 0;
 }