2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_intr.h>
22 #include <palacios/vmm_config.h>
23 #include <palacios/vm_guest.h>
24 #include <palacios/vmm_ctrl_regs.h>
25 #include <palacios/vmm_lowlevel.h>
26 #include <palacios/vmm_sprintf.h>
27 #include <palacios/vmm_extensions.h>
30 #include <palacios/svm.h>
33 #include <palacios/vmx.h>
36 #ifdef V3_CONFIG_CHECKPOINT
37 #include <palacios/vmm_checkpoint.h>
/* Global VMM state (file scope). */
// Virtualization capability detected on each physical CPU (SVM/VMX variant);
// slots are set to V3_INVALID_CPU in Init_V3() until init_cpu() probes them.
41 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
// Callback table supplied by the embedding host OS (set during Init_V3()).
42 struct v3_os_hooks * os_hooks = NULL;
// When set to 1, v3_print_cond() output is enabled.
43 int v3_dbg_enable = 0;
/*
 * init_cpu() - per-physical-CPU virtualization bring-up.
 * Runs on the target CPU (dispatched via hooks->call_on_cpu from Init_V3);
 * the CPU id is packed into the void* argument. Probes SVM first, then VMX,
 * and initializes whichever extension is present; logs an error when neither
 * is available.
 * NOTE(review): interior lines (else-branches / closing braces) are elided
 * in this excerpt; comments cover only the visible logic.
 */
48 static void init_cpu(void * arg) {
// Unpack the CPU id smuggled through the pointer argument.
49 uint32_t cpu_id = (uint32_t)(addr_t)arg;
52 if (v3_is_svm_capable()) {
53 PrintDebug("Machine is SVM Capable\n");
54 v3_init_svm_cpu(cpu_id);
59 if (v3_is_vmx_capable()) {
60 PrintDebug("Machine is VMX Capable\n");
61 v3_init_vmx_cpu(cpu_id);
// Neither SVM nor VMX detected on this CPU.
66 PrintError("CPU has no virtualization Extensions\n");
/*
 * deinit_cpu() - per-physical-CPU virtualization teardown; mirror of
 * init_cpu(). Dispatches on the capability recorded in v3_cpu_types[cpu_id]
 * and calls the matching SVM/VMX deinit routine.
 * NOTE(review): several case labels and break statements are elided in this
 * excerpt.
 */
71 static void deinit_cpu(void * arg) {
// CPU id packed into the pointer argument, as in init_cpu().
72 uint32_t cpu_id = (uint32_t)(addr_t)arg;
75 switch (v3_cpu_types[cpu_id]) {
79 PrintDebug("Deinitializing SVM CPU %d\n", cpu_id);
80 v3_deinit_svm_cpu(cpu_id);
86 case V3_VMX_EPT_UG_CPU:
87 PrintDebug("Deinitializing VMX CPU %d\n", cpu_id);
88 v3_deinit_vmx_cpu(cpu_id);
// Default: this CPU was never initialized for virtualization.
93 PrintError("CPU has no virtualization Extensions\n");
/*
 * Init_V3() - global VMM initialization; called once by the host OS.
 * @hooks:    host OS callback table (stored into the global os_hooks —
 *            the assignment line is elided in this excerpt).
 * @num_cpus: number of host CPUs to bring virtualization up on.
 * Marks every CPU slot invalid, initializes shadow paging, extensions, and
 * (when configured) SYMMOD/checkpoint support, then runs init_cpu() on each
 * CPU via the host's call_on_cpu hook.
 */
100 void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
// Dummy print working around a Kitten (host OS) page-fault bug.
103 V3_Print("V3 Print statement to fix a Kitten page fault bug\n");
105 // Set global variables.
// No CPU has been probed yet; init_cpu() fills these in below.
108 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
109 v3_cpu_types[i] = V3_INVALID_CPU;
112 // Register all the possible device types
115 // Register all shadow paging handlers
116 V3_init_shdw_paging();
118 // Register all extensions
119 V3_init_extensions();
122 #ifdef V3_CONFIG_SYMMOD
126 #ifdef V3_CONFIG_CHECKPOINT
127 V3_init_checkpoint();
132 #ifdef V3_CONFIG_MULTITHREAD_OS
// Bring up virtualization on every requested CPU; requires the host to
// provide a cross-CPU call hook.
133 if ((hooks) && (hooks->call_on_cpu)) {
135 for (i = 0; i < num_cpus; i++) {
137 V3_Print("Initializing VMM extensions on cpu %d\n", i);
138 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
/*
 * VMM-wide shutdown path. NOTE(review): the enclosing function header is
 * not visible in this excerpt (presumably Shutdown_V3 — confirm in the full
 * file). Undoes Init_V3(): deinitializes shadow paging, extensions, and the
 * configured optional subsystems, then tears down per-CPU state on every
 * CPU that init_cpu() actually brought up.
 */
152 V3_deinit_shdw_paging();
154 V3_deinit_extensions();
156 #ifdef V3_CONFIG_SYMMOD
160 #ifdef V3_CONFIG_CHECKPOINT
161 V3_deinit_checkpoint();
165 #ifdef V3_CONFIG_MULTITHREAD_OS
166 if ((os_hooks) && (os_hooks->call_on_cpu)) {
// Skip CPUs still marked V3_INVALID_CPU — they were never initialized.
167 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
168 if (v3_cpu_types[i] != V3_INVALID_CPU) {
169 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
170 //deinit_cpu((void *)(addr_t)i);
/*
 * v3_get_cpu_type() - return the virtualization capability recorded for the
 * given physical CPU. No bounds check is performed on cpu_id.
 */
181 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
182 return v3_cpu_types[cpu_id];
/*
 * v3_create_vm() - build a VM from a configuration image.
 * @cfg:       configuration blob handed to v3_config_guest().
 * @priv_data: opaque host data attached to the VM.
 * @name:      human-readable VM name, copied into vm->name (truncated).
 * Error paths (elided in this excerpt) appear to return NULL.
 */
186 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
187 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
190 PrintError("Could not configure guest\n");
194 V3_Print("CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
// NOTE(review): the message says 128 chars, but with the NUL terminator
// only 127 characters of the name are actually kept (see strncpy below).
198 } else if (strlen(name) >= 128) {
199 PrintError("VM name is too long. Will be truncated to 128 chars.\n");
// The memset guarantees NUL termination even though strncpy itself does
// not append one when the source is >= 127 chars.
202 memset(vm->name, 0, 128);
203 strncpy(vm->name, name, 127);
/*
 * start_core() - thread entry point for a single virtual core.
 * @p: the guest_info of the vcore to run.
 * Dispatches to the SVM or VMX run loop based on the capability recorded
 * for host CPU 0 — i.e. it assumes a homogeneous host.
 * NOTE(review): additional case labels and the closing brace are elided in
 * this excerpt.
 */
211 static int start_core(void * p)
213 struct guest_info * core = (struct guest_info *)p;
216 PrintDebug("virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
217 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
// CPU 0's capability is used as a proxy for the whole machine.
219 switch (v3_cpu_types[0]) {
222 case V3_SVM_REV3_CPU:
223 return v3_start_svm_guest(core);
229 case V3_VMX_EPT_UG_CPU:
230 return v3_start_vmx_guest(core);
234 PrintError("Attempting to enter a guest on an invalid CPU\n");
242 // For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
243 #ifdef V3_CONFIG_MULTITHREAD_OS
/*
 * v3_start_vm() - launch all virtual cores of a configured VM.
 * @vm:       the VM to start.
 * @cpu_mask: bitmask of physical CPUs the vcores may run on (accessed as a
 *            byte array so the mask type can be widened later).
 * Counts the physical cores enabled in the mask, spawns one host thread per
 * vcore 1..n-1 on an allowed CPU, then runs vcore 0 synchronously on the
 * calling CPU. Several declarations and error-return lines are elided in
 * this excerpt.
 */
250 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
252 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
253 uint32_t avail_cores = 0;
256 /// CHECK IF WE ARE MULTICORE ENABLED....
258 V3_Print("V3 -- Starting VM (%u cores)\n", vm->num_cores);
259 V3_Print("CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
262 // Check that enough cores are present in the mask to handle vcores
// major/minor split a core index into byte + bit within core_mask (their
// declarations are on elided lines).
263 for (i = 0; i < MAX_CORES; i++) {
267 if (core_mask[major] & (0x1 << minor)) {
273 if (vm->num_cores > avail_cores) {
274 PrintError("Attempted to start a VM with too many cores (vm->num_cores = %d, avail_cores = %d, MAX=%d)\n",
275 vm->num_cores, avail_cores, MAX_CORES);
279 #ifdef V3_CONFIG_MULTITHREAD_OS
280 // spawn off new threads, for other cores
// i walks physical-core candidates; vcore_id walks vcores starting at 1
// because vcore 0 is reserved for the calling CPU (started below).
281 for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) {
284 struct guest_info * core = &(vm->cores[vcore_id]);
// An explicit "target_cpu" in the core's config overrides placement.
285 char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
286 uint32_t core_idx = 0;
288 if (specified_cpu != NULL) {
289 core_idx = atoi(specified_cpu);
// NOTE(review): core_idx is unsigned, so the `< 0` arm can never fire;
// atoi() also reports no parse errors — strtol() would be safer.
291 if ((core_idx < 0) || (core_idx >= MAX_CORES)) {
292 PrintError("Target CPU out of bounds (%d) (MAX_CORES=%d)\n", core_idx, MAX_CORES);
295 i--; // We reset the logical core idx. Not strictly necessary I guess...
298 if (i == V3_Get_CPU()) {
299 // We skip the local CPU because it is reserved for vcore 0
// Verify the chosen physical core is enabled in cpu_mask.
306 major = core_idx / 8;
307 minor = core_idx % 8;
310 if ((core_mask[major] & (0x1 << minor)) == 0) {
311 PrintError("Logical CPU %d not available for virtual core %d; not started\n",
// A mask miss is fatal only when the CPU was explicitly requested.
314 if (specified_cpu != NULL) {
315 PrintError("CPU was specified explicitly (%d). HARD ERROR\n", core_idx);
323 PrintDebug("Starting virtual core %u on logical core %u\n",
// NOTE(review): unbounded sprintf into exec_name — assumes the buffer fits
// name + "-" + vcore_id; snprintf would be safer. Same applies below.
326 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
328 PrintDebug("run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
329 core_idx, start_core, core, core->exec_name);
331 // TODO: actually manage these threads instead of just launching them
332 core->pcpu_id = core_idx;
333 core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
335 if (core->core_thread == NULL) {
336 PrintError("Thread launch failed\n");
// Vcore 0 runs synchronously on the caller's CPU.
345 sprintf(vm->cores[0].exec_name, "%s", vm->name);
347 vm->cores[0].pcpu_id = V3_Get_CPU();
349 if (start_core(&(vm->cores[0])) != 0) {
350 PrintError("Error starting VM core 0\n");
/*
 * v3_reset_vm_core() - reset one virtual core to a fresh state starting at
 * the given RIP, dispatching on the capability of the core's current
 * physical CPU. Returns the SVM/VMX reset routine's result.
 * NOTE(review): additional case labels are elided in this excerpt.
 */
361 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
363 switch (v3_cpu_types[core->pcpu_id]) {
366 case V3_SVM_REV3_CPU:
367 PrintDebug("Resetting SVM Guest CPU %d\n", core->vcpu_id);
368 return v3_reset_svm_vm_core(core, rip);
373 case V3_VMX_EPT_UG_CPU:
374 PrintDebug("Resetting VMX Guest CPU %d\n", core->vcpu_id);
375 return v3_reset_vmx_vm_core(core, rip);
// Default: core's CPU was never initialized for virtualization.
379 PrintError("CPU has no virtualization Extensions\n");
388 /* move a virtual core to different physical core */
/*
 * v3_move_vm_core() - migrate a virtual core's host thread to a different
 * physical CPU. Validates the vcore, quiesces the whole VM behind the
 * barrier, moves the thread, updates pcpu_id, and releases the barrier.
 * Return-value lines on the error paths are elided in this excerpt.
 */
389 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
390 struct guest_info * core = NULL;
// Validate the vcore index before touching the cores array.
392 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
393 PrintError("Attempted to migrate invalid virtual core (%d)\n", vcore_id);
397 core = &(vm->cores[vcore_id]);
399 if (target_cpu == core->pcpu_id) {
400 PrintError("Attempted to migrate to local core (%d)\n", target_cpu);
401 // well that was pointless
405 if (core->core_thread == NULL) {
406 PrintError("Attempted to migrate a core without a valid thread context\n");
// Quiesce all vcores of this VM before moving the thread; retry until the
// barrier is acquired.
410 while (v3_raise_barrier(vm, NULL) == -1);
412 V3_Print("Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
414 // Double check that we weren't preemptively migrated
415 if (target_cpu != core->pcpu_id) {
417 V3_Print("Moving Core\n");
419 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
420 PrintError("Failed to move Vcore %d to CPU %d\n",
421 core->vcpu_id, target_cpu);
// Release the barrier before bailing out of the failed move.
422 v3_lower_barrier(vm);
426 /* There will be a benign race window here:
427 core->pcpu_id will be set to the target core before its fully "migrated"
428 However the core will NEVER run on the old core again, its just in flight to the new core
430 core->pcpu_id = target_cpu;
432 V3_Print("core now at %d\n", core->pcpu_id);
// Let all vcores resume now that the migration is recorded.
439 v3_lower_barrier(vm);
/*
 * v3_stop_vm() - request a full VM stop and wait until every vcore reaches
 * CORE_STOPPED. The cross call/IPI that forces cores out of the guest, and
 * the outer polling loop around the check below, are elided in this
 * excerpt.
 */
446 int v3_stop_vm(struct v3_vm_info * vm) {
// Publish the stopped state first so exiting cores observe it.
448 vm->run_state = VM_STOPPED;
450 // force exit all cores via a cross call/IPI
454 int still_running = 0;
// Count cores that have not yet acknowledged the stop.
456 for (i = 0; i < vm->num_cores; i++) {
457 if (vm->cores[i].core_run_state != CORE_STOPPED) {
462 if (still_running == 0) {
469 V3_Print("VM stopped. Returning\n");
/*
 * v3_pause_vm() - pause a running VM by raising the VM-wide barrier (which
 * parks every vcore) and marking it VM_PAUSED. Error-return lines are
 * elided in this excerpt.
 */
475 int v3_pause_vm(struct v3_vm_info * vm) {
477 if (vm->run_state != VM_RUNNING) {
478 PrintError("Tried to pause a VM that was not running\n");
// Spin until the barrier is raised; cores stay parked until a subsequent
// v3_continue_vm() lowers it.
482 while (v3_raise_barrier(vm, NULL) == -1);
484 vm->run_state = VM_PAUSED;
/*
 * v3_continue_vm() - resume a paused VM: lower the barrier so vcores run
 * again, then mark the VM running. Error-return lines are elided in this
 * excerpt.
 */
490 int v3_continue_vm(struct v3_vm_info * vm) {
492 if (vm->run_state != VM_PAUSED) {
493 PrintError("Tried to continue a VM that was not paused\n");
497 v3_lower_barrier(vm);
499 vm->run_state = VM_RUNNING;
504 #ifdef V3_CONFIG_CHECKPOINT
505 #include <palacios/vmm_checkpoint.h>
/*
 * v3_save_vm() - checkpoint the VM's state to `url` within checkpoint
 * store `store`; thin wrapper around v3_chkpt_save_vm().
 */
507 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url) {
508 return v3_chkpt_save_vm(vm, store, url);
/*
 * v3_load_vm() - restore the VM's state from `url` within checkpoint
 * store `store`; thin wrapper around v3_chkpt_load_vm().
 */
512 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url) {
513 return v3_chkpt_load_vm(vm, store, url);
/*
 * v3_free_vm() - release all resources of a VM: attached devices, each
 * core's state, then the VM structure itself. Surrounding teardown steps
 * and the return are elided in this excerpt.
 */
518 int v3_free_vm(struct v3_vm_info * vm) {
520 // deinitialize guest (free memory, etc...)
522 v3_free_vm_devices(vm);
// Tear down per-core state before freeing the containing VM struct.
525 for (i = 0; i < vm->num_cores; i++) {
526 v3_free_core(&(vm->cores[i]));
530 v3_free_vm_internal(vm);
/*
 * v3_get_host_cpu_mode() - determine the host CPU's paging mode by
 * inspecting control registers (the CR4-based PAE branch is the only part
 * visible here). NOTE(review): most of this function is elided in this
 * excerpt, and a second definition appears below — presumably the two are
 * selected by an architecture #ifdef; confirm in the full file.
 */
542 v3_cpu_mode_t v3_get_host_cpu_mode() {
552 cr4 = (struct cr4_32 *)&(cr4_val);
555 return PROTECTED_PAE;
// Alternate definition of v3_get_host_cpu_mode() for the other build
// configuration (body elided in this excerpt; presumably returns the
// 64-bit/long mode — TODO confirm in full file).
563 v3_cpu_mode_t v3_get_host_cpu_mode() {
/*
 * V3_Yield() - relinquish the current CPU to the host scheduler via the
 * host-supplied yield_cpu hook; a no-op when no hook is registered.
 * (Backslash-continued macro body — no comments may be inserted inside.)
 */
570 #define V3_Yield(addr) \
572 extern struct v3_os_hooks * os_hooks; \
573 if ((os_hooks) && (os_hooks)->yield_cpu) { \
574 (os_hooks)->yield_cpu(); \
/*
 * v3_yield_cond() - yield the CPU only when this core's yield period has
 * elapsed, then restart the accounting window; keeps a spinning guest core
 * from starving the host. The V3_Yield call itself is on an elided line.
 * NOTE(review): the condition reads info->vm_info->yield_cycle_period but
 * the debug print reads info->yield_cycle_period — one of the two is
 * likely stale; confirm which field is authoritative.
 */
580 void v3_yield_cond(struct guest_info * info) {
582 cur_cycle = v3_get_host_time(&info->time_state);
584 if (cur_cycle > (info->yield_start_cycle + info->vm_info->yield_cycle_period)) {
587 PrintDebug("Conditional Yield (cur_cyle=%p, start_cycle=%p, period=%p)\n",
588 (void *)cur_cycle, (void *)info->yield_start_cycle, (void *)info->yield_cycle_period);
// Begin a new accounting window after the yield.
591 info->yield_start_cycle = v3_get_host_time(&info->time_state);
597 * unconditional cpu yield
598 * if the yielding thread is a guest context, the guest quantum is reset on resumption
599 * Non guest context threads should call this function with a NULL argument
/*
 * v3_yield() - unconditional CPU yield. Per the comment above, when called
 * from guest context the guest's quantum is reset on resumption; the NULL
 * check for non-guest callers is on an elided line.
 */
601 void v3_yield(struct guest_info * info) {
605 info->yield_start_cycle = v3_get_host_time(&info->time_state);
/*
 * v3_print_cond() - printf-style output gated on the global v3_dbg_enable
 * flag; formats into a local buffer via vsnprintf. The buffer declaration,
 * va_start/va_end, and the actual output call are elided in this excerpt.
 */
612 void v3_print_cond(const char * fmt, ...) {
613 if (v3_dbg_enable == 1) {
618 vsnprintf(buf, 2048, fmt, ap);
626 #ifdef V3_CONFIG_MULTITHREAD_OS
/*
 * v3_interrupt_cpu() - deliver an IPI with `vector` to `logical_cpu`
 * through the host's interrupt_cpu hook; silently a no-op when the hook
 * is absent.
 */
628 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
629 extern struct v3_os_hooks * os_hooks;
631 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
632 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
/*
 * v3_vm_enter() - enter the guest once on the calling core, dispatching to
 * the SVM or VMX entry routine based on CPU 0's capability (assumes a
 * homogeneous host, as in start_core()).
 * NOTE(review): additional case labels are elided in this excerpt, and the
 * default-path error message has a typo ("Attemping") — fix when the full
 * function can be edited.
 */
639 int v3_vm_enter(struct guest_info * info) {
640 switch (v3_cpu_types[0]) {
643 case V3_SVM_REV3_CPU:
644 return v3_svm_enter(info);
650 case V3_VMX_EPT_UG_CPU:
651 return v3_vmx_enter(info);
655 PrintError("Attemping to enter a guest on an invalid CPU\n");