/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_intr.h>
#include <palacios/vmm_config.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_extensions.h>

#include <palacios/svm.h>
#include <palacios/vmx.h>

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif
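
/* Global VMM state: the virtualization variant detected on each physical CPU,
 * the overall machine type derived from those probes, the host OS hook table
 * registered by the embedding OS, and the debug print enable flag. */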
v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;

struct v3_os_hooks * os_hooks = NULL;
int v3_dbg_enable = 0;
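
/* Runs on one physical CPU (via the host's call_on_cpu hook) to detect and
 * initialize its virtualization extensions. The SVM/VMX init routines are
 * expected to record the detected variant in v3_cpu_types[cpu_id]. */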
static void init_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    if (v3_is_svm_capable()) {
        PrintDebug("Machine is SVM Capable\n");
        v3_init_svm_cpu(cpu_id);
    } else if (v3_is_vmx_capable()) {
        PrintDebug("Machine is VMX Capable\n");
        v3_init_vmx_cpu(cpu_id);
    } else {
        PrintError("CPU has no virtualization Extensions\n");
    }
}
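
/* Counterpart to init_cpu: tears down the per-CPU SVM or VMX state,
 * dispatching on the variant recorded in v3_cpu_types. */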
static void deinit_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    switch (v3_cpu_types[cpu_id]) {
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            PrintDebug("Deinitializing SVM CPU %d\n", cpu_id);
            v3_deinit_svm_cpu(cpu_id);
            break;
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            PrintDebug("Deinitializing VMX CPU %d\n", cpu_id);
            v3_deinit_vmx_cpu(cpu_id);
            break;
        default:
            PrintError("CPU has no virtualization Extensions\n");
            break;
    }
}
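
/* One-time global initialization, called by the embedding host OS. Registers
 * device types, shadow paging handlers, and extensions, then probes each host
 * CPU for virtualization support and derives the global machine type. */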
void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
    int i;

    V3_Print("V3 Print statement to fix a Kitten page fault bug\n");

    // Set global variables.
    os_hooks = hooks;

    // Determine the global machine type
    v3_mach_type = V3_INVALID_CPU;

    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
        v3_cpu_types[i] = V3_INVALID_CPU;
    }

    // Register all the possible device types
    V3_init_devices();

    // Register all shadow paging handlers
    V3_init_shdw_paging();

    // Register all extensions
    V3_init_extensions();

#ifdef V3_CONFIG_SYMMOD
    V3_init_symmod();
#endif

#ifdef V3_CONFIG_CHECKPOINT
    V3_init_checkpoint();
#endif

    if ((hooks) && (hooks->call_on_cpu)) {

        for (i = 0; i < num_cpus; i++) {
            V3_Print("Initializing VMM extensions on cpu %d\n", i);
            hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);

            if (v3_mach_type == V3_INVALID_CPU) {
                v3_mach_type = v3_cpu_types[i];
            }
        }
    }
}
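
/* Global teardown: unwinds the registrations made in Init_V3 and runs
 * deinit_cpu on every physical CPU that was successfully initialized. */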
void Shutdown_V3() {
    int i;

    V3_deinit_devices();
    V3_deinit_shdw_paging();

    V3_deinit_extensions();

#ifdef V3_CONFIG_SYMMOD
    V3_deinit_symmod();
#endif

#ifdef V3_CONFIG_CHECKPOINT
    V3_deinit_checkpoint();
#endif

    if ((os_hooks) && (os_hooks->call_on_cpu)) {
        for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
            if (v3_cpu_types[i] != V3_INVALID_CPU) {
                V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
                //deinit_cpu((void *)(addr_t)i);
            }
        }
    }
}

v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
    return v3_cpu_types[cpu_id];
}
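
/* Build a VM from a guest configuration blob. On success the VM is fully
 * configured but not yet running; the (possibly truncated) name is stored in
 * vm->name and reused for per-core thread names. */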
struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
    struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);

    if (vm == NULL) {
        PrintError("Could not configure guest\n");
        return NULL;
    }

    V3_Print("CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    if (name == NULL) {
        name = "[V3_VM]";
    } else if (strlen(name) >= 128) {
        PrintError("VM name is too long. Will be truncated to 128 chars.\n");
    }

    memset(vm->name, 0, 128);
    strncpy(vm->name, name, 127);

    return vm;
}
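
/* Thread entry point for a single virtual core: dispatches to the SVM or VMX
 * run loop based on the global machine type. */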
static int start_core(void * p)
{
    struct guest_info * core = (struct guest_info *)p;

    PrintDebug("virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
               core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);

    switch (v3_mach_type) {
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            return v3_start_svm_guest(core);
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            return v3_start_vmx_guest(core);
        default:
            PrintError("Attempting to enter a guest on an invalid CPU\n");
            return -1;
    }
}

// For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
#define MAX_CORES 32
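
/* Start every virtual core of a VM. cpu_mask is treated as a bitmap of usable
 * physical CPUs (bit i == CPU i); core threads are spawned in reverse order so
 * that virtual core 0 is always started last. */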
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
    int i = 0;
    int vcore_id = 0;
    uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
    uint32_t avail_cores = 0;

    /// CHECK IF WE ARE MULTICORE ENABLED....

    V3_Print("V3 -- Starting VM (%u cores)\n", vm->num_cores);
    V3_Print("CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    // Check that enough cores are present in the mask to handle vcores
    for (i = 0; i < MAX_CORES; i++) {
        int major = i / 8;
        int minor = i % 8;

        if (core_mask[major] & (0x1 << minor)) {
            if (v3_cpu_types[i] == V3_INVALID_CPU) {
                core_mask[major] &= ~(0x1 << minor);
            } else {
                avail_cores++;
            }
        }
    }

    if (vm->num_cores > avail_cores) {
        PrintError("Attempted to start a VM with too many cores (vm->num_cores = %d, avail_cores = %d, MAX=%d)\n",
                   vm->num_cores, avail_cores, MAX_CORES);
        return -1;
    }

    vm->run_state = VM_RUNNING;

    // Spawn off threads for each core.
    // We work backwards, so that core 0 is always started last.
    for (i = 0, vcore_id = vm->num_cores - 1; (i < MAX_CORES) && (vcore_id >= 0); i++) {
        int major = 0;
        int minor = 0;
        struct guest_info * core = &(vm->cores[vcore_id]);
        char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
        uint32_t core_idx = 0;

        if (specified_cpu != NULL) {
            core_idx = atoi(specified_cpu);

            if ((core_idx < 0) || (core_idx >= MAX_CORES)) {
                PrintError("Target CPU out of bounds (%d) (MAX_CORES=%d)\n", core_idx, MAX_CORES);
            }

            i--; // We reset the logical core idx. Not strictly necessary I guess...
        } else {
            core_idx = i;
        }

        major = core_idx / 8;
        minor = core_idx % 8;

        if ((core_mask[major] & (0x1 << minor)) == 0) {
            PrintError("Logical CPU %d not available for virtual core %d; not started\n",
                       core_idx, vcore_id);

            if (specified_cpu != NULL) {
                PrintError("CPU was specified explicitly (%d). HARD ERROR\n", core_idx);
                v3_stop_vm(vm);
                return -1;
            }

            continue;
        }

        PrintDebug("Starting virtual core %u on logical core %u\n",
                   vcore_id, core_idx);

        sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);

        PrintDebug("run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
                   core_idx, start_core, core, core->exec_name);

        core->core_run_state = CORE_STOPPED; // core zero will turn itself on
        core->pcpu_id = core_idx;
        core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);

        if (core->core_thread == NULL) {
            PrintError("Thread launch failed\n");
            v3_stop_vm(vm);
            return -1;
        }

        vcore_id--;
    }

    if (vcore_id >= 0) {
        PrintError("Error starting VM: Not enough available CPU cores\n");
        v3_stop_vm(vm);
        return -1;
    }

    return 0;
}
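
/* Reset a single virtual core to the given RIP, using the reset path of
 * whichever virtualization backend owns the physical CPU it runs on. */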
int v3_reset_vm_core(struct guest_info * core, addr_t rip) {

    switch (v3_cpu_types[core->pcpu_id]) {
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            PrintDebug("Resetting SVM Guest CPU %d\n", core->vcpu_id);
            return v3_reset_svm_vm_core(core, rip);
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            PrintDebug("Resetting VMX Guest CPU %d\n", core->vcpu_id);
            return v3_reset_vmx_vm_core(core, rip);
        default:
            PrintError("CPU has no virtualization Extensions\n");
            break;
    }

    return -1;
}

/* Move a virtual core to a different physical core */
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
    struct guest_info * core = NULL;

    if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
        PrintError("Attempted to migrate invalid virtual core (%d)\n", vcore_id);
        return -1;
    }

    core = &(vm->cores[vcore_id]);

    if (target_cpu == core->pcpu_id) {
        PrintError("Attempted to migrate to local core (%d)\n", target_cpu);
        // well that was pointless
        return 0;
    }

    if (core->core_thread == NULL) {
        PrintError("Attempted to migrate a core without a valid thread context\n");
        return -1;
    }

    while (v3_raise_barrier(vm, NULL) == -1);

    V3_Print("Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);

    // Double check that we weren't preemptively migrated
    if (target_cpu != core->pcpu_id) {

        V3_Print("Moving Core\n");

        switch (v3_cpu_types[core->pcpu_id]) {
            case V3_VMX_CPU:
            case V3_VMX_EPT_CPU:
            case V3_VMX_EPT_UG_CPU:
                PrintDebug("Flushing VMX Guest CPU %d\n", core->vcpu_id);
                V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
                break;
            default:
                break;
        }

        if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
            PrintError("Failed to move Vcore %d to CPU %d\n",
                       core->vcpu_id, target_cpu);
            v3_lower_barrier(vm);
            return -1;
        }

        /* There is a benign race window here:
           core->pcpu_id is set to the target core before it is fully "migrated".
           However, the core will NEVER run on the old physical core again; it is
           just in flight to the new one. */
        core->pcpu_id = target_cpu;

        V3_Print("core now at %d\n", core->pcpu_id);
    }

    v3_lower_barrier(vm);

    return 0;
}
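
/* Stop a running VM: mark it VM_STOPPED, then wait for every core's run loop
 * to observe the change and park itself in CORE_STOPPED. */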
int v3_stop_vm(struct v3_vm_info * vm) {

    vm->run_state = VM_STOPPED;

    // force exit all cores via a cross call/IPI

    while (1) {
        int i = 0;
        int still_running = 0;

        for (i = 0; i < vm->num_cores; i++) {
            if (vm->cores[i].core_run_state != CORE_STOPPED) {
                still_running = 1;
            }
        }

        if (still_running == 0) {
            break;
        }

        v3_yield(NULL);
    }

    V3_Print("VM stopped. Returning\n");

    return 0;
}
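
/* Pause and continue are built on the core barrier: raising the barrier holds
 * the guest cores at a synchronization point, and lowering it releases them. */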
int v3_pause_vm(struct v3_vm_info * vm) {

    if (vm->run_state != VM_RUNNING) {
        PrintError("Tried to pause a VM that was not running\n");
        return -1;
    }

    while (v3_raise_barrier(vm, NULL) == -1);

    vm->run_state = VM_PAUSED;

    return 0;
}

int v3_continue_vm(struct v3_vm_info * vm) {

    if (vm->run_state != VM_PAUSED) {
        PrintError("Tried to continue a VM that was not paused\n");
        return -1;
    }

    v3_lower_barrier(vm);

    vm->run_state = VM_RUNNING;

    return 0;
}
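
/* Checkpoint save/restore entry points; thin wrappers around the checkpoint
 * subsystem, compiled in only when V3_CONFIG_CHECKPOINT is enabled. */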
#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>

int v3_save_vm(struct v3_vm_info * vm, char * store, char * url) {
    return v3_chkpt_save_vm(vm, store, url);
}

int v3_load_vm(struct v3_vm_info * vm, char * store, char * url) {
    return v3_chkpt_load_vm(vm, store, url);
}

#endif
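
/* Free the resources of a VM that is no longer running: devices first, then
 * each core's state, then the VM's internal state. */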
int v3_free_vm(struct v3_vm_info * vm) {
    int i = 0;
    // deinitialize guest (free memory, etc...)

    v3_free_vm_devices(vm);

    for (i = 0; i < vm->num_cores; i++) {
        v3_free_core(&(vm->cores[i]));
    }

    v3_free_vm_internal(vm);

    return 0;
}
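
/* Report the execution mode of the host itself: a 32-bit build distinguishes
 * PAE from non-PAE protected mode via CR4, while a 64-bit host is in long mode. */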
#ifdef __V3_32BIT__

v3_cpu_mode_t v3_get_host_cpu_mode() {
    uint32_t cr4_val;
    struct cr4_32 * cr4;

    __asm__ (
             "movl %%cr4, %0; "
             : "=r"(cr4_val)
             );

    cr4 = (struct cr4_32 *)&(cr4_val);

    if (cr4->pae == 1) {
        return PROTECTED_PAE;
    } else {
        return PROTECTED;
    }
}

#elif __V3_64BIT__

v3_cpu_mode_t v3_get_host_cpu_mode() {
    return LONG;
}

#endif
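
/* V3_Yield hands the CPU back to the host scheduler through the yield_cpu
 * hook, if the embedding OS provided one. */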
#define V3_Yield(addr)                                  \
    do {                                                \
        extern struct v3_os_hooks * os_hooks;           \
        if ((os_hooks) && (os_hooks)->yield_cpu) {      \
            (os_hooks)->yield_cpu();                    \
        }                                               \
    } while (0)

void v3_yield_cond(struct guest_info * info) {
    uint64_t cur_cycle;

    cur_cycle = v3_get_host_time(&info->time_state);

    if (cur_cycle > (info->yield_start_cycle + info->vm_info->yield_cycle_period)) {
        //PrintDebug("Conditional Yield (cur_cycle=%p, start_cycle=%p, period=%p)\n",
        //           (void *)cur_cycle, (void *)info->yield_start_cycle,
        //           (void *)info->yield_cycle_period);

        V3_Yield();
        info->yield_start_cycle = v3_get_host_time(&info->time_state);
    }
}

/*
 * Unconditional CPU yield.
 * If the yielding thread is a guest context, the guest quantum is reset on resumption.
 * Non-guest-context threads should call this function with a NULL argument.
 */
void v3_yield(struct guest_info * info) {
    V3_Yield();

    if (info) {
        info->yield_start_cycle = v3_get_host_time(&info->time_state);
    }
}

void v3_print_cond(const char * fmt, ...) {
    if (v3_dbg_enable == 1) {
        char buf[2048];
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(buf, 2048, fmt, ap);
        va_end(ap);

        V3_Print("%s", buf);
    }
}

void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
    extern struct v3_os_hooks * os_hooks;

    if ((os_hooks) && (os_hooks)->interrupt_cpu) {
        (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
    }
}
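
/* Enter the guest on the calling core, dispatching to the SVM or VMX entry
 * path based on the global machine type. */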
int v3_vm_enter(struct guest_info * info) {
    switch (v3_mach_type) {
        case V3_SVM_CPU:
        case V3_SVM_REV3_CPU:
            return v3_svm_enter(info);
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            return v3_vmx_enter(info);
        default:
            PrintError("Attempting to enter a guest on an invalid CPU\n");
            return -1;
    }
}