2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_intr.h>
22 #include <palacios/vmm_config.h>
23 #include <palacios/vm_guest.h>
24 #include <palacios/vmm_ctrl_regs.h>
25 #include <palacios/vmm_lowlevel.h>
26 #include <palacios/vmm_sprintf.h>
27 #include <palacios/vmm_extensions.h>
30 #include <palacios/svm.h>
33 #include <palacios/vmx.h>
36 #ifdef V3_CONFIG_CHECKPOINT
37 #include <palacios/vmm_checkpoint.h>
/* Per-physical-CPU virtualization capability (SVM/VMX variant), indexed by
 * physical CPU id.  Reset to V3_INVALID_CPU in Init_V3() and filled in by
 * init_cpu() on each CPU. */
41 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
/* Callback table supplied by the embedding host OS (set from the `hooks`
 * argument of Init_V3(); NULL until then). */
42 struct v3_os_hooks * os_hooks = NULL;
/* Global debug-print gate: v3_print_cond() only emits output when this is 1. */
43 int v3_dbg_enable = 0;
/* Per-CPU virtualization bring-up, run on the target CPU via the host's
 * call_on_cpu hook.  Probes for SVM then VMX support and initializes
 * whichever is present; records nothing here directly — the v3_init_*_cpu()
 * calls set v3_cpu_types[cpu_id].
 * NOTE(review): the gaps in the embedded line numbers indicate elided
 * #ifdef V3_CONFIG_SVM / V3_CONFIG_VMX guards and closing braces around
 * each branch — confirm against the full file. */
48 static void init_cpu(void * arg) {
/* CPU id is smuggled through the void* argument (cast via addr_t to avoid
 * pointer-truncation warnings). */
49 uint32_t cpu_id = (uint32_t)(addr_t)arg;
52 if (v3_is_svm_capable()) {
53 PrintDebug("Machine is SVM Capable\n");
54 v3_init_svm_cpu(cpu_id);
59 if (v3_is_vmx_capable()) {
60 PrintDebug("Machine is VMX Capable\n");
61 v3_init_vmx_cpu(cpu_id);
/* Neither SVM nor VMX available on this CPU: report and leave the CPU
 * marked invalid. */
66 PrintError("CPU has no virtualization Extensions\n");
/* Per-CPU virtualization teardown, run on the target CPU.  Dispatches on the
 * arch type recorded in v3_cpu_types[] during init_cpu().
 * NOTE(review): numbering gaps show elided case labels (other SVM/VMX
 * revisions), break statements, and the closing brace — confirm against the
 * full file. */
71 static void deinit_cpu(void * arg) {
/* CPU id passed through void*, same convention as init_cpu(). */
72 uint32_t cpu_id = (uint32_t)(addr_t)arg;
75 switch (v3_cpu_types[cpu_id]) {
79 PrintDebug("Deinitializing SVM CPU %d\n", cpu_id);
80 v3_deinit_svm_cpu(cpu_id);
86 case V3_VMX_EPT_UG_CPU:
87 PrintDebug("Deinitializing VMX CPU %d\n", cpu_id);
88 v3_deinit_vmx_cpu(cpu_id);
/* Default case: CPU was never initialized for virtualization. */
93 PrintError("CPU has no virtualization Extensions\n");
/* Global VMM initialization entry point, called once by the embedding OS.
 * `hooks` is the host OS callback table (stored in the global os_hooks
 * elsewhere in this function), `num_cpus` is the number of CPUs to bring up.
 * Registers subsystems (shadow paging, extensions, optional symmod/
 * checkpoint), then runs init_cpu() on each CPU via hooks->call_on_cpu. */
100 void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
/* V3_Print kept deliberately: works around a Kitten host page-fault bug. */
103 V3_Print("V3 Print statement to fix a Kitten page fault bug\n");
105 // Set global variables.
/* Mark every CPU slot invalid until init_cpu() proves otherwise. */
108 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
109 v3_cpu_types[i] = V3_INVALID_CPU;
112 // Register all the possible device types
115 // Register all shadow paging handlers
116 V3_init_shdw_paging();
118 // Register all extensions
119 V3_init_extensions();
122 #ifdef V3_CONFIG_SYMMOD
126 #ifdef V3_CONFIG_CHECKPOINT
127 V3_init_checkpoint();
/* Bring up virtualization on each CPU only if the host gave us a
 * cross-CPU call hook. */
133 if ((hooks) && (hooks->call_on_cpu)) {
135 for (i = 0; i < num_cpus; i++) {
137 V3_Print("Initializing VMM extensions on cpu %d\n", i);
138 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
/* NOTE(review): this is the body of a global teardown routine whose header
 * line is not present in this listing — presumably the mirror of Init_V3()
 * (e.g. Shutdown_V3); confirm against the full file.  It unwinds the
 * subsystems registered at init, then runs deinit_cpu() on every CPU that
 * was successfully initialized. */
150 V3_deinit_shdw_paging();
152 V3_deinit_extensions();
154 #ifdef V3_CONFIG_SYMMOD
158 #ifdef V3_CONFIG_CHECKPOINT
159 V3_deinit_checkpoint();
/* Only CPUs marked valid in v3_cpu_types[] get a teardown call. */
163 if ((os_hooks) && (os_hooks->call_on_cpu)) {
164 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
165 if (v3_cpu_types[i] != V3_INVALID_CPU) {
166 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
167 //deinit_cpu((void *)(addr_t)i);
/* Return the virtualization arch type recorded for physical CPU `cpu_id`.
 * No bounds check: caller must pass a valid index into v3_cpu_types[]. */
175 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
176 return v3_cpu_types[cpu_id];
/* Create (but do not start) a VM from the parsed configuration `cfg`.
 * `priv_data` is opaque host state attached to the VM; `name` is the
 * human-readable VM name, copied into the fixed 128-byte vm->name buffer.
 * Returns the new VM on success; error paths (elided in this listing)
 * presumably return NULL — confirm against the full file. */
180 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
181 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
184 PrintError("Could not configure guest\n");
188 V3_Print("CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
192 } else if (strlen(name) >= 128) {
/* NOTE(review): message says "truncated to 128 chars" but the strncpy
 * below copies at most 127 chars (the 128th byte stays the NUL from
 * memset) — the message overstates by one. */
193 PrintError("VM name is too long. Will be truncated to 128 chars.\n");
/* Zero-fill then copy 127 bytes max, guaranteeing NUL termination. */
196 memset(vm->name, 0, 128);
197 strncpy(vm->name, name, 127);
/* Thread entry point for one virtual core: dispatches into the SVM or VMX
 * guest-run loop based on the arch type of physical CPU 0 (all CPUs are
 * assumed homogeneous here).  Returns the guest-run result.
 * NOTE(review): numbering gaps show elided case labels for other SVM/VMX
 * revisions and the switch's closing lines — confirm against the full file. */
205 static int start_core(void * p)
207 struct guest_info * core = (struct guest_info *)p;
210 PrintDebug("virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
211 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
/* Arch dispatch keyed off CPU 0, not this core's pcpu_id. */
213 switch (v3_cpu_types[0]) {
216 case V3_SVM_REV3_CPU:
217 return v3_start_svm_guest(core);
223 case V3_VMX_EPT_UG_CPU:
224 return v3_start_vmx_guest(core);
/* Default: arch type invalid/uninitialized. */
228 PrintError("Attempting to enter a guest on an invalid CPU\n");
236 // For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
/* Start a previously created VM.  `cpu_mask` is a 32-bit bitmap of physical
 * CPUs the VM's virtual cores may run on (bit i = CPU i usable).  Validates
 * the mask against initialized CPUs, then spawns one host thread per vcore
 * (backwards, so vcore 0 launches last).  Error paths elided in this
 * listing presumably return -1 — confirm against the full file. */
240 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
/* Byte-wise view of the mask so indexing scales past 32 CPUs later. */
242 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
243 uint32_t avail_cores = 0;
246 /// CHECK IF WE ARE MULTICORE ENABLED....
248 V3_Print("V3 -- Starting VM (%u cores)\n", vm->num_cores);
249 V3_Print("CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
252 // Check that enough cores are present in the mask to handle vcores
253 for (i = 0; i < MAX_CORES; i++) {
/* major/minor split the mask into byte index and bit index (computed on
 * elided lines per the numbering gap). */
257 if (core_mask[major] & (0x1 << minor)) {
/* Strip CPUs that never initialized virtualization from the mask. */
258 if (v3_cpu_types[i] == V3_INVALID_CPU) {
259 core_mask[major] &= ~(0x1 << minor);
267 if (vm->num_cores > avail_cores) {
268 PrintError("Attempted to start a VM with too many cores (vm->num_cores = %d, avail_cores = %d, MAX=%d)\n",
269 vm->num_cores, avail_cores, MAX_CORES);
273 vm->run_state = VM_RUNNING;
275 // Spawn off threads for each core.
276 // We work backwards, so that core 0 is always started last.
277 for (i = 0, vcore_id = vm->num_cores - 1; (i < MAX_CORES) && (vcore_id >= 0); i++) {
280 struct guest_info * core = &(vm->cores[vcore_id]);
/* Per-core config may pin the vcore to an explicit physical CPU. */
281 char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
282 uint32_t core_idx = 0;
284 if (specified_cpu != NULL) {
285 core_idx = atoi(specified_cpu);
/* NOTE(review): core_idx is uint32_t, so (core_idx < 0) is always false —
 * a negative "target_cpu" string wraps to a huge unsigned value instead.
 * The >= MAX_CORES check still catches that wrap, but the comparison is
 * dead code and %d misprints the unsigned value; worth fixing upstream. */
287 if ((core_idx < 0) || (core_idx >= MAX_CORES)) {
288 PrintError("Target CPU out of bounds (%d) (MAX_CORES=%d)\n", core_idx, MAX_CORES);
291 i--; // We reset the logical core idx. Not strictly necessary I guess...
296 major = core_idx / 8;
297 minor = core_idx % 8;
/* Target CPU must still be set in the (pruned) availability mask. */
299 if ((core_mask[major] & (0x1 << minor)) == 0) {
300 PrintError("Logical CPU %d not available for virtual core %d; not started\n",
/* An explicitly pinned CPU being unavailable is fatal; an implicit
 * choice just moves on (elided control flow per numbering gap). */
303 if (specified_cpu != NULL) {
304 PrintError("CPU was specified explicitly (%d). HARD ERROR\n", core_idx);
312 PrintDebug("Starting virtual core %u on logical core %u\n",
/* Thread name encodes VM name + vcore id for host-side identification. */
315 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
317 PrintDebug("run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
318 core_idx, start_core, core, core->exec_name);
320 core->pcpu_id = core_idx;
321 core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
323 if (core->core_thread == NULL) {
324 PrintError("Thread launch failed\n");
/* Reached when the loop exhausts CPUs before placing every vcore. */
333 PrintError("Error starting VM: Not enough available CPU cores\n");
/* Reset a single virtual core to begin execution at `rip`, dispatching to
 * the SVM or VMX reset path based on the arch type of the core's current
 * physical CPU.  Returns the arch-specific reset result. */
344 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
346 switch (v3_cpu_types[core->pcpu_id]) {
349 case V3_SVM_REV3_CPU:
350 PrintDebug("Resetting SVM Guest CPU %d\n", core->vcpu_id);
351 return v3_reset_svm_vm_core(core, rip);
356 case V3_VMX_EPT_UG_CPU:
357 PrintDebug("Resetting VMX Guest CPU %d\n", core->vcpu_id);
358 return v3_reset_vmx_vm_core(core, rip);
/* Default: CPU has no recorded virtualization support. */
362 PrintError("CPU has no virtualization Extensions\n");
371 /* move a virtual core to different physical core */
/* Migrate virtual core `vcore_id` of `vm` to physical CPU `target_cpu`.
 * Raises the VM-wide barrier to quiesce all cores, flushes arch state off
 * the old CPU (VMX shown; SVM case elided in this listing), moves the host
 * thread, updates pcpu_id, and lowers the barrier.  Error paths elided here
 * presumably return -1 — confirm against the full file. */
372 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
373 struct guest_info * core = NULL;
375 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
376 PrintError("Attempted to migrate invalid virtual core (%d)\n", vcore_id);
380 core = &(vm->cores[vcore_id]);
/* Migrating to the CPU we're already on is rejected as a no-op. */
382 if (target_cpu == core->pcpu_id) {
383 PrintError("Attempted to migrate to local core (%d)\n", target_cpu);
384 // well that was pointless
388 if (core->core_thread == NULL) {
389 PrintError("Attempted to migrate a core without a valid thread context\n");
/* Spin until the barrier is raised; -1 means another barrier is active. */
393 while (v3_raise_barrier(vm, NULL) == -1);
395 V3_Print("Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
397 // Double check that we weren't preemptively migrated
398 if (target_cpu != core->pcpu_id) {
400 V3_Print("Moving Core\n");
404 switch (v3_cpu_types[core->pcpu_id]) {
/* VMX requires flushing the VMCS on its current CPU before the thread
 * can be rescheduled elsewhere; the flush must run ON that CPU. */
407 case V3_VMX_EPT_UG_CPU:
408 PrintDebug("Flushing VMX Guest CPU %d\n", core->vcpu_id);
409 V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
/* On failure, lower the barrier before bailing so the VM can resume. */
416 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
417 PrintError("Failed to move Vcore %d to CPU %d\n",
418 core->vcpu_id, target_cpu);
419 v3_lower_barrier(vm);
423 /* There will be a benign race window here:
424 core->pcpu_id will be set to the target core before its fully "migrated"
425 However the core will NEVER run on the old core again, its just in flight to the new core
427 core->pcpu_id = target_cpu;
429 V3_Print("core now at %d\n", core->pcpu_id);
432 v3_lower_barrier(vm);
/* Stop a running VM: flip run_state to VM_STOPPED, then poll until every
 * core's run state reaches CORE_STOPPED (the polling loop's retry/yield
 * lines are elided in this listing).  Returns once all cores have stopped. */
439 int v3_stop_vm(struct v3_vm_info * vm) {
/* Cores observe this state change and exit their guest-run loops. */
441 vm->run_state = VM_STOPPED;
443 // force exit all cores via a cross call/IPI
447 int still_running = 0;
449 for (i = 0; i < vm->num_cores; i++) {
450 if (vm->cores[i].core_run_state != CORE_STOPPED) {
455 if (still_running == 0) {
462 V3_Print("VM stopped. Returning\n");
/* Pause a running VM by raising the VM-wide barrier (parking all cores)
 * and marking the state VM_PAUSED.  The barrier stays raised until
 * v3_continue_vm() lowers it.  Fails if the VM is not currently running. */
468 int v3_pause_vm(struct v3_vm_info * vm) {
470 if (vm->run_state != VM_RUNNING) {
471 PrintError("Tried to pause a VM that was not running\n");
/* Spin until the barrier raise succeeds (-1 = another barrier active). */
475 while (v3_raise_barrier(vm, NULL) == -1);
477 vm->run_state = VM_PAUSED;
/* Resume a paused VM: lower the barrier raised by v3_pause_vm() and mark
 * the VM running again.  Fails if the VM is not currently paused. */
483 int v3_continue_vm(struct v3_vm_info * vm) {
485 if (vm->run_state != VM_PAUSED) {
486 PrintError("Tried to continue a VM that was not paused\n");
490 v3_lower_barrier(vm);
492 vm->run_state = VM_RUNNING;
/* Checkpoint entry points: thin wrappers over the checkpoint subsystem,
 * compiled in only when checkpointing is configured.  `store` names the
 * checkpoint backend, `url` the destination/source location; both return
 * the subsystem's result code. */
497 #ifdef V3_CONFIG_CHECKPOINT
498 #include <palacios/vmm_checkpoint.h>
/* Save the VM's state through the named checkpoint store. */
500 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url) {
501 return v3_chkpt_save_vm(vm, store, url);
/* Restore the VM's state from the named checkpoint store. */
505 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url) {
506 return v3_chkpt_load_vm(vm, store, url);
/* Tear down and free a VM: release its devices, free each core's state,
 * then free the VM structure itself.  Caller must ensure the VM is stopped
 * first (no check visible in this listing — elided lines may enforce it). */
511 int v3_free_vm(struct v3_vm_info * vm) {
513 // deinitialize guest (free memory, etc...)
515 v3_free_vm_devices(vm);
/* Free per-core resources before the containing VM structure. */
518 for (i = 0; i < vm->num_cores; i++) {
519 v3_free_core(&(vm->cores[i]));
523 v3_free_vm_internal(vm);
/* Determine the host CPU's paging/operating mode.  Two build variants exist
 * (selected by elided preprocessor guards per the numbering gaps): a 32-bit
 * version that inspects CR4 (PAE bit -> PROTECTED_PAE) and a 64-bit version
 * whose body is not visible in this listing. */
535 v3_cpu_mode_t v3_get_host_cpu_mode() {
/* Reinterpret the raw CR4 value through the bitfield struct to test PAE. */
545 cr4 = (struct cr4_32 *)&(cr4_val);
548 return PROTECTED_PAE;
/* 64-bit build variant (body elided in this listing). */
556 v3_cpu_mode_t v3_get_host_cpu_mode() {
/* Yield the current host CPU via the OS hook, if the host provided one.
 * The `addr` parameter is unused by the visible expansion.  Declared as a
 * macro (wrapped in an elided do/while or braces per the numbering gap) so
 * it can re-declare the extern locally. */
563 #define V3_Yield(addr) \
565 extern struct v3_os_hooks * os_hooks; \
566 if ((os_hooks) && (os_hooks)->yield_cpu) { \
567 (os_hooks)->yield_cpu(); \
/* Conditionally yield the CPU: only if a full yield_cycle_period has
 * elapsed (in host time) since this core's last yield.  Keeps guest cores
 * from hogging the host CPU while bounding yield frequency. */
573 void v3_yield_cond(struct guest_info * info) {
575 cur_cycle = v3_get_host_time(&info->time_state);
/* Period expired: yield (elided call per numbering gap) and restart the
 * yield timer from the current host time. */
577 if (cur_cycle > (info->yield_start_cycle + info->vm_info->yield_cycle_period)) {
578 //PrintDebug("Conditional Yield (cur_cyle=%p, start_cycle=%p, period=%p)\n",
579 // (void *)cur_cycle, (void *)info->yield_start_cycle,
580 // (void *)info->yield_cycle_period);
583 info->yield_start_cycle = v3_get_host_time(&info->time_state);
589 * unconditional cpu yield
590 * if the yielding thread is a guest context, the guest quantum is reset on resumption
591 * Non guest context threads should call this function with a NULL argument
/* Unconditional yield; `info` may be NULL for non-guest host threads (the
 * NULL check guarding this assignment is elided per the numbering gap). */
593 void v3_yield(struct guest_info * info) {
/* Reset the conditional-yield timer so v3_yield_cond() starts fresh. */
597 info->yield_start_cycle = v3_get_host_time(&info->time_state);
/* printf-style debug output gated on the global v3_dbg_enable flag.
 * Formats into a fixed 2048-byte stack buffer via vsnprintf (so output is
 * truncated, never overflowed); the va_start/va_end and emit lines are
 * elided in this listing. */
604 void v3_print_cond(const char * fmt, ...) {
605 if (v3_dbg_enable == 1) {
610 vsnprintf(buf, 2048, fmt, ap);
/* Deliver interrupt `vector` to physical CPU `logical_cpu` on behalf of
 * `vm`, via the host OS's interrupt_cpu hook.  Silently a no-op if the
 * host did not provide the hook. */
619 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
620 extern struct v3_os_hooks * os_hooks;
622 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
623 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
/* Enter the guest for one execution pass on the given core, dispatching to
 * the SVM or VMX entry path based on the arch type of physical CPU 0
 * (homogeneous-CPU assumption, same as start_core).  Other case labels and
 * the function's tail are elided in this listing. */
629 int v3_vm_enter(struct guest_info * info) {
630 switch (v3_cpu_types[0]) {
633 case V3_SVM_REV3_CPU:
634 return v3_svm_enter(info);
640 case V3_VMX_EPT_UG_CPU:
641 return v3_vmx_enter(info);
/* Default: no valid virtualization arch recorded for this machine. */
645 PrintError("Attemping to enter a guest on an invalid CPU\n");