2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
34 #include <interfaces/vmm_file.h>
37 #include <palacios/svm.h>
40 #include <palacios/vmx.h>
43 #ifdef V3_CONFIG_CHECKPOINT
44 #include <palacios/vmm_checkpoint.h>
// Per-physical-CPU virtualization architecture detected during init_cpu()
// (an SVM or VMX variant, or V3_INVALID_CPU if the core was never probed).
48 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
// Global machine type; set from the first successfully initialized CPU
// in Init_V3() and consulted by start_core()/v3_vm_enter().
49 v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;
// Hook table supplied by the host OS embedding Palacios (set in Init_V3).
51 struct v3_os_hooks * os_hooks = NULL;
// Nonzero enables conditional debug output via v3_print_cond().
52 int v3_dbg_enable = 0;
// Per-CPU bring-up callback, invoked on each physical core via the host
// OS call_on_cpu hook from Init_V3(): probe for hardware virtualization
// support and initialize SVM or VMX state on this core.
// NOTE(review): this listing is elided - the #ifdef V3_CONFIG_SVM/VMX
// guards, else branches, the v3_cpu_types[] assignments, and closing
// braces are not visible here; confirm against the full source.
56 static void init_cpu(void * arg) {
// The core id is passed through the void* argument.
57 uint32_t cpu_id = (uint32_t)(addr_t)arg;
62 if (v3_is_svm_capable()) {
63 PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
64 v3_init_svm_cpu(cpu_id);
69 if (v3_is_vmx_capable()) {
70 PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
71 v3_init_vmx_cpu(cpu_id);
// Neither SVM nor VMX was detected on this core.
76 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Per-CPU teardown callback (mirror of init_cpu): release SVM or VMX
// state on this core, dispatching on the architecture recorded in
// v3_cpu_types[].
// NOTE(review): elided listing - the SVM case labels, intervening VMX
// case labels, default label, and closing braces are not visible here.
81 static void deinit_cpu(void * arg) {
82 uint32_t cpu_id = (uint32_t)(addr_t)arg;
85 switch (v3_cpu_types[cpu_id]) {
89 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
90 v3_deinit_svm_cpu(cpu_id);
96 case V3_VMX_EPT_UG_CPU:
97 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
98 v3_deinit_vmx_cpu(cpu_id);
103 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Return nonzero iff the host CPU is executing in 64-bit long mode.
// We read the EFER MSR (0xc0000080) and require both LME (bit 8) and
// LMA (bit 10) to be set: 0x500 == (1<<8) | (1<<10).
static int in_long_mode()
{
    uint32_t high, low;

    v3_get_msr(0xc0000080, &high, &low); // EFER

    return ((low & 0x500) == 0x500);     // LMA and LME set
}
// Global VMM initialization, called once by the host OS embedding layer.
// Records the host hook table, verifies the host is in 64-bit long mode,
// initializes the core frameworks (memory, devices, shadow paging,
// cpu_mapper, scheduler, extensions, optional checkpointing), and then
// probes/initializes virtualization hardware on each CPU selected by
// cpu_mask, establishing v3_mach_type from the first initialized core.
// NOTE(review): elided listing - declarations of i/major/minor, several
// init calls, #else/#endif lines, and closing braces are not visible.
122 void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {
// Reported to work around a Kitten (host OS) page fault; keep it.
127 V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");
132 #error Palacios does not support compilation for a 32 bit host OS!!!!
134 if (!in_long_mode()) {
135 PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");
140 // Set global variables.
// Clamp: only the first V3_CONFIG_MAX_CPUS cpus can be managed.
143 if (num_cpus>V3_CONFIG_MAX_CPUS) {
144 PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
147 // Determine the global machine type
148 v3_mach_type = V3_INVALID_CPU;
150 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
151 v3_cpu_types[i] = V3_INVALID_CPU;
154 // Parse host-os defined options into an easily-accessed format.
155 v3_parse_options(options);
157 #ifdef V3_CONFIG_MULTIBOOT
165 // Memory manager initialization
168 // Register all the possible device types
171 // Register all shadow paging handlers
172 V3_init_shdw_paging();
174 #ifdef V3_CONFIG_SWAPPING
178 // Initialize the cpu_mapper framework (must be before extensions)
179 V3_init_cpu_mapper();
181 // Initialize the scheduler framework (must be before extensions)
182 V3_init_scheduling();
184 // Register all extensions
185 V3_init_extensions();
187 // Enabling cpu_mapper
188 V3_enable_cpu_mapper();
190 // Enabling scheduler
191 V3_enable_scheduler();
194 #ifdef V3_CONFIG_SYMMOD
198 #ifdef V3_CONFIG_CHECKPOINT
199 V3_init_checkpoint();
// Bring up virtualization hardware on each cpu selected by cpu_mask
// (NULL mask means "all"); major/minor index the byte/bit of the mask.
202 if ((hooks) && (hooks->call_on_cpu)) {
204 for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
208 if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
209 V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
210 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
// First successfully probed core defines the global machine type.
212 if (v3_mach_type == V3_INVALID_CPU) {
213 v3_mach_type = v3_cpu_types[i];
// Global VMM teardown (presumably the body of Shutdown_V3 - the function
// header is not visible in this elided listing): deinitializes each CPU
// that was initialized, then tears down the frameworks in the reverse
// order of Init_V3.
225 // Reverse order of Init_V3
229 if ((os_hooks) && (os_hooks->call_on_cpu)) {
230 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
// Only CPUs that were actually initialized need deinit.
231 if (v3_cpu_types[i] != V3_INVALID_CPU) {
232 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
233 //deinit_cpu((void *)(addr_t)i);
238 #ifdef V3_CONFIG_CHECKPOINT
239 V3_deinit_checkpoint();
242 #ifdef V3_CONFIG_SYMMOD
// Disable before deinit, mirroring the enable-after-init in Init_V3.
246 V3_disable_scheduler();
248 V3_disable_cpu_mapper();
250 V3_deinit_extensions();
252 V3_deinit_scheduling();
254 V3_deinit_cpu_mapper();
256 #ifdef V3_CONFIG_SWAPPING
257 v3_deinit_swapping();
260 V3_deinit_shdw_paging();
270 #ifdef V3_CONFIG_MULTIBOOT
271 v3_deinit_multiboot();
280 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
281 return v3_cpu_types[cpu_id];
// Thread entry point for a virtual core: register the core with the
// scheduler, then hand control to the architecture-specific guest-run
// loop selected by the global machine type. Returns the run loop's
// result, which only comes back when the core stops.
// NOTE(review): elided listing - the SVM/VMX #ifdef guards, remaining
// case labels, default label, and closing braces are not visible.
284 static int start_core(void * p)
286 struct guest_info * core = (struct guest_info *)p;
288 if (v3_scheduler_register_core(core) == -1){
289 PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);
292 PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
293 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
// Dispatch on the machine-wide architecture established in Init_V3.
295 switch (v3_mach_type) {
298 case V3_SVM_REV3_CPU:
299 return v3_start_svm_guest(core);
305 case V3_VMX_EPT_UG_CPU:
306 return v3_start_vmx_guest(core);
310 PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
// Create (but do not start) a VM: build guest state from the config
// image, name it, register it with the cpu_mapper and scheduler, admit
// it for core mapping, and spawn one (not-yet-running) host thread per
// virtual core pinned to its assigned physical core.
// NOTE(review): elided listing - error-path returns/cleanup, the
// vcore_id declaration, and closing braces are not visible here.
317 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name, unsigned int cpu_mask) {
318 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
322 PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");
326 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
// Names longer than the 128-byte vm->name buffer are truncated below.
330 } else if (strlen(name) >= 128) {
331 PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");
334 memset(vm->name, 0, 128);
335 strncpy(vm->name, name, 127);
337 if(v3_cpu_mapper_register_vm(vm) == -1) {
339 PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
343 * Register this VM with the palacios scheduler. It will ask for admission
346 if(v3_scheduler_register_vm(vm) == -1) {
348 PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
351 if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
352 PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
// One host thread per vcore; threads are created stopped and are
// kicked off later by v3_start_vm via V3_START_THREAD.
355 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
357 struct guest_info * core = &(vm->cores[vcore_id]);
359 PrintDebug(vm, VCORE_NONE, "Creating virtual core %u on logical core %u\n",
360 vcore_id, core->pcpu_id);
361 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
364 PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
365 core->pcpu_id, start_core, core, core->exec_name);
367 core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
369 if (core->core_thread == NULL) {
370 PrintError(vm, VCORE_NONE, "Thread creation failed\n");
// Launch (or resume) a previously created VM: validate run state, do
// optional multiboot/HVM boot setup, prune the caller's cpu_mask down to
// CPUs with working virtualization, admit the VM to the scheduler, and
// start every vcore's host thread.
// NOTE(review): elided listing - declarations of i/vcore_id/major/minor,
// avail_cores accounting, error returns, and closing braces are not
// visible here.
379 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
382 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
383 uint32_t avail_cores = 0;
387 PrintError(VM_NONE, VCORE_NONE, "Asked to start nonexistent VM\n");
// Only a stopped VM may be (re)started.
391 if (vm->run_state != VM_STOPPED) {
392 PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);
396 #if V3_CONFIG_MULTIBOOT
397 if (v3_setup_multiboot_vm_for_boot(vm)) {
398 PrintError(vm, VCORE_NONE, "Multiboot setup for boot failed\n");
403 if (v3_setup_hvm_vm_for_boot(vm)) {
404 PrintError(vm, VCORE_NONE, "HVM setup for boot failed\n");
409 /// CHECK IF WE ARE MULTICORE ENABLED....
411 V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
412 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
415 // Check that enough cores are present in the mask to handle vcores
416 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
// Drop mask bits for CPUs without virtualization extensions.
420 if (core_mask[major] & (0x1 << minor)) {
421 if (v3_cpu_types[i] == V3_INVALID_CPU) {
422 core_mask[major] &= ~(0x1 << minor);
429 vm->avail_cores = avail_cores;
431 if (v3_scheduler_admit_vm(vm) != 0){
432 PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
435 vm->run_state = VM_RUNNING;
437 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
439 struct guest_info * core = &(vm->cores[vcore_id]);
441 PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
442 vcore_id, core->pcpu_id);
444 if (core->core_run_state==CORE_INVALID) {
445 // launch of a fresh VM
446 core->core_run_state = CORE_STOPPED;
447 // core zero will turn itself on
449 // this is a resume - use whatever its current run_state is
452 V3_START_THREAD(core->core_thread);
// Reset a single virtual core to begin execution at the given guest RIP,
// dispatching to the SVM or VMX reset path based on the architecture of
// the physical CPU the core is mapped to.
// NOTE(review): elided listing - #ifdef guards, remaining case labels,
// default label, return values, and closing braces are not visible.
461 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
463 switch (v3_cpu_types[core->pcpu_id]) {
466 case V3_SVM_REV3_CPU:
467 PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
468 return v3_reset_svm_vm_core(core, rip);
473 case V3_VMX_EPT_UG_CPU:
474 PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
475 return v3_reset_vmx_vm_core(core, rip);
479 PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
488 /* move a virtual core to different physical core */
// Migrate vcore vcore_id of vm to physical CPU target_cpu: validate the
// request, raise the VM-wide barrier so no core is executing guest code,
// flush architecture state bound to the old CPU (e.g. the VMCS for VMX),
// move the host thread, update core->pcpu_id, and lower the barrier.
// NOTE(review): elided listing - early returns, remaining case labels,
// #ifdef guards, and closing braces are not visible here.
489 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
490 struct guest_info * core = NULL;
493 PrintError(VM_NONE, VCORE_NONE, "Asked to move core of nonexistent VM\n");
497 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
498 PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);
502 core = &(vm->cores[vcore_id]);
504 if (target_cpu == core->pcpu_id) {
505 PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
506 // well that was pointless
510 if (core->core_thread == NULL) {
511 PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");
// Spin until the barrier is raised: all vcores must be quiesced
// before we touch per-CPU virtualization state.
515 while (v3_raise_barrier(vm, NULL) == -1);
517 V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
519 // Double check that we weren't preemptively migrated
520 if (target_cpu != core->pcpu_id) {
522 V3_Print(vm, core, "Moving Core\n");
524 if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
525 PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
531 switch (v3_cpu_types[core->pcpu_id]) {
534 case V3_VMX_EPT_UG_CPU:
535 PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
// VMX state (VMCS) is bound to the old physical CPU; flush it there.
536 V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
543 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
544 PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
545 core->vcpu_id, target_cpu);
546 v3_lower_barrier(vm);
550 /* There will be a benign race window here:
551 core->pcpu_id will be set to the target core before its fully "migrated"
552 However the core will NEVER run on the old core again, its just in flight to the new core
554 core->pcpu_id = target_cpu;
556 V3_Print(vm, core, "core now at %d\n", core->pcpu_id);
559 v3_lower_barrier(vm);
564 /* move a memory region to memory with affinity for a specific physical core */
// Re-home the base memory region containing guest-physical address gpa
// onto the NUMA node of target_cpu: barrier the VM, allocate a new copy
// on the target node, memcpy the contents, rewrite the region's
// host_addr/numa_id, invalidate all cores' paging structures (reverting
// on failure), free the old pages, and lower the barrier.
// NOTE(review): elided listing - declarations (old_node/new_node/i/
// num_pages/new_hpa/old_hpa), several early returns, allocation flags,
// and closing braces are not visible here.
565 int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {
568 struct v3_mem_region *reg;
575 PrintError(VM_NONE, VCORE_NONE, "Asked to move memory of nonexistent VM\n");
579 old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);
582 PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);
586 new_node = v3_numa_cpu_to_node(target_cpu);
589 PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);
// Already on the right node - nothing to do.
593 if (new_node==old_node) {
594 PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");
598 // We are now going to change the universe, so
599 // we'll barrier everyone first
601 while (v3_raise_barrier(vm, NULL) == -1);
605 reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);
608 PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");
// Only allocated base regions can be physically relocated.
612 if (!(reg->flags.base) || !(reg->flags.alloced)) {
613 PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);
617 // we now have the allocated base region corresponding to - and not a copy
618 // we will rewrite this region after moving its contents
620 // first, let's double check that we are in fact changing the numa_id...
622 if (reg->numa_id==new_node) {
623 PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");
627 // region uses exclusive addressing [guest_start,guest_end)
628 num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;
630 new_hpa = V3_AllocPagesExtended(num_pages,
633 0, 0); // no constraints given new shadow pager impl
636 PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");
640 // Note, assumes virtual contiguity in the host OS...
641 memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);
// Remember the old mapping so we can revert on invalidation failure.
643 old_hpa = (void*)(reg->host_addr);
644 old_node = (int)(reg->numa_id);
646 reg->host_addr = (addr_t)new_hpa;
647 reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);
649 // flush all page tables / kill all humans
651 for (i=0;i<vm->num_cores;i++) {
652 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
653 v3_invalidate_shadow_pts(&(vm->cores[i]));
654 } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
655 // nested invalidator uses inclusive addressing [start,end], not [start,end)
656 v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);
658 PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
659 // We'll restore things...
660 reg->host_addr = (addr_t) old_hpa;
661 reg->numa_id = old_node;
662 V3_FreePages(new_hpa,num_pages);
667 // Now the old region can go away...
668 V3_FreePages(old_hpa,num_pages);
670 PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
671 (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));
674 v3_lower_barrier(vm);
// NOTE(review): second lower appears to belong to an elided error path.
679 v3_lower_barrier(vm);
// Stop a running (or simulating) VM: mark it VM_STOPPED, sanity-check the
// barrier state, then wait until every vcore has reached CORE_STOPPED,
// nudging still-running cores through the scheduler as needed.
// NOTE(review): elided listing - declarations (i), early returns, the
// loop structure around the "still_running" check, and closing braces
// are not visible here.
683 int v3_stop_vm(struct v3_vm_info * vm) {
685 struct guest_info * running_core;
688 PrintError(VM_NONE, VCORE_NONE, "Asked to stop nonexistent VM\n");
692 if ((vm->run_state != VM_RUNNING) &&
693 (vm->run_state != VM_SIMULATING)) {
694 PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);
// Cores observe this state change and exit their run loops.
698 vm->run_state = VM_STOPPED;
700 // Sanity check to catch any weird execution states
701 if (v3_wait_for_barrier(vm, NULL) == 0) {
702 v3_lower_barrier(vm);
705 // XXX force exit all cores via a cross call/IPI XXX
709 int still_running = 0;
711 for (i = 0; i < vm->num_cores; i++) {
712 if (vm->cores[i].core_run_state != CORE_STOPPED) {
713 running_core = &vm->cores[i];
718 if (still_running == 0) {
722 v3_scheduler_stop_core(running_core);
725 V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
// Pause a running VM by raising the VM-wide barrier (quiescing all
// vcores) and marking it VM_PAUSED; the barrier stays raised until
// v3_continue_vm lowers it.
// NOTE(review): elided listing - early returns and closing braces are
// not visible here.
731 int v3_pause_vm(struct v3_vm_info * vm) {
734 PrintError(VM_NONE, VCORE_NONE, "Asked to pause nonexistent VM\n");
738 if (vm->run_state != VM_RUNNING) {
739 PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");
// Spin until the barrier is successfully raised.
743 while (v3_raise_barrier(vm, NULL) == -1);
745 vm->run_state = VM_PAUSED;
// Resume a paused VM: mark it VM_RUNNING and lower the barrier raised by
// v3_pause_vm so the vcores continue executing.
// NOTE(review): elided listing - early returns and closing braces are
// not visible here.
751 int v3_continue_vm(struct v3_vm_info * vm) {
754 PrintError(VM_NONE, VCORE_NONE, "Asked to continue nonexistent VM\n");
758 if (vm->run_state != VM_PAUSED) {
759 PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");
763 vm->run_state = VM_RUNNING;
765 v3_lower_barrier(vm);
// Per-core timeout callback used by v3_simulate_vm: set this vcore's bit
// in the shared timeout bitmap, then spin until v3_simulate_vm clears it
// (signaling that the simulation window is over).
// NOTE(review): elided listing - the yield call inside the spin loop,
// the return, and closing braces are not visible here.
772 static int sim_callback(struct guest_info * core, void * private_data) {
773 struct v3_bitmap * timeout_map = private_data;
775 v3_bitmap_set(timeout_map, core->vcpu_id);
777 V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
779 while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
780 // We spin here if there is noone to yield to
// Run a paused VM for approximately msecs milliseconds of guest time:
// arm a cycle-count timeout (via sim_callback) on every vcore, resume
// the VM, busy-wait until every core has hit its timeout, then re-pause
// the VM and release the cores from their callback spin loops.
// NOTE(review): elided listing - declarations (i, cycles, all_blocked),
// early returns, the loop bodies, and closing braces are not visible.
790 int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
791 struct v3_bitmap timeout_map;
795 uint64_t cpu_khz = V3_CPU_KHZ();
798 PrintError(VM_NONE, VCORE_NONE, "Asked to simulate nonexistent VM\n");
802 if (vm->run_state != VM_PAUSED) {
803 PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");
807 /* AT this point VM is paused */
// One bit per vcore; set by sim_callback when that core times out.
810 v3_bitmap_init(&timeout_map, vm->num_cores);
815 // calculate cycles from msecs...
816 // IMPORTANT: Floating point not allowed.
817 cycles = (msecs * cpu_khz);
821 V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);
825 for (i = 0; i < vm->num_cores; i++) {
826 if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
827 PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);
832 V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");
835 // Run the simulation
836 // vm->run_state = VM_SIMULATING;
837 vm->run_state = VM_RUNNING;
838 v3_lower_barrier(vm);
841 V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");
843 // block until simulation is complete
844 while (all_blocked == 0) {
847 for (i = 0; i < vm->num_cores; i++) {
848 if (v3_bitmap_check(&timeout_map, i) == 0) {
853 if (all_blocked == 1) {
857 // Intentionally spin if there is no one to yield to
862 V3_Print(vm, VCORE_NONE,"Simulation is complete\n");
864 // Simulation is complete
865 // Reset back to PAUSED state
867 v3_raise_barrier_nowait(vm, NULL);
868 vm->run_state = VM_PAUSED;
// Clearing the bitmap releases the cores spinning in sim_callback.
870 v3_bitmap_reset(&timeout_map);
872 v3_wait_for_barrier(vm, NULL);
// Snapshot a VM's externally visible state into caller-provided buffers:
// overall run state into base, per-vcore state/cpu-mode/paging-mode/
// mem-mode/pcore/last-rip/exit-count into core (up to core->num_vcores
// entries), and per-base-region host address/size/swap flags into mem
// (up to mem->num_regions entries).
// NOTE(review): elided listing - declarations (i, numcores, numregions),
// the return, the #else for the swapping block, and closing braces are
// not visible here.
878 int v3_get_state_vm(struct v3_vm_info *vm,
879 struct v3_vm_base_state *base,
880 struct v3_vm_core_state *core,
881 struct v3_vm_mem_state *mem)
886 extern uint64_t v3_mem_block_size;
888 if (!vm || !base || !core || !mem) {
889 PrintError(VM_NONE, VCORE_NONE, "Invalid rquest to v3_get_state_vm\n");
// Clamp to the smaller of what the caller provided and what exists.
893 numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
894 numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
// Translate internal run states to the exported v3_vm_state enum.
896 switch (vm->run_state) {
897 case VM_INVALID: base->state = V3_VM_INVALID; break;
898 case VM_RUNNING: base->state = V3_VM_RUNNING; break;
899 case VM_STOPPED: base->state = V3_VM_STOPPED; break;
900 case VM_PAUSED: base->state = V3_VM_PAUSED; break;
901 case VM_ERROR: base->state = V3_VM_ERROR; break;
902 case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
903 default: base->state = V3_VM_UNKNOWN; break;
906 for (i=0;i<numcores;i++) {
907 switch (vm->cores[i].core_run_state) {
908 case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
909 case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
910 case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
911 default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
913 switch (vm->cores[i].cpu_mode) {
914 case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
915 case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
916 case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
917 case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
918 case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
919 case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
920 default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
922 switch (vm->cores[i].shdw_pg_mode) {
923 case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
924 case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
925 default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
927 switch (vm->cores[i].mem_mode) {
928 case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
929 case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
930 default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
933 core->vcore[i].pcore=vm->cores[i].pcpu_id;
934 core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
935 core->vcore[i].num_exits=vm->cores[i].num_exits;
// Report back how many entries were actually filled in.
938 core->num_vcores=numcores;
940 for (i=0;i<numregions;i++) {
941 mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
942 mem->region[i].size = v3_mem_block_size;
943 #ifdef V3_CONFIG_SWAPPING
944 mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
945 mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;
947 mem->region[i].swapped = 0;
948 mem->region[i].pinned = 0;
953 mem->num_regions=numregions;
959 #ifdef V3_CONFIG_CHECKPOINT
960 #include <palacios/vmm_checkpoint.h>
962 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
963 if (!vm || !store || !url) {
964 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_save_vm\n");
967 return v3_chkpt_save_vm(vm, store, url, opts);
971 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
972 if (!vm || !store || !url) {
973 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_load_vm\n");
976 return v3_chkpt_load_vm(vm, store, url, opts);
979 #ifdef V3_CONFIG_LIVE_MIGRATION
980 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
981 if (!vm || !store || !url) {
982 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_send_vm\n");
985 return v3_chkpt_send_vm(vm, store, url, opts);
989 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
990 if (!vm || !store || !url) {
991 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_receive_vm\n");
994 return v3_chkpt_receive_vm(vm, store, url, opts);
// Destroy a stopped (or errored) VM: free its devices, then each core's
// scheduler state and per-core state, then the VM-level scheduler state
// and the VM structure itself.
// NOTE(review): elided listing - declaration of i, early returns, and
// closing braces are not visible here.
1001 int v3_free_vm(struct v3_vm_info * vm) {
1003 // deinitialize guest (free memory, etc...)
1006 PrintError(VM_NONE, VCORE_NONE, "Asked to free nonexistent VM\n");
// Refuse to free a VM that may still have running cores.
1010 if ((vm->run_state != VM_STOPPED) &&
1011 (vm->run_state != VM_ERROR)) {
1012 PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);
1016 v3_free_vm_devices(vm);
1019 for (i = 0; i < vm->num_cores; i++) {
1020 v3_scheduler_free_core(&(vm->cores[i]));
1021 v3_free_core(&(vm->cores[i]));
1025 v3_scheduler_free_vm(vm);
1026 v3_free_vm_internal(vm);
// Report the host CPU's paging/execution mode. Two #ifdef'd variants
// exist; this first one inspects CR4.PAE to distinguish PROTECTED from
// PROTECTED_PAE on a 32-bit host.
// NOTE(review): elided listing - the surrounding #ifdef, the CR4 read
// into cr4_val, the non-PAE return, the entire body of the second
// (64-bit) variant, and closing braces are not visible here.
1038 v3_cpu_mode_t v3_get_host_cpu_mode() {
1040 struct cr4_32 * cr4;
1048 cr4 = (struct cr4_32 *)&(cr4_val);
1050 if (cr4->pae == 1) {
1051 return PROTECTED_PAE;
1059 v3_cpu_mode_t v3_get_host_cpu_mode() {
1065 void v3_print_cond(const char * fmt, ...) {
1066 if (v3_dbg_enable == 1) {
1071 vsnprintf(buf, 2048, fmt, ap);
1074 V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
1080 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
1081 extern struct v3_os_hooks * os_hooks;
1083 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
1084 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
1090 int v3_vm_enter(struct guest_info * info) {
1091 switch (v3_mach_type) {
1092 #ifdef V3_CONFIG_SVM
1094 case V3_SVM_REV3_CPU:
1095 return v3_svm_enter(info);
1100 case V3_VMX_EPT_CPU:
1101 case V3_VMX_EPT_UG_CPU:
1102 return v3_vmx_enter(info);
1106 PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
1112 void *v3_get_host_vm(struct v3_vm_info *x)
1115 return x->host_priv_data;
1121 int v3_get_vcore(struct guest_info *x)