2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
34 #include <interfaces/vmm_file.h>
37 #include <palacios/svm.h>
40 #include <palacios/vmx.h>
43 #ifdef V3_CONFIG_CHECKPOINT
44 #include <palacios/vmm_checkpoint.h>
// Per-physical-CPU virtualization capability, discovered by init_cpu() on each CPU.
48 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
// Machine-wide CPU type; Init_V3() sets it to the first valid per-CPU type found.
49 v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;
// Host-OS callback table handed to Init_V3(); used for cross-CPU calls, printing, etc.
51 struct v3_os_hooks * os_hooks = NULL;
// When 1, v3_print_cond() output is emitted.
52 int v3_dbg_enable = 0;
// Per-CPU initialization callback, run on each physical CPU via hooks->call_on_cpu().
// Probes the CPU for SVM or VMX support and initializes the matching backend,
// recording the capability (presumably into v3_cpu_types[cpu_id] inside the
// v3_init_*_cpu() paths — the #ifdef guards and braces are missing from this view).
56 static void init_cpu(void * arg) {
57 uint32_t cpu_id = (uint32_t)(addr_t)arg;
62 if (v3_is_svm_capable()) {
63 PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
64 v3_init_svm_cpu(cpu_id);
69 if (v3_is_vmx_capable()) {
70 PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
71 v3_init_vmx_cpu(cpu_id);
// Fallthrough when the CPU supports neither SVM nor VMX.
76 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Per-CPU teardown callback: dispatches on the capability recorded in
// v3_cpu_types[cpu_id] and deinitializes the matching SVM/VMX backend.
// (Several case labels of the switch are missing from this view.)
81 static void deinit_cpu(void * arg) {
82 uint32_t cpu_id = (uint32_t)(addr_t)arg;
85 switch (v3_cpu_types[cpu_id]) {
89 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
90 v3_deinit_svm_cpu(cpu_id);
96 case V3_VMX_EPT_UG_CPU:
97 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
98 v3_deinit_vmx_cpu(cpu_id);
// Default / invalid-CPU case.
103 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Returns nonzero iff the host CPU is running in long mode.
// Reads the EFER MSR (0xc0000080) and checks that both LME (bit 8) and
// LMA (bit 10) are set: 0x100 | 0x400 == 0x500.
112 static int in_long_mode()
116 v3_get_msr(0xc0000080,&high,&low); // EFER
118 return ((low & 0x500)== 0x500); // LMA and LME set
// Global VMM initialization. Records the host-OS hook table, verifies the host
// is in 64-bit long mode, initializes every Palacios subsystem (memory, shadow
// paging, cpu_mapper, scheduler, extensions, checkpointing), then runs
// init_cpu() on each physical CPU selected by cpu_mask and derives the global
// machine type (v3_mach_type) from the first successfully-probed CPU.
// cpu_mask: bitmask of physical CPUs to initialize (NULL == all).
// options:  host-supplied option string parsed by v3_parse_options().
122 void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {
127 V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");
// 32-bit hosts are rejected at compile time.
132 #error Palacios does not support compilation for a 32 bit host OS!!!!
134 if (!in_long_mode()) {
135 PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");
140 // Set global variables.
// Clamp the usable CPU count to the compile-time maximum.
143 if (num_cpus>V3_CONFIG_MAX_CPUS) {
144 PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
147 // Determine the global machine type
148 v3_mach_type = V3_INVALID_CPU;
150 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
151 v3_cpu_types[i] = V3_INVALID_CPU;
154 // Parse host-os defined options into an easily-accessed format.
155 v3_parse_options(options);
161 // Memory manager initialization
164 // Register all the possible device types
167 // Register all shadow paging handlers
168 V3_init_shdw_paging();
170 #ifdef V3_CONFIG_SWAPPING
174 // Initialize the cpu_mapper framework (must be before extensions)
175 V3_init_cpu_mapper();
177 // Initialize the scheduler framework (must be before extensions)
178 V3_init_scheduling();
180 // Register all extensions
181 V3_init_extensions();
183 // Enabling cpu_mapper
184 V3_enable_cpu_mapper();
186 // Enabling scheduler
187 V3_enable_scheduler();
190 #ifdef V3_CONFIG_SYMMOD
194 #ifdef V3_CONFIG_CHECKPOINT
195 V3_init_checkpoint();
// Per-CPU probe: only CPUs selected in cpu_mask are initialized.
198 if ((hooks) && (hooks->call_on_cpu)) {
200 for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
204 if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
205 V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
206 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
// The first CPU to report a valid type defines the machine-wide type.
208 if (v3_mach_type == V3_INVALID_CPU) {
209 v3_mach_type = v3_cpu_types[i];
// Global VMM teardown — subsystems are torn down in the reverse order of
// Init_V3(). NOTE(review): the function signature line is missing from this
// view; presumably this is Shutdown_V3().
// Reverse order of Init_V3
// Run deinit_cpu() on every CPU that was successfully initialized.
225 if ((os_hooks) && (os_hooks->call_on_cpu)) {
226 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
227 if (v3_cpu_types[i] != V3_INVALID_CPU) {
228 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
229 //deinit_cpu((void *)(addr_t)i);
234 #ifdef V3_CONFIG_CHECKPOINT
235 V3_deinit_checkpoint();
238 #ifdef V3_CONFIG_SYMMOD
// Disable before deinit, mirroring the enable-after-init order in Init_V3().
242 V3_disable_scheduler();
244 V3_disable_cpu_mapper();
246 V3_deinit_extensions();
248 V3_deinit_scheduling();
250 V3_deinit_cpu_mapper();
252 #ifdef V3_CONFIG_SWAPPING
253 v3_deinit_swapping();
256 V3_deinit_shdw_paging();
// Returns the virtualization capability recorded for physical CPU cpu_id.
// No bounds check: cpu_id must be < V3_CONFIG_MAX_CPUS.
272 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
273 return v3_cpu_types[cpu_id];
// Thread entry point for a virtual core. Registers the core with the
// scheduler, then enters the guest via the SVM or VMX backend selected by the
// global v3_mach_type. Returns the backend's exit status; the error paths'
// return statements are missing from this view.
276 static int start_core(void * p)
278 struct guest_info * core = (struct guest_info *)p;
280 if (v3_scheduler_register_core(core) == -1){
281 PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);
284 PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
285 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
287 switch (v3_mach_type) {
290 case V3_SVM_REV3_CPU:
291 return v3_start_svm_guest(core);
297 case V3_VMX_EPT_UG_CPU:
298 return v3_start_vmx_guest(core);
// Default case: no usable virtualization backend.
302 PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
// Creates (but does not start) a VM from a guest configuration image.
// Parses the config, names the VM (truncated to 127 chars + NUL), registers it
// with the cpu_mapper and scheduler, admits it for core mapping, and spawns
// one host thread per virtual core (each running start_core()).
// Returns the new VM on success; the NULL-returning error paths are missing
// from this view.
// priv_data: opaque host-private pointer stored with the VM.
// cpu_mask:  physical CPUs this VM's cores may be mapped onto.
309 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name, unsigned int cpu_mask) {
310 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
314 PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");
318 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
// Overlong names are truncated, not rejected.
322 } else if (strlen(name) >= 128) {
323 PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");
// strncpy(..., 127) plus the preceding memset guarantees NUL termination.
326 memset(vm->name, 0, 128);
327 strncpy(vm->name, name, 127);
329 if(v3_cpu_mapper_register_vm(vm) == -1) {
331 PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
335 * Register this VM with the palacios scheduler. It will ask for admission
338 if(v3_scheduler_register_vm(vm) == -1) {
340 PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
343 if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
344 PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
// Create (but do not start) one host thread per vcore, pinned to the
// physical CPU chosen by the cpu_mapper (core->pcpu_id).
347 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
349 struct guest_info * core = &(vm->cores[vcore_id]);
351 PrintDebug(vm, VCORE_NONE, "Creating virtual core %u on logical core %u\n",
352 vcore_id, core->pcpu_id);
354 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
356 PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
357 core->pcpu_id, start_core, core, core->exec_name);
359 core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
361 if (core->core_thread == NULL) {
362 PrintError(vm, VCORE_NONE, "Thread creation failed\n");
// Launches (or resumes) a previously created VM. Validates run state, performs
// HVM boot setup, counts how many physical CPUs in cpu_mask are actually
// virtualization-capable, asks the scheduler for admission, then starts every
// vcore's host thread. Returns 0 on success; error-return lines are missing
// from this view.
371 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
374 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
375 uint32_t avail_cores = 0;
379 PrintError(VM_NONE, VCORE_NONE, "Asked to start nonexistent VM\n");
// Only a stopped VM may be (re)started.
383 if (vm->run_state != VM_STOPPED) {
384 PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);
389 if (v3_setup_hvm_vm_for_boot(vm)) {
390 PrintError(vm, VCORE_NONE, "HVM setup for boot failed\n");
395 /// CHECK IF WE ARE MULTICORE ENABLED....
397 V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
398 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
401 // Check that enough cores are present in the mask to handle vcores
402 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
406 if (core_mask[major] & (0x1 << minor)) {
// Strip CPUs without virtualization support out of the working mask.
407 if (v3_cpu_types[i] == V3_INVALID_CPU) {
408 core_mask[major] &= ~(0x1 << minor);
415 vm->avail_cores = avail_cores;
417 if (v3_scheduler_admit_vm(vm) != 0){
418 PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
421 vm->run_state = VM_RUNNING;
423 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
425 struct guest_info * core = &(vm->cores[vcore_id]);
427 PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
428 vcore_id, core->pcpu_id);
// Fresh launch vs. resume: only an INVALID core is reset to STOPPED here.
430 if (core->core_run_state==CORE_INVALID) {
431 // launch of a fresh VM
432 core->core_run_state = CORE_STOPPED;
433 // core zero will turn itself on
435 // this is a resume - use whatever its current run_state is
438 V3_START_THREAD(core->core_thread);
// Resets a single virtual core to begin execution at `rip`, dispatching to the
// SVM or VMX backend based on the capability of the core's current physical
// CPU. (Several switch case labels and the error-path return are missing from
// this view.)
447 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
449 switch (v3_cpu_types[core->pcpu_id]) {
452 case V3_SVM_REV3_CPU:
453 PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
454 return v3_reset_svm_vm_core(core, rip);
459 case V3_VMX_EPT_UG_CPU:
460 PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
461 return v3_reset_vmx_vm_core(core, rip);
465 PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
/* move a virtual core to different physical core */
// Validates the request, raises a VM-wide barrier so no core is executing
// guest code, flushes any hardware state cached on the old physical CPU
// (e.g. the VMCS for VMX), moves the host thread, and updates pcpu_id.
// Returns through paths partially missing from this view; barrier is lowered
// on both success and the thread-move failure path.
475 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
476 struct guest_info * core = NULL;
479 PrintError(VM_NONE, VCORE_NONE, "Asked to move core of nonexistent VM\n");
483 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
484 PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);
488 core = &(vm->cores[vcore_id]);
490 if (target_cpu == core->pcpu_id) {
491 PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
492 // well that was pointless
496 if (core->core_thread == NULL) {
497 PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");
// Spin until the barrier is raised - -1 presumably means "retry" here.
501 while (v3_raise_barrier(vm, NULL) == -1);
503 V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
505 // Double check that we weren't preemptively migrated
506 if (target_cpu != core->pcpu_id) {
508 V3_Print(vm, core, "Moving Core\n");
510 if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
511 PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
// VMX state is CPU-local (VMCS), so flush it on the old CPU before moving.
517 switch (v3_cpu_types[core->pcpu_id]) {
520 case V3_VMX_EPT_UG_CPU:
521 PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
522 V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
529 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
530 PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
531 core->vcpu_id, target_cpu);
532 v3_lower_barrier(vm);
536 /* There will be a benign race window here:
537 core->pcpu_id will be set to the target core before its fully "migrated"
538 However the core will NEVER run on the old core again, its just in flight to the new core
540 core->pcpu_id = target_cpu;
542 V3_Print(vm, core, "core now at %d\n", core->pcpu_id);
545 v3_lower_barrier(vm);
/* move a memory region to memory with affinity for a specific physical core */
// Finds the base region holding `gpa`, and if its NUMA node differs from
// target_cpu's node: barriers the whole VM, allocates a replacement region on
// the new node, copies the contents, rewrites the region's host_addr/numa_id,
// invalidates every core's paging structures (shadow PTs or nested mappings),
// frees the old pages, and lowers the barrier. On an unrecognized paging mode
// the move is rolled back. Error-path returns are missing from this view.
551 int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {
554 struct v3_mem_region *reg;
561 PrintError(VM_NONE, VCORE_NONE, "Asked to move memory of nonexistent VM\n");
565 old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);
568 PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);
572 new_node = v3_numa_cpu_to_node(target_cpu);
575 PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);
// Fast path: already on the right node, nothing to do.
579 if (new_node==old_node) {
580 PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");
584 // We are now going to change the universe, so
585 // we'll barrier everyone first
587 while (v3_raise_barrier(vm, NULL) == -1);
591 reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);
594 PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");
// Only fully-allocated base regions may be migrated.
598 if (!(reg->flags.base) || !(reg->flags.alloced)) {
599 PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);
603 // we now have the allocated base region corresponding to - and not a copy
604 // we will rewrite this region after moving its contents
606 // first, let's double check that we are in fact changing the numa_id...
608 if (reg->numa_id==new_node) {
609 PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");
613 // region uses exclusive addressing [guest_start,guest_end)
614 num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;
616 new_hpa = V3_AllocPagesExtended(num_pages,
619 0); // no constraints given new shadow pager impl
622 PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");
626 // Note, assumes virtual contiguity in the host OS...
627 memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);
// Remember the old placement so we can roll back on failure below.
629 old_hpa = (void*)(reg->host_addr);
630 old_node = (int)(reg->numa_id);
632 reg->host_addr = (addr_t)new_hpa;
633 reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);
635 // flush all page tables / kill all humans
637 for (i=0;i<vm->num_cores;i++) {
638 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
639 v3_invalidate_shadow_pts(&(vm->cores[i]));
640 } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
641 // nested invalidator uses inclusive addressing [start,end], not [start,end)
642 v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);
644 PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
645 // We'll restore things...
646 reg->host_addr = (addr_t) old_hpa;
647 reg->numa_id = old_node;
648 V3_FreePages(new_hpa,num_pages);
653 // Now the old region can go away...
654 V3_FreePages(old_hpa,num_pages);
656 PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
657 (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));
660 v3_lower_barrier(vm);
// Second lower is presumably on a distinct (error) path - control flow
// between these two calls is missing from this view.
665 v3_lower_barrier(vm);
// Stops a running (or simulating) VM: flips run_state to VM_STOPPED, then
// waits for every core to observe the stop and reach CORE_STOPPED, asking the
// scheduler to stop any core still running. Error returns are missing from
// this view.
669 int v3_stop_vm(struct v3_vm_info * vm) {
671 struct guest_info * running_core;
674 PrintError(VM_NONE, VCORE_NONE, "Asked to stop nonexistent VM\n");
678 if ((vm->run_state != VM_RUNNING) &&
679 (vm->run_state != VM_SIMULATING)) {
680 PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);
// Cores poll run_state; setting it here is what makes them exit.
684 vm->run_state = VM_STOPPED;
686 // Sanity check to catch any weird execution states
687 if (v3_wait_for_barrier(vm, NULL) == 0) {
688 v3_lower_barrier(vm);
691 // XXX force exit all cores via a cross call/IPI XXX
// Poll loop (enclosing loop construct missing from this view): find a core
// that has not yet stopped and ask the scheduler to stop it.
695 int still_running = 0;
697 for (i = 0; i < vm->num_cores; i++) {
698 if (vm->cores[i].core_run_state != CORE_STOPPED) {
699 running_core = &vm->cores[i];
704 if (still_running == 0) {
708 v3_scheduler_stop_core(running_core);
711 V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
// Pauses a running VM by raising the VM-wide barrier (so all cores are parked)
// and setting run_state to VM_PAUSED. The barrier stays raised until
// v3_continue_vm() lowers it.
717 int v3_pause_vm(struct v3_vm_info * vm) {
720 PrintError(VM_NONE, VCORE_NONE, "Asked to pause nonexistent VM\n");
724 if (vm->run_state != VM_RUNNING) {
725 PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");
729 while (v3_raise_barrier(vm, NULL) == -1);
731 vm->run_state = VM_PAUSED;
// Resumes a paused VM: flips run_state back to VM_RUNNING and lowers the
// barrier that v3_pause_vm() left raised, releasing all cores.
737 int v3_continue_vm(struct v3_vm_info * vm) {
740 PrintError(VM_NONE, VCORE_NONE, "Asked to continue nonexistent VM\n");
744 if (vm->run_state != VM_PAUSED) {
745 PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");
749 vm->run_state = VM_RUNNING;
751 v3_lower_barrier(vm);
// Per-core timeout callback used by v3_simulate_vm(). Marks this core's bit in
// the shared timeout bitmap and then spins until v3_simulate_vm() clears it,
// holding the core out of guest execution for the remainder of the simulation.
758 static int sim_callback(struct guest_info * core, void * private_data) {
759 struct v3_bitmap * timeout_map = private_data;
761 v3_bitmap_set(timeout_map, core->vcpu_id);
763 V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
765 while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
766 // We spin here if there is noone to yield to
// Runs a paused VM for approximately `msecs` milliseconds and re-pauses it.
// Arms a cycle-based timeout (sim_callback) on every core, lowers the barrier
// to let the VM run, waits until every core's bit is set in the timeout map
// (i.e. all cores hit the timeout and are spinning), then re-raises the
// barrier, restores VM_PAUSED, and releases the spinning cores by resetting
// the bitmap. Integer-only math: cycles = msecs * cpu_khz.
776 int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
777 struct v3_bitmap timeout_map;
781 uint64_t cpu_khz = V3_CPU_KHZ();
784 PrintError(VM_NONE, VCORE_NONE, "Asked to simulate nonexistent VM\n");
788 if (vm->run_state != VM_PAUSED) {
789 PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");
793 /* AT this point VM is paused */
796 v3_bitmap_init(&timeout_map, vm->num_cores);
801 // calculate cycles from msecs...
802 // IMPORTANT: Floating point not allowed.
803 cycles = (msecs * cpu_khz);
807 V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);
// Arm the per-core timeout on every vcore before letting the VM run.
811 for (i = 0; i < vm->num_cores; i++) {
812 if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
813 PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);
818 V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");
821 // Run the simulation
822 // vm->run_state = VM_SIMULATING;
823 vm->run_state = VM_RUNNING;
824 v3_lower_barrier(vm);
827 V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");
829 // block until simulation is complete
830 while (all_blocked == 0) {
833 for (i = 0; i < vm->num_cores; i++) {
834 if (v3_bitmap_check(&timeout_map, i) == 0) {
839 if (all_blocked == 1) {
843 // Intentionally spin if there is no one to yield to
848 V3_Print(vm, VCORE_NONE,"Simulation is complete\n");
850 // Simulation is complete
851 // Reset back to PAUSED state
// Raise-nowait first, then clearing the bitmap lets the cores leave
// sim_callback's spin loop and run straight into the barrier.
853 v3_raise_barrier_nowait(vm, NULL);
854 vm->run_state = VM_PAUSED;
856 v3_bitmap_reset(&timeout_map);
858 v3_wait_for_barrier(vm, NULL);
// Snapshots VM state into caller-supplied structures for external consumers:
// overall run state into `base`, per-vcore state/mode into `core`, and base
// memory regions into `mem`. The caller declares capacity via core->num_vcores
// and mem->num_regions; output counts are clamped to what the VM actually has
// and written back into those fields. All four pointers must be non-NULL.
864 int v3_get_state_vm(struct v3_vm_info *vm,
865 struct v3_vm_base_state *base,
866 struct v3_vm_core_state *core,
867 struct v3_vm_mem_state *mem)
872 extern uint64_t v3_mem_block_size;
874 if (!vm || !base || !core || !mem) {
875 PrintError(VM_NONE, VCORE_NONE, "Invalid rquest to v3_get_state_vm\n");
// Clamp to min(caller capacity, actual count).
879 numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
880 numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
// Map internal run state to the externally-visible enum.
882 switch (vm->run_state) {
883 case VM_INVALID: base->state = V3_VM_INVALID; break;
884 case VM_RUNNING: base->state = V3_VM_RUNNING; break;
885 case VM_STOPPED: base->state = V3_VM_STOPPED; break;
886 case VM_RESETTING: base->state = V3_VM_RESETTING; break;
887 case VM_PAUSED: base->state = V3_VM_PAUSED; break;
888 case VM_ERROR: base->state = V3_VM_ERROR; break;
889 case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
890 default: base->state = V3_VM_UNKNOWN; break;
// Per-vcore state, CPU mode, paging mode, and memory mode.
893 for (i=0;i<numcores;i++) {
894 switch (vm->cores[i].core_run_state) {
895 case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
896 case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
897 case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
898 default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
900 switch (vm->cores[i].cpu_mode) {
901 case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
902 case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
903 case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
904 case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
905 case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
906 case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
907 default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
909 switch (vm->cores[i].shdw_pg_mode) {
910 case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
911 case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
912 default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
914 switch (vm->cores[i].mem_mode) {
915 case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
916 case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
917 default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
920 core->vcore[i].pcore=vm->cores[i].pcpu_id;
921 core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
922 core->vcore[i].num_exits=vm->cores[i].num_exits;
925 core->num_vcores=numcores;
// Base memory region snapshot; swap/pin flags only exist with swapping on.
927 for (i=0;i<numregions;i++) {
928 mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
929 mem->region[i].size = v3_mem_block_size;
930 #ifdef V3_CONFIG_SWAPPING
931 mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
932 mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;
934 mem->region[i].swapped = 0;
935 mem->region[i].pinned = 0;
940 mem->num_regions=numregions;
946 #ifdef V3_CONFIG_CHECKPOINT
947 #include <palacios/vmm_checkpoint.h>
// Checkpoints a VM to `url` on checkpoint store `store` (thin validated
// wrapper around v3_chkpt_save_vm). Only compiled under V3_CONFIG_CHECKPOINT.
949 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
950 if (!vm || !store || !url) {
951 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_save_vm\n");
954 return v3_chkpt_save_vm(vm, store, url, opts);
// Restores a VM from a checkpoint at `url` on store `store` (thin validated
// wrapper around v3_chkpt_load_vm).
958 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
959 if (!vm || !store || !url) {
960 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_load_vm\n");
963 return v3_chkpt_load_vm(vm, store, url, opts);
966 #ifdef V3_CONFIG_LIVE_MIGRATION
// Live-migration send side: validated wrapper around v3_chkpt_send_vm.
// Only compiled under V3_CONFIG_LIVE_MIGRATION.
967 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
968 if (!vm || !store || !url) {
969 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_send_vm\n");
972 return v3_chkpt_send_vm(vm, store, url, opts);
// Live-migration receive side: validated wrapper around v3_chkpt_receive_vm.
976 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
977 if (!vm || !store || !url) {
978 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_receive_vm\n");
981 return v3_chkpt_receive_vm(vm, store, url, opts);
// Destroys a VM that is stopped or in the error state: frees devices, then
// each core (scheduler bookkeeping and core state), then scheduler VM state
// and the VM structure itself. Error-path returns are missing from this view.
988 int v3_free_vm(struct v3_vm_info * vm) {
990 // deinitialize guest (free memory, etc...)
993 PrintError(VM_NONE, VCORE_NONE, "Asked to free nonexistent VM\n");
// A running/paused VM may not be freed.
997 if ((vm->run_state != VM_STOPPED) &&
998 (vm->run_state != VM_ERROR)) {
999 PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);
1003 v3_free_vm_devices(vm);
1006 for (i = 0; i < vm->num_cores; i++) {
1007 v3_scheduler_free_core(&(vm->cores[i]));
1008 v3_free_core(&(vm->cores[i]));
1012 v3_scheduler_free_vm(vm);
1013 v3_free_vm_internal(vm);
// Host CPU mode probe (32-bit build variant, judging by cr4_32): reads CR4
// and reports PROTECTED_PAE when PAE is enabled. The CR4-read lines and the
// non-PAE return are missing from this view.
1025 v3_cpu_mode_t v3_get_host_cpu_mode() {
1027 struct cr4_32 * cr4;
1035 cr4 = (struct cr4_32 *)&(cr4_val);
1037 if (cr4->pae == 1) {
1038 return PROTECTED_PAE;
// Alternate (presumably 64-bit) build variant of v3_get_host_cpu_mode();
// its body is entirely missing from this view.
1046 v3_cpu_mode_t v3_get_host_cpu_mode() {
// Conditional debug print: formats into a fixed 2048-byte buffer and emits it
// only when the global v3_dbg_enable flag is set. (va_start/va_end and the
// buffer declaration are missing from this view.)
1052 void v3_print_cond(const char * fmt, ...) {
1053 if (v3_dbg_enable == 1) {
1058 vsnprintf(buf, 2048, fmt, ap);
1061 V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
// Sends interrupt `vector` to physical CPU `logical_cpu` via the host-OS hook,
// if the host provided one; silently does nothing otherwise.
1067 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
1068 extern struct v3_os_hooks * os_hooks;
1070 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
1071 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
// Enters the guest for one execution stint on this core, dispatching to the
// SVM or VMX backend based on the global machine type. The default-case
// error return is missing from this view.
1077 int v3_vm_enter(struct guest_info * info) {
1078 switch (v3_mach_type) {
1079 #ifdef V3_CONFIG_SVM
1081 case V3_SVM_REV3_CPU:
1082 return v3_svm_enter(info);
1087 case V3_VMX_EPT_CPU:
1088 case V3_VMX_EPT_UG_CPU:
1089 return v3_vmx_enter(info);
1093 PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
// Returns the host-private data pointer stored with the VM at creation time
// (the priv_data argument of v3_create_vm()).
1099 void *v3_get_host_vm(struct v3_vm_info *x)
1102 return x->host_priv_data;
1108 int v3_get_vcore(struct guest_info *x)