2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
34 #include <interfaces/vmm_file.h>
37 #include <palacios/svm.h>
40 #include <palacios/vmx.h>
43 #ifdef V3_CONFIG_CHECKPOINT
44 #include <palacios/vmm_checkpoint.h>
// Per-physical-CPU virtualization capability/type table.
// NOTE(review): presumably populated by the per-CPU init path
// (v3_init_svm_cpu/v3_init_vmx_cpu) — confirm in those implementations.
48 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
// Global machine type; Init_V3 sets it to the first valid per-CPU type found.
49 v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;
// Hook table supplied by the host OS embedding Palacios (NULL until Init_V3).
51 struct v3_os_hooks * os_hooks = NULL;
// Debug-print gate used by v3_print_cond(); 0 disables conditional output.
52 int v3_dbg_enable = 0;
// Probe and initialize hardware virtualization support on the calling CPU.
// arg carries the logical cpu id, passed through addr_t for pointer-width safety.
// NOTE(review): the SVM and VMX branches are presumably wrapped in
// V3_CONFIG_SVM / V3_CONFIG_VMX #ifdefs elided from this view — confirm.
56 static void init_cpu(void * arg) {
57 uint32_t cpu_id = (uint32_t)(addr_t)arg;
62 if (v3_is_svm_capable()) {
63 PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
64 v3_init_svm_cpu(cpu_id);
69 if (v3_is_vmx_capable()) {
70 PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
71 v3_init_vmx_cpu(cpu_id);
// Falls through to an error when neither SVM nor VMX is available.
76 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Tear down per-CPU virtualization state, dispatching on the type that
// init_cpu() recorded in v3_cpu_types[]. arg carries the cpu id.
81 static void deinit_cpu(void * arg) {
82 uint32_t cpu_id = (uint32_t)(addr_t)arg;
85 switch (v3_cpu_types[cpu_id]) {
// SVM case labels are elided from this view.
89 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
90 v3_deinit_svm_cpu(cpu_id);
96 case V3_VMX_EPT_UG_CPU:
97 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
98 v3_deinit_vmx_cpu(cpu_id);
// Default/error path: CPU was never initialized for virtualization.
103 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Return nonzero iff the host CPU is running in 64-bit long mode:
// reads the EFER MSR (0xc0000080) and checks that both LME (bit 8, 0x100)
// and LMA (bit 10, 0x400) are set — together 0x500.
// NOTE(review): declarations of 'high'/'low' are elided from this view.
112 static int in_long_mode()
116 v3_get_msr(0xc0000080,&high,&low); // EFER
118 return ((low & 0x500)== 0x500); // LMA and LME set
// Global one-time initialization of the Palacios VMM.
//   hooks    - host OS callback table (stored in the global os_hooks, elided here)
//   cpu_mask - bitmask of host CPUs to initialize; NULL means "all"
//   num_cpus - number of host CPUs the host OS reports
//   options  - host-supplied option string, parsed by v3_parse_options()
// Initializes the memory manager, paging, cpu_mapper, scheduler, extensions
// and (optionally) checkpointing, then runs init_cpu() on each selected CPU
// and derives the global v3_mach_type from the first initialized CPU.
122 void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {
127 V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");
// 32-bit hosts are rejected at compile time (guarding #if is elided here).
132 #error Palacios does not support compilation for a 32 bit host OS!!!!
134 if (!in_long_mode()) {
135 PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");
140 // Set global variables.
// Excess CPUs beyond the compile-time maximum are ignored with a warning.
143 if (num_cpus>V3_CONFIG_MAX_CPUS) {
144 PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
147 // Determine the global machine type
148 v3_mach_type = V3_INVALID_CPU;
150 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
151 v3_cpu_types[i] = V3_INVALID_CPU;
154 // Parse host-os defined options into an easily-accessed format.
155 v3_parse_options(options);
157 // Memory manager initialization
160 // Register all the possible device types
163 // Register all shadow paging handlers
164 V3_init_shdw_paging();
166 #ifdef V3_CONFIG_SWAPPING
170 // Initialize the cpu_mapper framework (must be before extensions)
171 V3_init_cpu_mapper();
173 // Initialize the scheduler framework (must be before extensions)
174 V3_init_scheduling();
176 // Register all extensions
177 V3_init_extensions();
179 // Enabling cpu_mapper
180 V3_enable_cpu_mapper();
182 // Enabling scheduler
183 V3_enable_scheduler();
186 #ifdef V3_CONFIG_SYMMOD
190 #ifdef V3_CONFIG_CHECKPOINT
191 V3_init_checkpoint();
// Per-CPU initialization: only CPUs selected by cpu_mask are touched.
// NOTE(review): computation of 'major'/'minor' from i is elided from this view.
194 if ((hooks) && (hooks->call_on_cpu)) {
196 for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
200 if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
201 V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
202 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
// The first successfully-typed CPU defines the global machine type.
204 if (v3_mach_type == V3_INVALID_CPU) {
205 v3_mach_type = v3_cpu_types[i];
// Reverse order of Init_V3
// NOTE(review): the enclosing function signature is elided from this view —
// presumably Shutdown_V3(); verify against the full source.
// Deinitializes each CPU that was initialized, then tears down subsystems
// in the reverse of Init_V3's bring-up order.
221 if ((os_hooks) && (os_hooks->call_on_cpu)) {
222 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
223 if (v3_cpu_types[i] != V3_INVALID_CPU) {
224 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
225 //deinit_cpu((void *)(addr_t)i);
230 #ifdef V3_CONFIG_CHECKPOINT
231 V3_deinit_checkpoint();
234 #ifdef V3_CONFIG_SYMMOD
238 V3_disable_scheduler();
240 V3_disable_cpu_mapper();
242 V3_deinit_extensions();
244 V3_deinit_scheduling();
246 V3_deinit_cpu_mapper();
248 #ifdef V3_CONFIG_SWAPPING
249 v3_deinit_swapping();
252 V3_deinit_shdw_paging();
// Return the recorded virtualization type of the given physical CPU.
// No bounds check: cpu_id must be < V3_CONFIG_MAX_CPUS.
264 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
265 return v3_cpu_types[cpu_id];
// Thread entry point for a single virtual core. Registers the core with the
// scheduler, then dispatches to the SVM or VMX guest-start routine based on
// the global machine type. Returns the start routine's result.
268 static int start_core(void * p)
270 struct guest_info * core = (struct guest_info *)p;
272 if (v3_scheduler_register_core(core) == -1){
273 PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);
276 PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
277 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
279 switch (v3_mach_type) {
282 case V3_SVM_REV3_CPU:
283 return v3_start_svm_guest(core);
289 case V3_VMX_EPT_UG_CPU:
290 return v3_start_vmx_guest(core);
// Default: v3_mach_type was never set to a valid virtualization type.
294 PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
// Create (but do not start) a VM from the given configuration blob.
//   cfg       - guest configuration image, consumed by v3_config_guest()
//   priv_data - host-private handle stored with the VM
//   name      - VM name; copied (truncated) into vm->name
//   cpu_mask  - physical CPUs the VM's cores may be mapped onto
// Registers the VM with the cpu_mapper and scheduler, then creates one host
// thread per virtual core (each running start_core). Returns the VM handle.
301 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name, unsigned int cpu_mask) {
302 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
306 PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");
310 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
// NOTE(review): strncpy below copies at most 127 chars + NUL, so the message
// "truncated to 128 chars" overstates by one — consider fixing the message.
314 } else if (strlen(name) >= 128) {
315 PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");
318 memset(vm->name, 0, 128);
319 strncpy(vm->name, name, 127);
321 if(v3_cpu_mapper_register_vm(vm) == -1) {
323 PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
327 * Register this VM with the palacios scheduler. It will ask for admission
330 if(v3_scheduler_register_vm(vm) == -1) {
332 PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
335 if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
336 PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
// One host thread per virtual core, pinned to the mapped physical core.
339 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
341 struct guest_info * core = &(vm->cores[vcore_id]);
343 PrintDebug(vm, VCORE_NONE, "Creating virtual core %u on logical core %u\n",
344 vcore_id, core->pcpu_id);
346 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
348 PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
349 core->pcpu_id, start_core, core, core->exec_name);
351 core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
353 if (core->core_thread == NULL) {
354 PrintError(vm, VCORE_NONE, "Thread creation failed\n");
// Launch (or resume) a previously-created VM.
//   cpu_mask - bitmask of physical CPUs usable by this VM
// Validates run state, strips CPUs without virtualization support from the
// mask, asks the scheduler for admission, then starts each core's thread.
363 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
366 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
367 uint32_t avail_cores = 0;
371 PrintError(VM_NONE, VCORE_NONE, "Asked to start nonexistent VM\n");
375 if (vm->run_state != VM_STOPPED) {
376 PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);
381 /// CHECK IF WE ARE MULTICORE ENABLED....
383 V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
384 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
387 // Check that enough cores are present in the mask to handle vcores
// NOTE(review): 'major'/'minor' byte/bit index computation is elided here.
388 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
392 if (core_mask[major] & (0x1 << minor)) {
// Clear mask bits for CPUs that were never virtualization-initialized.
393 if (v3_cpu_types[i] == V3_INVALID_CPU) {
394 core_mask[major] &= ~(0x1 << minor);
401 vm->avail_cores = avail_cores;
403 if (v3_scheduler_admit_vm(vm) != 0){
404 PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
407 vm->run_state = VM_RUNNING;
409 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
411 struct guest_info * core = &(vm->cores[vcore_id]);
413 PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
414 vcore_id, core->pcpu_id);
416 if (core->core_run_state==CORE_INVALID) {
417 // launch of a fresh VM
418 core->core_run_state = CORE_STOPPED;
419 // core zero will turn itself on
421 // this is a resume - use whatever its current run_state is
424 V3_START_THREAD(core->core_thread);
// Reset a single virtual core to the given guest RIP, dispatching on the
// virtualization type of the physical CPU the core currently runs on.
433 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
435 switch (v3_cpu_types[core->pcpu_id]) {
438 case V3_SVM_REV3_CPU:
439 PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
440 return v3_reset_svm_vm_core(core, rip);
445 case V3_VMX_EPT_UG_CPU:
446 PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
447 return v3_reset_vmx_vm_core(core, rip);
// Default: the pCPU has no recorded virtualization support.
451 PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
460 /* move a virtual core to different physical core */
// Validates arguments, raises a VM-wide barrier so no core is executing
// guest code, flushes hardware state tied to the old pCPU (VMX case shown),
// moves the host thread, then updates core->pcpu_id and lowers the barrier.
461 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
462 struct guest_info * core = NULL;
465 PrintError(VM_NONE, VCORE_NONE, "Asked to move core of nonexistent VM\n");
469 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
470 PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);
474 core = &(vm->cores[vcore_id]);
476 if (target_cpu == core->pcpu_id) {
477 PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
478 // well that was pointless
482 if (core->core_thread == NULL) {
483 PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");
// Spin until the barrier is raised (fails with -1 while another barrier holds).
487 while (v3_raise_barrier(vm, NULL) == -1);
489 V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
491 // Double check that we weren't preemptively migrated
492 if (target_cpu != core->pcpu_id) {
494 V3_Print(vm, core, "Moving Core\n");
496 if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
497 PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
503 switch (v3_cpu_types[core->pcpu_id]) {
506 case V3_VMX_EPT_UG_CPU:
// VMX keeps per-pCPU VMCS state; flush it on the old CPU before moving.
507 PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
508 V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
515 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
516 PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
517 core->vcpu_id, target_cpu);
518 v3_lower_barrier(vm);
522 /* There will be a benign race window here:
523 core->pcpu_id will be set to the target core before its fully "migrated"
524 However the core will NEVER run on the old core again, its just in flight to the new core
526 core->pcpu_id = target_cpu;
528 V3_Print(vm, core, "core now at %d\n", core->pcpu_id);
531 v3_lower_barrier(vm);
536 /* move a memory region to memory with affinity for a specific physical core */
// Moves the base memory region containing gpa to host memory local to
// target_cpu's NUMA node: under a VM-wide barrier it allocates a new backing
// area, copies the contents, rewrites the region, and invalidates all cores'
// paging structures. Reverts to the old backing if invalidation fails.
537 int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {
540 struct v3_mem_region *reg;
547 PrintError(VM_NONE, VCORE_NONE, "Asked to move memory of nonexistent VM\n");
551 old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);
554 PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);
558 new_node = v3_numa_cpu_to_node(target_cpu);
561 PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);
565 if (new_node==old_node) {
566 PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");
570 // We are now going to change the universe, so
571 // we'll barrier everyone first
573 while (v3_raise_barrier(vm, NULL) == -1);
577 reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);
580 PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");
584 if (!(reg->flags.base) || !(reg->flags.alloced)) {
585 PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);
589 // we now have the allocated base region corresponding to - and not a copy
590 // we will rewrite this region after moving its contents
592 // first, let's double check that we are in fact changing the numa_id...
594 if (reg->numa_id==new_node) {
595 PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");
599 // region uses exclusive addressing [guest_start,guest_end)
600 num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;
602 new_hpa = V3_AllocPagesExtended(num_pages,
605 0); // no constraints given new shadow pager impl
608 PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");
612 // Note, assumes virtual contiguity in the host OS...
613 memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);
615 old_hpa = (void*)(reg->host_addr);
616 old_node = (int)(reg->numa_id);
618 reg->host_addr = (addr_t)new_hpa;
619 reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);
621 // flush all page tables / kill all humans
623 for (i=0;i<vm->num_cores;i++) {
624 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
625 v3_invalidate_shadow_pts(&(vm->cores[i]));
626 } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
627 // nested invalidator uses inclusive addressing [start,end], not [start,end)
628 v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);
// Unknown paging mode: restore the original backing and bail out.
630 PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
631 // We'll restore things...
632 reg->host_addr = (addr_t) old_hpa;
633 reg->numa_id = old_node;
634 V3_FreePages(new_hpa,num_pages);
639 // Now the old region can go away...
640 V3_FreePages(old_hpa,num_pages);
642 PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
643 (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));
646 v3_lower_barrier(vm);
// NOTE(review): second lower-barrier below is presumably an error-path
// epilogue (label elided from this view) — verify against the full source.
651 v3_lower_barrier(vm);
// Stop a running (or simulating) VM: set VM_STOPPED so cores exit their run
// loops, then wait until every core reports CORE_STOPPED, yielding to any
// still-running core via the scheduler.
655 int v3_stop_vm(struct v3_vm_info * vm) {
657 struct guest_info * running_core;
660 PrintError(VM_NONE, VCORE_NONE, "Asked to stop nonexistent VM\n");
664 if ((vm->run_state != VM_RUNNING) &&
665 (vm->run_state != VM_SIMULATING)) {
666 PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);
670 vm->run_state = VM_STOPPED;
672 // Sanity check to catch any weird execution states
673 if (v3_wait_for_barrier(vm, NULL) == 0) {
674 v3_lower_barrier(vm);
677 // XXX force exit all cores via a cross call/IPI XXX
// Poll loop (enclosing while is elided here): track any core not yet stopped.
681 int still_running = 0;
683 for (i = 0; i < vm->num_cores; i++) {
684 if (vm->cores[i].core_run_state != CORE_STOPPED) {
685 running_core = &vm->cores[i];
690 if (still_running == 0) {
// Give the still-running core a chance to make progress and stop.
694 v3_scheduler_stop_core(running_core);
697 V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
// Pause a running VM: raise the VM-wide barrier (blocking all cores) and
// mark the VM paused. The barrier stays raised until v3_continue_vm().
703 int v3_pause_vm(struct v3_vm_info * vm) {
706 PrintError(VM_NONE, VCORE_NONE, "Asked to pause nonexistent VM\n");
710 if (vm->run_state != VM_RUNNING) {
711 PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");
715 while (v3_raise_barrier(vm, NULL) == -1);
717 vm->run_state = VM_PAUSED;
// Resume a paused VM: mark it running again and lower the barrier raised by
// v3_pause_vm(), releasing all cores.
723 int v3_continue_vm(struct v3_vm_info * vm) {
726 PrintError(VM_NONE, VCORE_NONE, "Asked to continue nonexistent VM\n");
730 if (vm->run_state != VM_PAUSED) {
731 PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");
735 vm->run_state = VM_RUNNING;
737 v3_lower_barrier(vm);
// Timeout callback used by v3_simulate_vm(): marks this vcore's bit in the
// shared timeout bitmap, then spins until v3_simulate_vm() clears it,
// effectively parking the core when its simulation budget expires.
744 static int sim_callback(struct guest_info * core, void * private_data) {
745 struct v3_bitmap * timeout_map = private_data;
747 v3_bitmap_set(timeout_map, core->vcpu_id);
749 V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
751 while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
752 // We spin here if there is noone to yield to
// Run a paused VM for approximately 'msecs' milliseconds, then re-pause it.
// Arms a cycle-count timeout (sim_callback) on every core, releases the
// barrier, waits until every core's bit is set in the timeout bitmap, then
// raises the barrier again and restores VM_PAUSED.
762 int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
763 struct v3_bitmap timeout_map;
767 uint64_t cpu_khz = V3_CPU_KHZ();
770 PrintError(VM_NONE, VCORE_NONE, "Asked to simulate nonexistent VM\n");
774 if (vm->run_state != VM_PAUSED) {
775 PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");
779 /* AT this point VM is paused */
782 v3_bitmap_init(&timeout_map, vm->num_cores);
787 // calculate cycles from msecs...
788 // IMPORTANT: Floating point not allowed.
// cpu_khz is cycles/ms, so msecs * cpu_khz gives the cycle budget directly.
789 cycles = (msecs * cpu_khz);
793 V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);
797 for (i = 0; i < vm->num_cores; i++) {
798 if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
799 PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);
804 V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");
807 // Run the simulation
808 // vm->run_state = VM_SIMULATING;
809 vm->run_state = VM_RUNNING;
810 v3_lower_barrier(vm);
813 V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");
815 // block until simulation is complete
816 while (all_blocked == 0) {
819 for (i = 0; i < vm->num_cores; i++) {
820 if (v3_bitmap_check(&timeout_map, i) == 0) {
825 if (all_blocked == 1) {
829 // Intentionally spin if there is no one to yield to
834 V3_Print(vm, VCORE_NONE,"Simulation is complete\n");
836 // Simulation is complete
837 // Reset back to PAUSED state
839 v3_raise_barrier_nowait(vm, NULL);
840 vm->run_state = VM_PAUSED;
// Clearing the bitmap releases the cores spinning in sim_callback().
842 v3_bitmap_reset(&timeout_map);
844 v3_wait_for_barrier(vm, NULL);
// Snapshot the externally-visible state of a VM into caller-provided
// structures: overall run state (base), per-vcore state/mode info (core,
// capped at core->num_vcores entries), and per-base-region memory info (mem).
// All four pointers must be non-NULL.
// NOTE(review): "rquest" typo in the error message below.
850 int v3_get_state_vm(struct v3_vm_info *vm,
851 struct v3_vm_base_state *base,
852 struct v3_vm_core_state *core,
853 struct v3_vm_mem_state *mem)
858 extern uint64_t v3_mem_block_size;
860 if (!vm || !base || !core || !mem) {
861 PrintError(VM_NONE, VCORE_NONE, "Invalid rquest to v3_get_state_vm\n");
// Clamp output counts to what the caller's buffers can hold.
865 numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
866 numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
868 switch (vm->run_state) {
869 case VM_INVALID: base->state = V3_VM_INVALID; break;
870 case VM_RUNNING: base->state = V3_VM_RUNNING; break;
871 case VM_STOPPED: base->state = V3_VM_STOPPED; break;
872 case VM_PAUSED: base->state = V3_VM_PAUSED; break;
873 case VM_ERROR: base->state = V3_VM_ERROR; break;
874 case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
875 default: base->state = V3_VM_UNKNOWN; break;
878 for (i=0;i<numcores;i++) {
879 switch (vm->cores[i].core_run_state) {
880 case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
881 case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
882 case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
883 default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
885 switch (vm->cores[i].cpu_mode) {
886 case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
887 case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
888 case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
889 case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
890 case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
891 case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
892 default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
894 switch (vm->cores[i].shdw_pg_mode) {
895 case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
896 case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
897 default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
899 switch (vm->cores[i].mem_mode) {
900 case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
901 case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
902 default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
905 core->vcore[i].pcore=vm->cores[i].pcpu_id;
906 core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
907 core->vcore[i].num_exits=vm->cores[i].num_exits;
910 core->num_vcores=numcores;
// NOTE(review): this loop runs over all base regions rather than the clamped
// 'numregions' — verify against the full source whether the bound is guarded.
912 for (i=0;i<vm->mem_map.num_base_regions;i++) {
913 mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
914 mem->region[i].size = v3_mem_block_size;
915 #ifdef V3_CONFIG_SWAPPING
916 mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
917 mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;
919 mem->region[i].swapped = 0;
920 mem->region[i].pinned = 0;
925 mem->num_regions=numregions;
931 #ifdef V3_CONFIG_CHECKPOINT
932 #include <palacios/vmm_checkpoint.h>
// Checkpoint a VM to the named store/url via the checkpoint subsystem.
// NOTE(review): "arguemnts" typo in the error message below.
934 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
935 if (!vm || !store || !url) {
936 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_save_vm\n");
939 return v3_chkpt_save_vm(vm, store, url, opts);
// Restore a VM from a checkpoint in the named store/url.
// NOTE(review): "arguemnts" typo in the error message below.
943 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
944 if (!vm || !store || !url) {
945 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_load_vm\n");
948 return v3_chkpt_load_vm(vm, store, url, opts);
951 #ifdef V3_CONFIG_LIVE_MIGRATION
// Live-migration sender: stream this VM's state to the named store/url.
// NOTE(review): "arguemnts" typo in the error message below.
952 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
953 if (!vm || !store || !url) {
954 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_send_vm\n");
957 return v3_chkpt_send_vm(vm, store, url, opts);
// Live-migration receiver: populate this VM from an incoming stream.
// NOTE(review): "arguemnts" typo in the error message below.
961 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
962 if (!vm || !store || !url) {
963 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_receive_vm\n");
966 return v3_chkpt_receive_vm(vm, store, url, opts);
// Destroy a stopped (or errored) VM: free its devices, free each core's
// scheduler and guest state, then release VM-level scheduler state and the
// VM structure itself.
973 int v3_free_vm(struct v3_vm_info * vm) {
975 // deinitialize guest (free memory, etc...)
978 PrintError(VM_NONE, VCORE_NONE, "Asked to free nonexistent VM\n");
982 if ((vm->run_state != VM_STOPPED) &&
983 (vm->run_state != VM_ERROR)) {
984 PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);
988 v3_free_vm_devices(vm);
991 for (i = 0; i < vm->num_cores; i++) {
992 v3_scheduler_free_core(&(vm->cores[i]));
993 v3_free_core(&(vm->cores[i]));
997 v3_scheduler_free_vm(vm);
998 v3_free_vm_internal(vm);
// Determine the host CPU mode (32-bit build variant): inspects CR4.PAE to
// distinguish PROTECTED_PAE from plain protected mode.
// NOTE(review): the read of CR4 into cr4_val is elided from this view.
1010 v3_cpu_mode_t v3_get_host_cpu_mode() {
1012 struct cr4_32 * cr4;
1020 cr4 = (struct cr4_32 *)&(cr4_val);
1022 if (cr4->pae == 1) {
1023 return PROTECTED_PAE;
// Alternate-build variant of v3_get_host_cpu_mode (body elided from this
// view; presumably the 64-bit branch returning LONG — verify in full source).
1031 v3_cpu_mode_t v3_get_host_cpu_mode() {
// Conditional debug printf: formats into a local buffer and emits it via
// V3_Print only when the global v3_dbg_enable flag is set.
// NOTE(review): buf declaration and va_start/va_end are elided from this view.
1037 void v3_print_cond(const char * fmt, ...) {
1038 if (v3_dbg_enable == 1) {
1043 vsnprintf(buf, 2048, fmt, ap);
1046 V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
// Deliver an interrupt vector to a physical CPU via the host OS's
// interrupt_cpu hook; silently does nothing if the hook is absent.
1052 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
1053 extern struct v3_os_hooks * os_hooks;
1055 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
1056 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
// Enter the guest on the calling core, dispatching on the global machine
// type to the SVM or VMX entry path.
// NOTE(review): "Attemping" typo in the error message below.
1062 int v3_vm_enter(struct guest_info * info) {
1063 switch (v3_mach_type) {
1064 #ifdef V3_CONFIG_SVM
1066 case V3_SVM_REV3_CPU:
1067 return v3_svm_enter(info);
1072 case V3_VMX_EPT_CPU:
1073 case V3_VMX_EPT_UG_CPU:
1074 return v3_vmx_enter(info);
// Default: no valid machine type was ever established.
1078 PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
// Accessor: return the host-private data pointer stored with the VM.
1084 void *v3_get_host_vm(struct v3_vm_info *x)
1087 return x->host_priv_data;
// Accessor for a core's vcpu id (body elided from this view — presumably
// returns x->vcpu_id; verify against the full source).
1093 int v3_get_vcore(struct guest_info *x)