2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
34 #include <interfaces/vmm_file.h>
37 #include <palacios/svm.h>
40 #include <palacios/vmx.h>
43 #ifdef V3_CONFIG_CHECKPOINT
44 #include <palacios/vmm_checkpoint.h>
// Per-physical-CPU virtualization capability (SVM/VMX variant).  Entries are
// V3_INVALID_CPU until init_cpu() runs on that CPU.
48 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
// Global machine type: set from the first successfully initialized CPU in Init_V3().
49 v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;
// Host-OS callback table, installed by Init_V3().
51 struct v3_os_hooks * os_hooks = NULL;
// Gates output of v3_print_cond() (1 = enabled).
52 int v3_dbg_enable = 0;
// Per-CPU initialization callback, invoked on each physical CPU via
// hooks->call_on_cpu() from Init_V3().  Probes for SVM or VMX support and
// initializes the matching per-CPU virtualization state.
// NOTE(review): interior lines are elided in this view; branch structure
// between the probes is not fully visible.
56 static void init_cpu(void * arg) {
57 uint32_t cpu_id = (uint32_t)(addr_t)arg;  // cpu id is passed by value through the void* arg
62 if (v3_is_svm_capable()) {
63 PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
64 v3_init_svm_cpu(cpu_id);
69 if (v3_is_vmx_capable()) {
70 PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
71 v3_init_vmx_cpu(cpu_id);
// Neither SVM nor VMX: this CPU cannot host guests.
76 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Per-CPU teardown callback (reverse of init_cpu), dispatched on the CPU's
// recorded arch type from v3_cpu_types[].
81 static void deinit_cpu(void * arg) {
82 uint32_t cpu_id = (uint32_t)(addr_t)arg;  // cpu id passed by value through the void* arg
85 switch (v3_cpu_types[cpu_id]) {
// SVM case labels elided in this view.
89 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
90 v3_deinit_svm_cpu(cpu_id);
// Additional VMX case labels elided in this view.
96 case V3_VMX_EPT_UG_CPU:
97 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
98 v3_deinit_vmx_cpu(cpu_id);
// Fallthrough/default: CPU was never initialized for virtualization.
103 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Returns nonzero iff the host CPU is running in long mode.
// Reads the EFER MSR (0xc0000080) and checks that both LME (bit 8, 0x100)
// and LMA (bit 10, 0x400) are set in the low dword: 0x100|0x400 == 0x500.
112 static int in_long_mode()
116 v3_get_msr(0xc0000080,&high,&low); // EFER
118 return ((low & 0x500)== 0x500); // LMA and LME set
// Global VMM initialization entry point, called once by the host OS.
//  hooks    - host-OS callback table (printing, allocation, CPU calls, ...)
//  cpu_mask - bitmap of physical CPUs to initialize (NULL = all)
//  num_cpus - number of CPUs the host reports
//  options  - host-supplied option string, parsed by v3_parse_options()
// Initializes global subsystems (paging, cpu_mapper, scheduler, extensions,
// checkpointing) and then runs init_cpu() on each selected physical CPU.
122 void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {
127 V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");
// 32-bit hosts are rejected at compile time (guarding #if elided in this view).
132 #error Palacios does not support compilation for a 32 bit host OS!!!!
// Runtime guard: Palacios only runs on hosts already in 64-bit long mode.
134 if (!in_long_mode()) {
135 PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");
140 // Set global variables.
// Clamp: CPUs beyond the compiled-in maximum are ignored.
143 if (num_cpus>V3_CONFIG_MAX_CPUS) {
144 PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
147 // Determine the global machine type
148 v3_mach_type = V3_INVALID_CPU;
150 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
151 v3_cpu_types[i] = V3_INVALID_CPU;
154 // Parse host-os defined options into an easily-accessed format.
155 v3_parse_options(options);
157 // Memory manager initialization
160 // Register all the possible device types
163 // Register all shadow paging handlers
164 V3_init_shdw_paging();
166 #ifdef V3_CONFIG_SWAPPING
170 // Initialize the cpu_mapper framework (must be before extensions)
171 V3_init_cpu_mapper();
173 // Initialize the scheduler framework (must be before extensions)
174 V3_init_scheduling();
176 // Register all extensions
177 V3_init_extensions();
179 // Enabling cpu_mapper
180 V3_enable_cpu_mapper();
182 // Enabling scheduler
183 V3_enable_scheduler();
186 #ifdef V3_CONFIG_SYMMOD
190 #ifdef V3_CONFIG_CHECKPOINT
191 V3_init_checkpoint();
// Per-CPU init: run init_cpu() on every selected CPU, honoring cpu_mask.
194 if ((hooks) && (hooks->call_on_cpu)) {
196 for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
// major/minor index the byte and bit of cpu i within cpu_mask (derivation elided).
200 if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
201 V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
202 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
// The first CPU to come up defines the global machine type.
204 if (v3_mach_type == V3_INVALID_CPU) {
205 v3_mach_type = v3_cpu_types[i];
// Global VMM shutdown sequence (function header elided in this view;
// presumably Shutdown_V3 — TODO confirm).  Tears subsystems down in the
// reverse order that Init_V3 brought them up.
217 // Reverse order of Init_V3
// Deinitialize every CPU that was successfully initialized.
221 if ((os_hooks) && (os_hooks->call_on_cpu)) {
222 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
223 if (v3_cpu_types[i] != V3_INVALID_CPU) {
224 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
225 //deinit_cpu((void *)(addr_t)i);
230 #ifdef V3_CONFIG_CHECKPOINT
231 V3_deinit_checkpoint();
234 #ifdef V3_CONFIG_SYMMOD
// Disable before deinit, mirroring the enable-after-init order in Init_V3.
238 V3_disable_scheduler();
240 V3_disable_cpu_mapper();
242 V3_deinit_extensions();
244 V3_deinit_scheduling();
246 V3_deinit_cpu_mapper();
248 #ifdef V3_CONFIG_SWAPPING
249 v3_deinit_swapping();
252 V3_deinit_shdw_paging();
// Returns the recorded virtualization arch of physical CPU cpu_id.
// No bounds check: caller must pass a valid index < V3_CONFIG_MAX_CPUS.
264 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
265 return v3_cpu_types[cpu_id];
// Create (but do not start) a VM from a parsed configuration blob.
//  cfg       - guest configuration image, consumed by v3_config_guest()
//  priv_data - opaque host-side handle stored with the VM
//  name      - VM name; truncated to 127 chars + NUL (vm->name is 128 bytes)
// Registers the new VM with the cpu_mapper and scheduler; returns NULL-ish
// failure paths are elided in this view.
269 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
270 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
273 PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");
277 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
281 } else if (strlen(name) >= 128) {
282 PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");
// Zero-fill then bounded copy guarantees NUL termination at vm->name[127].
285 memset(vm->name, 0, 128);
286 strncpy(vm->name, name, 127);
288 if(v3_cpu_mapper_register_vm(vm) == -1) {
290 PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
294 * Register this VM with the palacios scheduler. It will ask for admission
297 if(v3_scheduler_register_vm(vm) == -1) {
299 PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
// Thread entry point for a virtual core: registers the core with the
// scheduler, then enters the arch-specific guest run loop (SVM or VMX)
// chosen by the global v3_mach_type.  Returns the run loop's exit status.
308 static int start_core(void * p)
310 struct guest_info * core = (struct guest_info *)p;
312 if (v3_scheduler_register_core(core) == -1){
313 PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);
316 PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
317 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
// Dispatch on machine type; other case labels elided in this view.
319 switch (v3_mach_type) {
322 case V3_SVM_REV3_CPU:
323 return v3_start_svm_guest(core);
329 case V3_VMX_EPT_UG_CPU:
330 return v3_start_vmx_guest(core);
334 PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
// Launch (or resume) a stopped VM on the physical CPUs selected by cpu_mask.
//  cpu_mask - bitmap of allowed physical CPUs (bit i = CPU i)
// Validates the mask against initialized CPUs, asks the cpu_mapper and
// scheduler for admission, then spawns one host thread per virtual core
// running start_core().
341 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
// NOTE(review): cpu_mask is a 4-byte unsigned int, but the loop below walks
// V3_CONFIG_MAX_CPUS bits via core_mask[major] — if MAX_CPUS > 32 this reads
// (and clears bits) past &cpu_mask. TODO confirm MAX_CPUS <= 32 in config.
344 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
345 uint32_t avail_cores = 0;
349 PrintError(VM_NONE, VCORE_NONE, "Asked to start nonexistent VM\n");
// Only a stopped VM may be (re)started.
353 if (vm->run_state != VM_STOPPED) {
354 PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);
359 /// CHECK IF WE ARE MULTICORE ENABLED....
361 V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
362 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
365 // Check that enough cores are present in the mask to handle vcores
366 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
370 if (core_mask[major] & (0x1 << minor)) {
// Strip uninitialized CPUs from the caller's mask before admission.
371 if (v3_cpu_types[i] == V3_INVALID_CPU) {
372 core_mask[major] &= ~(0x1 << minor);
379 vm->avail_cores = avail_cores;
381 if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
382 PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
385 if (v3_scheduler_admit_vm(vm) != 0){
386 PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
389 vm->run_state = VM_RUNNING;
// Spawn one host thread per virtual core, pinned to its mapped physical CPU.
392 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
394 struct guest_info * core = &(vm->cores[vcore_id]);
396 PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
397 vcore_id, core->pcpu_id);
399 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
401 PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
402 core->pcpu_id, start_core, core, core->exec_name);
404 if (core->core_run_state==CORE_INVALID) {
405 // launch of a fresh VM
406 core->core_run_state = CORE_STOPPED;
407 // core zero will turn itself on
409 // this is a resume - use whatever its current run_state is
412 core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
414 if (core->core_thread == NULL) {
415 PrintError(vm, VCORE_NONE, "Thread launch failed\n");
// Reset a single virtual core to begin execution at guest address rip,
// dispatching to the arch-specific reset for the core's physical CPU type.
// Returns the arch reset's status; -1-style failure path elided in this view.
426 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
428 switch (v3_cpu_types[core->pcpu_id]) {
// Additional SVM case labels elided in this view.
431 case V3_SVM_REV3_CPU:
432 PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
433 return v3_reset_svm_vm_core(core, rip);
// Additional VMX case labels elided in this view.
438 case V3_VMX_EPT_UG_CPU:
439 PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
440 return v3_reset_vmx_vm_core(core, rip);
444 PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
453 /* move a virtual core to different physical core */
// Migrate virtual core vcore_id of vm to physical CPU target_cpu.
// Raises the VM-wide barrier so all cores are quiesced, flushes the
// arch-specific hardware state bound to the old CPU, moves the host
// thread, then updates core->pcpu_id and lowers the barrier.
454 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
455 struct guest_info * core = NULL;
458 PrintError(VM_NONE, VCORE_NONE, "Asked to move core of nonexistent VM\n");
462 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
463 PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);
467 core = &(vm->cores[vcore_id]);
// No-op if the core already runs on the requested CPU.
469 if (target_cpu == core->pcpu_id) {
470 PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
471 // well that was pointless
475 if (core->core_thread == NULL) {
476 PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");
// Spin until the barrier is raised: every core must be stopped at the
// barrier before we touch per-CPU state.
480 while (v3_raise_barrier(vm, NULL) == -1);
482 V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
484 // Double check that we weren't preemptively migrated
485 if (target_cpu != core->pcpu_id) {
487 V3_Print(vm, core, "Moving Core\n");
489 if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
490 PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
// Flush CPU-bound virtualization state (e.g. the VMCS must be cleared on
// the old CPU for VMX); SVM cases elided in this view.
496 switch (v3_cpu_types[core->pcpu_id]) {
499 case V3_VMX_EPT_UG_CPU:
500 PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
501 V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
508 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
509 PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
510 core->vcpu_id, target_cpu);
511 v3_lower_barrier(vm);
515 /* There will be a benign race window here:
516 core->pcpu_id will be set to the target core before its fully "migrated"
517 However the core will NEVER run on the old core again, its just in flight to the new core
519 core->pcpu_id = target_cpu;
521 V3_Print(vm, core, "core now at %d\n", core->pcpu_id);
524 v3_lower_barrier(vm);
529 /* move a memory region to memory with affinity for a specific physical core */
// Re-home the base memory region containing guest physical address gpa onto
// the NUMA node of target_cpu: allocate a new backing region on that node,
// copy the contents, swap the region's host_addr, invalidate all cores'
// paging structures, and free the old backing pages.  The whole operation
// runs under the VM-wide barrier.
530 int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {
533 struct v3_mem_region *reg;
540 PrintError(VM_NONE, VCORE_NONE, "Asked to move memory of nonexistent VM\n");
544 old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);
547 PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);
551 new_node = v3_numa_cpu_to_node(target_cpu);
554 PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);
// Fast path: memory already lives on the target node.
558 if (new_node==old_node) {
559 PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");
563 // We are now going to change the universe, so
564 // we'll barrier everyone first
566 while (v3_raise_barrier(vm, NULL) == -1);
570 reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);
573 PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");
// Only allocated base regions can be physically relocated.
577 if (!(reg->flags.base) || !(reg->flags.alloced)) {
578 PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);
582 // we now have the allocated base region corresponding to - and not a copy
583 // we will rewrite this region after moving its contents
585 // first, let's double check that we are in fact changing the numa_id...
587 if (reg->numa_id==new_node) {
588 PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");
592 // region uses exclusive addressing [guest_start,guest_end)
593 num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;
595 new_hpa = V3_AllocPagesExtended(num_pages,
598 0); // no constraints given new shadow pager impl
601 PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");
605 // Note, assumes virtual contiguity in the host OS...
606 memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);
// Remember the old backing so we can free it (or roll back on failure).
608 old_hpa = (void*)(reg->host_addr);
609 old_node = (int)(reg->numa_id);
611 reg->host_addr = (addr_t)new_hpa;
612 reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);
614 // flush all page tables / kill all humans
616 for (i=0;i<vm->num_cores;i++) {
617 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
618 v3_invalidate_shadow_pts(&(vm->cores[i]));
619 } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
620 // nested invalidator uses inclusive addressing [start,end], not [start,end)
621 v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);
// Unknown paging mode: roll back to the old backing region.
623 PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
624 // We'll restore things...
625 reg->host_addr = (addr_t) old_hpa;
626 reg->numa_id = old_node;
627 V3_FreePages(new_hpa,num_pages);
632 // Now the old region can go away...
633 V3_FreePages(old_hpa,num_pages);
635 PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
636 (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));
639 v3_lower_barrier(vm);
// Error-path barrier release (reached via elided error exits above).
644 v3_lower_barrier(vm);
// Stop a running (or simulating) VM: mark it VM_STOPPED and wait for all
// virtual cores to observe the state change and leave their run loops.
648 int v3_stop_vm(struct v3_vm_info * vm) {
650 struct guest_info * running_core;
653 PrintError(VM_NONE, VCORE_NONE, "Asked to stop nonexistent VM\n");
657 if ((vm->run_state != VM_RUNNING) &&
658 (vm->run_state != VM_SIMULATING)) {
659 PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);
// Cores poll run_state; setting it is what actually stops them.
663 vm->run_state = VM_STOPPED;
665 // Sanity check to catch any weird execution states
666 if (v3_wait_for_barrier(vm, NULL) == 0) {
667 v3_lower_barrier(vm);
670 // XXX force exit all cores via a cross call/IPI XXX
// Poll until every core has reached CORE_STOPPED (loop header elided).
674 int still_running = 0;
676 for (i = 0; i < vm->num_cores; i++) {
677 if (vm->cores[i].core_run_state != CORE_STOPPED) {
678 running_core = &vm->cores[i];
683 if (still_running == 0) {
// Nudge the scheduler about the last core seen still running.
687 v3_scheduler_stop_core(running_core);
690 V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
// Pause a running VM: raise the VM-wide barrier (quiescing all cores) and
// leave it raised, then mark the VM VM_PAUSED.  Resumed by v3_continue_vm().
696 int v3_pause_vm(struct v3_vm_info * vm) {
699 PrintError(VM_NONE, VCORE_NONE, "Asked to pause nonexistent VM\n");
703 if (vm->run_state != VM_RUNNING) {
704 PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");
// Spin until the barrier is successfully raised; it stays raised while paused.
708 while (v3_raise_barrier(vm, NULL) == -1);
710 vm->run_state = VM_PAUSED;
// Resume a paused VM: mark it VM_RUNNING and lower the barrier that
// v3_pause_vm() left raised, releasing all virtual cores.
716 int v3_continue_vm(struct v3_vm_info * vm) {
719 PrintError(VM_NONE, VCORE_NONE, "Asked to continue nonexistent VM\n");
723 if (vm->run_state != VM_PAUSED) {
724 PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");
// Order matters: set the state before releasing cores from the barrier.
728 vm->run_state = VM_RUNNING;
730 v3_lower_barrier(vm);
// Per-core timeout callback used by v3_simulate_vm(): marks this core as
// timed out in the shared bitmap, then spins until the controller clears
// the bit (which it does when the simulation window is torn down).
737 static int sim_callback(struct guest_info * core, void * private_data) {
738 struct v3_bitmap * timeout_map = private_data;
740 v3_bitmap_set(timeout_map, core->vcpu_id);
742 V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
744 while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
745 // We spin here if there is no one to yield to
// Run a paused VM for approximately msecs milliseconds, then re-pause it.
// Arms a cycle-count timeout (msecs * cpu_khz) with sim_callback on every
// core, releases the VM, and blocks until every core has hit its timeout.
// Requires the VM to be in VM_PAUSED on entry; leaves it VM_PAUSED on exit.
755 int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
756 struct v3_bitmap timeout_map;
760 uint64_t cpu_khz = V3_CPU_KHZ();
763 PrintError(VM_NONE, VCORE_NONE, "Asked to simulate nonexistent VM\n");
767 if (vm->run_state != VM_PAUSED) {
768 PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");
772 /* AT this point VM is paused */
775 v3_bitmap_init(&timeout_map, vm->num_cores);
780 // calculate cycles from msecs...
781 // IMPORTANT: Floating point not allowed.
782 cycles = (msecs * cpu_khz);
786 V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);
// Arm one timeout per core; each will set its bit in timeout_map and spin.
790 for (i = 0; i < vm->num_cores; i++) {
791 if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
792 PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);
797 V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");
800 // Run the simulation
801 // vm->run_state = VM_SIMULATING;
802 vm->run_state = VM_RUNNING;
803 v3_lower_barrier(vm);
806 V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");
808 // block until simulation is complete
// Poll the bitmap until every core has reported its timeout fired.
809 while (all_blocked == 0) {
812 for (i = 0; i < vm->num_cores; i++) {
813 if (v3_bitmap_check(&timeout_map, i) == 0) {
818 if (all_blocked == 1) {
822 // Intentionally spin if there is no one to yield to
827 V3_Print(vm, VCORE_NONE,"Simulation is complete\n");
829 // Simulation is complete
830 // Reset back to PAUSED state
// Raise the barrier first, then clear the bitmap so the spinning
// sim_callbacks fall through into the barrier wait.
832 v3_raise_barrier_nowait(vm, NULL);
833 vm->run_state = VM_PAUSED;
835 v3_bitmap_reset(&timeout_map);
837 v3_wait_for_barrier(vm, NULL);
// Snapshot the VM's externally visible state into caller-provided buffers:
//  base - overall run state
//  core - per-vcore state (capped at core->num_vcores entries on input)
//  mem  - per-base-region info (capped at mem->num_regions entries on input)
// Translates internal enums to the public V3_* constants; unknown values
// map to the corresponding *_UNKNOWN.
843 int v3_get_state_vm(struct v3_vm_info *vm,
844 struct v3_vm_base_state *base,
845 struct v3_vm_core_state *core,
846 struct v3_vm_mem_state *mem)
851 extern uint64_t v3_mem_block_size;
853 if (!vm || !base || !core || !mem) {
854 PrintError(VM_NONE, VCORE_NONE, "Invalid rquest to v3_get_state_vm\n");
// Clamp output counts to what the caller's buffers can hold.
858 numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
859 numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
861 switch (vm->run_state) {
862 case VM_INVALID: base->state = V3_VM_INVALID; break;
863 case VM_RUNNING: base->state = V3_VM_RUNNING; break;
864 case VM_STOPPED: base->state = V3_VM_STOPPED; break;
865 case VM_PAUSED: base->state = V3_VM_PAUSED; break;
866 case VM_ERROR: base->state = V3_VM_ERROR; break;
867 case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
868 default: base->state = V3_VM_UNKNOWN; break;
871 for (i=0;i<numcores;i++) {
872 switch (vm->cores[i].core_run_state) {
873 case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
874 case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
875 case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
876 default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
878 switch (vm->cores[i].cpu_mode) {
879 case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
880 case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
881 case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
882 case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
883 case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
884 case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
885 default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
887 switch (vm->cores[i].shdw_pg_mode) {
888 case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
889 case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
890 default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
892 switch (vm->cores[i].mem_mode) {
893 case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
894 case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
895 default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
898 core->vcore[i].pcore=vm->cores[i].pcpu_id;
899 core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
900 core->vcore[i].num_exits=vm->cores[i].num_exits;
903 core->num_vcores=numcores;
// NOTE(review): this loop is bounded by num_base_regions, not by the
// numregions clamp computed above — if the caller's mem->region[] is
// smaller than num_base_regions this writes past it. Should iterate to
// numregions; flagged for fix.
905 for (i=0;i<vm->mem_map.num_base_regions;i++) {
906 mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
907 mem->region[i].size = v3_mem_block_size;
908 #ifdef V3_CONFIG_SWAPPING
909 mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
910 mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;
912 mem->region[i].swapped = 0;
913 mem->region[i].pinned = 0;
918 mem->num_regions=numregions;
924 #ifdef V3_CONFIG_CHECKPOINT
925 #include <palacios/vmm_checkpoint.h>
927 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
928 if (!vm || !store || !url) {
929 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_save_vm\n");
932 return v3_chkpt_save_vm(vm, store, url, opts);
936 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
937 if (!vm || !store || !url) {
938 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_load_vm\n");
941 return v3_chkpt_load_vm(vm, store, url, opts);
944 #ifdef V3_CONFIG_LIVE_MIGRATION
945 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
946 if (!vm || !store || !url) {
947 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_send_vm\n");
950 return v3_chkpt_send_vm(vm, store, url, opts);
954 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
955 if (!vm || !store || !url) {
956 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_receive_vm\n");
959 return v3_chkpt_receive_vm(vm, store, url, opts);
// Destroy a VM and release all of its resources.  The VM must be stopped
// (VM_STOPPED) or in error (VM_ERROR); frees devices, then each core's
// scheduler state and core state, then the VM-level scheduler registration
// and internal structures.
966 int v3_free_vm(struct v3_vm_info * vm) {
968 // deinitialize guest (free memory, etc...)
971 PrintError(VM_NONE, VCORE_NONE, "Asked to free nonexistent VM\n");
975 if ((vm->run_state != VM_STOPPED) &&
976 (vm->run_state != VM_ERROR)) {
977 PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);
// Devices first: they may hold references into core/VM state.
981 v3_free_vm_devices(vm);
984 for (i = 0; i < vm->num_cores; i++) {
985 v3_scheduler_free_core(&(vm->cores[i]));
986 v3_free_core(&(vm->cores[i]));
990 v3_scheduler_free_vm(vm);
991 v3_free_vm_internal(vm);
// Report the host CPU's operating mode (32-bit build variant; the guarding
// #ifdef and the CR4 read that fills cr4_val are elided in this view).
// Inspects CR4.PAE to distinguish PROTECTED_PAE from plain protected mode.
1003 v3_cpu_mode_t v3_get_host_cpu_mode() {
1005 struct cr4_32 * cr4;
1013 cr4 = (struct cr4_32 *)&(cr4_val);
1015 if (cr4->pae == 1) {
1016 return PROTECTED_PAE;
1024 v3_cpu_mode_t v3_get_host_cpu_mode() {
// Conditional debug print: formats like printf into a 2048-byte buffer and
// emits it via V3_Print, but only when v3_dbg_enable == 1.
// (buf declaration and va_start/va_end are elided in this view.)
1030 void v3_print_cond(const char * fmt, ...) {
1031 if (v3_dbg_enable == 1) {
1036 vsnprintf(buf, 2048, fmt, ap);
1039 V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
// Deliver interrupt `vector` to physical CPU `logical_cpu` on behalf of vm,
// via the host OS's interrupt_cpu hook.  Silently does nothing if the hook
// table or the hook itself is absent.
1045 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
1046 extern struct v3_os_hooks * os_hooks;
1048 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
1049 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
1055 int v3_vm_enter(struct guest_info * info) {
1056 switch (v3_mach_type) {
1057 #ifdef V3_CONFIG_SVM
1059 case V3_SVM_REV3_CPU:
1060 return v3_svm_enter(info);
1065 case V3_VMX_EPT_CPU:
1066 case V3_VMX_EPT_UG_CPU:
1067 return v3_vmx_enter(info);
1071 PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
// Accessor: returns the host-private data pointer associated with VM x.
// No NULL check — caller must pass a valid VM.
1077 void *v3_get_host_vm(struct v3_vm_info *x)
1080 return x->host_priv_data;
1086 int v3_get_vcore(struct guest_info *x)