2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
34 #include <interfaces/vmm_file.h>
37 #include <palacios/svm.h>
40 #include <palacios/vmx.h>
43 #ifdef V3_CONFIG_CHECKPOINT
44 #include <palacios/vmm_checkpoint.h>
// Per-physical-CPU virtualization capability (SVM/VMX variant), filled in
// by init_cpu() on each CPU; V3_INVALID_CPU until probed.
48 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
// Global machine type; set in Init_V3 from the first successfully
// initialized CPU's type.
49 v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;
// Host-OS callback table; presumably assigned in Init_V3 from the hooks
// argument -- the assignment is elided in this view.  TODO confirm.
51 struct v3_os_hooks * os_hooks = NULL;
// Global debug-output gate consulted by v3_print_cond().
52 int v3_dbg_enable = 0;
// init_cpu() - per-CPU virtualization init, executed on each physical CPU
// via the host's call_on_cpu hook (see Init_V3).  Probes for SVM (AMD)
// and VMX (Intel) support and initializes whichever is present; logs an
// error if the CPU has neither.
// NOTE(review): this file carries embedded original line numbers and has
// elided lines -- the arch #ifdef guards and closing braces of this
// function are not visible here.  Code left byte-identical.
56 static void init_cpu(void * arg) {
// The CPU id is passed through the void* argument as an integer.
57 uint32_t cpu_id = (uint32_t)(addr_t)arg;
62 if (v3_is_svm_capable()) {
63 PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
// presumably records the detected arch in v3_cpu_types[cpu_id] -- TODO confirm
64 v3_init_svm_cpu(cpu_id);
69 if (v3_is_vmx_capable()) {
70 PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
71 v3_init_vmx_cpu(cpu_id);
76 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// deinit_cpu() - per-CPU teardown counterpart of init_cpu(), dispatched on
// the recorded v3_cpu_types[] entry for this CPU.  SVM and VMX CPUs get
// the matching arch-specific deinit; anything else logs an error.
// NOTE(review): several switch case labels, #ifdef guards and closing
// braces are elided in this view.  Code left byte-identical.
81 static void deinit_cpu(void * arg) {
// The CPU id is passed through the void* argument as an integer.
82 uint32_t cpu_id = (uint32_t)(addr_t)arg;
85 switch (v3_cpu_types[cpu_id]) {
89 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
90 v3_deinit_svm_cpu(cpu_id);
96 case V3_VMX_EPT_UG_CPU:
97 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
98 v3_deinit_vmx_cpu(cpu_id);
103 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/*
 * in_long_mode() - check whether the host CPU is executing in 64-bit
 * long mode.
 *
 * Reads the EFER MSR (0xc0000080) and tests the LME (bit 8) and LMA
 * (bit 10) flags together (mask 0x500).
 *
 * Returns nonzero iff both LME and LMA are set.
 *
 * NOTE(review): the original fragment was missing the local declarations
 * and braces (elided lines); reconstructed here.
 */
static int in_long_mode()
{
    uint32_t high, low;

    v3_get_msr(0xc0000080, &high, &low); // EFER

    return ((low & 0x500) == 0x500); // LMA and LME set
}
// Init_V3() - top-level VMM bring-up, called once by the embedding host OS.
//   hooks    - host OS callback table (printing, threading, call_on_cpu, ...)
//   cpu_mask - bitmap of physical CPUs Palacios may use (NULL = all)
//   num_cpus - number of CPUs the host reports
//   options  - host-supplied option string, parsed by v3_parse_options()
// Initializes the core subsystems in dependency order (memory, devices,
// shadow paging, cpu_mapper, scheduler, extensions, checkpointing), then
// runs init_cpu() on every CPU selected by cpu_mask and derives the global
// v3_mach_type from the first initialized CPU.
// NOTE(review): many lines are elided in this view (variable declarations,
// #ifdef/#endif pairs, major/minor computation, closing braces).  Code
// left byte-identical.
122 void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {
127 V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");
// Presumably guarded by a 32-bit-host #ifdef that is elided here -- TODO confirm.
132 #error Palacios does not support compilation for a 32 bit host OS!!!!
// Refuse to run unless the host CPU is already in 64-bit long mode.
134 if (!in_long_mode()) {
135 PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");
140 // Set global variables.
// Clamp/warn if the host claims more CPUs than we were compiled for.
143 if (num_cpus>V3_CONFIG_MAX_CPUS) {
144 PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
147 // Determine the global machine type
148 v3_mach_type = V3_INVALID_CPU;
150 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
151 v3_cpu_types[i] = V3_INVALID_CPU;
154 // Parse host-os defined options into an easily-accessed format.
155 v3_parse_options(options);
161 // Memory manager initialization
164 // Register all the possible device types
167 // Register all shadow paging handlers
168 V3_init_shdw_paging();
170 #ifdef V3_CONFIG_SWAPPING
174 // Initialize the cpu_mapper framework (must be before extensions)
175 V3_init_cpu_mapper();
177 // Initialize the scheduler framework (must be before extensions)
178 V3_init_scheduling();
180 // Register all extensions
181 V3_init_extensions();
183 // Enabling cpu_mapper
184 V3_enable_cpu_mapper();
186 // Enabling scheduler
187 V3_enable_scheduler();
190 #ifdef V3_CONFIG_SYMMOD
194 #ifdef V3_CONFIG_CHECKPOINT
195 V3_init_checkpoint();
// Per-CPU hardware init: only CPUs selected by cpu_mask are initialized.
198 if ((hooks) && (hooks->call_on_cpu)) {
200 for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
// major/minor index into the byte-granular cpu_mask (computation elided here).
204 if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
205 V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
206 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
// The global machine type is taken from the first CPU that initialized.
208 if (v3_mach_type == V3_INVALID_CPU) {
209 v3_mach_type = v3_cpu_types[i];
// NOTE(review): fragment of the VMM shutdown path -- the enclosing function
// header (presumably Shutdown_V3) is not visible in this view.  Tears down
// state in the reverse order of Init_V3: per-CPU deinit on every CPU that
// was initialized, then checkpoint/symmod, scheduler and cpu_mapper
// disable, extension/scheduler/cpu_mapper deinit, swapping, and finally
// shadow paging.
221 // Reverse order of Init_V3
225 if ((os_hooks) && (os_hooks->call_on_cpu)) {
226 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
// Only CPUs that were actually probed/initialized get deinitialized.
227 if (v3_cpu_types[i] != V3_INVALID_CPU) {
228 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
229 //deinit_cpu((void *)(addr_t)i);
234 #ifdef V3_CONFIG_CHECKPOINT
235 V3_deinit_checkpoint();
238 #ifdef V3_CONFIG_SYMMOD
242 V3_disable_scheduler();
244 V3_disable_cpu_mapper();
246 V3_deinit_extensions();
248 V3_deinit_scheduling();
250 V3_deinit_cpu_mapper();
252 #ifdef V3_CONFIG_SWAPPING
253 v3_deinit_swapping();
256 V3_deinit_shdw_paging();
272 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
273 return v3_cpu_types[cpu_id];
// start_core() - host thread entry point for one virtual core (created in
// v3_create_vm, started in v3_start_vm).  Registers the core with the
// scheduler, then dispatches into the arch-specific guest run loop (SVM
// or VMX) based on the global v3_mach_type; the return value is the
// guest-run result.
// NOTE(review): #ifdef guards, additional case labels, error returns and
// closing braces are elided in this view.  Code left byte-identical.
276 static int start_core(void * p)
278 struct guest_info * core = (struct guest_info *)p;
280 if (v3_scheduler_register_core(core) == -1){
281 PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);
284 PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
285 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
287 switch (v3_mach_type) {
290 case V3_SVM_REV3_CPU:
291 return v3_start_svm_guest(core);
297 case V3_VMX_EPT_UG_CPU:
298 return v3_start_vmx_guest(core);
302 PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
// v3_create_vm() - construct a VM from a configuration image.
//   cfg       - guest configuration blob, consumed by v3_config_guest()
//   priv_data - host-OS private pointer attached to the VM
//   name      - VM name; truncated to 127 chars + NUL (vm->name is 128 bytes)
//   cpu_mask  - physical CPUs the VM's vcores may be mapped onto
// Registers the VM with the cpu_mapper and scheduler, admits it for
// mapping, then creates (but does not start) one host thread per vcore
// running start_core().  Returns the new VM, or presumably NULL on
// failure (error paths are elided here -- TODO confirm).
// NOTE(review): declarations, early-return branches and closing braces
// are elided in this view.  Code left byte-identical.
309 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name, unsigned int cpu_mask) {
310 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
314 PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");
318 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
// Name handling: overlong names are truncated, not rejected.
322 } else if (strlen(name) >= 128) {
323 PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");
326 memset(vm->name, 0, 128);
// memset above guarantees NUL termination despite strncpy's n=127 copy.
327 strncpy(vm->name, name, 127);
329 if(v3_cpu_mapper_register_vm(vm) == -1) {
331 PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
335 * Register this VM with the palacios scheduler. It will ask for admission
338 if(v3_scheduler_register_vm(vm) == -1) {
340 PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
343 if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
344 PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
// Create one (not-yet-running) host thread per virtual core.
347 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
349 struct guest_info * core = &(vm->cores[vcore_id]);
351 PrintDebug(vm, VCORE_NONE, "Creating virtual core %u on logical core %u\n",
352 vcore_id, core->pcpu_id);
354 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
356 PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
357 core->pcpu_id, start_core, core, core->exec_name);
359 core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
361 if (core->core_thread == NULL) {
362 PrintError(vm, VCORE_NONE, "Thread creation failed\n");
// v3_start_vm() - launch (or resume) a previously created VM.
//   cpu_mask - 32-bit bitmap of physical CPUs the VM may run on
// Validates the run state, prunes mask bits for CPUs with no detected
// virtualization support, counts the remaining CPUs, asks the scheduler
// for admission, then marks the VM running and starts every vcore's host
// thread.  Returns 0 on success, presumably -1 on error (error returns
// are elided in this view -- TODO confirm).
// NOTE(review): declarations, #ifdef regions, and closing braces are
// elided in this view.  Code left byte-identical.
371 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
374 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
375 uint32_t avail_cores = 0;
379 PrintError(VM_NONE, VCORE_NONE, "Asked to start nonexistent VM\n");
// Only a stopped VM may be (re)started.
383 if (vm->run_state != VM_STOPPED) {
384 PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);
389 /// CHECK IF WE ARE MULTICORE ENABLED....
391 V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
392 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
395 // Check that enough cores are present in the mask to handle vcores
396 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
// major/minor index into core_mask byte-wise (computation elided here).
400 if (core_mask[major] & (0x1 << minor)) {
// Silently drop CPUs that were never initialized for virtualization.
401 if (v3_cpu_types[i] == V3_INVALID_CPU) {
402 core_mask[major] &= ~(0x1 << minor);
409 vm->avail_cores = avail_cores;
411 if (v3_scheduler_admit_vm(vm) != 0){
412 PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
415 vm->run_state = VM_RUNNING;
417 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
419 struct guest_info * core = &(vm->cores[vcore_id]);
421 PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
422 vcore_id, core->pcpu_id);
424 if (core->core_run_state==CORE_INVALID) {
425 // launch of a fresh VM
426 core->core_run_state = CORE_STOPPED;
427 // core zero will turn itself on
429 // this is a resume - use whatever its current run_state is
432 V3_START_THREAD(core->core_thread);
// v3_reset_vm_core() - reset a single virtual core to begin execution at
// the given guest rip, dispatching on the arch type of the physical CPU
// the core currently sits on.  Returns the arch-specific reset result.
// NOTE(review): additional case labels, #ifdef guards, error return and
// closing braces are elided in this view.  Code left byte-identical.
441 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
443 switch (v3_cpu_types[core->pcpu_id]) {
446 case V3_SVM_REV3_CPU:
447 PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
448 return v3_reset_svm_vm_core(core, rip);
453 case V3_VMX_EPT_UG_CPU:
454 PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
455 return v3_reset_vmx_vm_core(core, rip);
459 PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
// NOTE(review): declarations, error returns, #ifdef regions and closing
// braces are elided in this view.  Code left byte-identical.
468 /* move a virtual core to different physical core */
// v3_move_vm_core() - migrate vcore vcore_id of vm onto physical CPU
// target_cpu.  Validates arguments, raises the VM-wide barrier so no
// vcore is executing guest code, asks the cpu_mapper to admit the core
// on the target CPU, flushes arch (VMX) state tied to the old CPU, moves
// the host thread, updates core->pcpu_id, and lowers the barrier.
469 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
470 struct guest_info * core = NULL;
473 PrintError(VM_NONE, VCORE_NONE, "Asked to move core of nonexistent VM\n");
477 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
478 PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);
482 core = &(vm->cores[vcore_id]);
484 if (target_cpu == core->pcpu_id) {
485 PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
486 // well that was pointless
490 if (core->core_thread == NULL) {
491 PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");
// Spin until the barrier is raised: all vcores must be stalled before we move one.
495 while (v3_raise_barrier(vm, NULL) == -1);
497 V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
499 // Double check that we weren't preemptively migrated
500 if (target_cpu != core->pcpu_id) {
502 V3_Print(vm, core, "Moving Core\n");
504 if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
505 PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
// Arch-specific flush of per-CPU state: VMX keeps VMCS state tied to the old CPU.
511 switch (v3_cpu_types[core->pcpu_id]) {
514 case V3_VMX_EPT_UG_CPU:
515 PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
516 V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
523 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
524 PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
525 core->vcpu_id, target_cpu);
// Drop the barrier before bailing so the VM can keep running on failure.
526 v3_lower_barrier(vm);
530 /* There will be a benign race window here:
531 core->pcpu_id will be set to the target core before its fully "migrated"
532 However the core will NEVER run on the old core again, its just in flight to the new core
534 core->pcpu_id = target_cpu;
536 V3_Print(vm, core, "core now at %d\n", core->pcpu_id);
539 v3_lower_barrier(vm);
// NOTE(review): declarations, error returns and closing braces are elided
// in this view.  Code left byte-identical.
544 /* move a memory region to memory with affinity for a specific physical core */
// v3_move_vm_mem() - re-home the base memory region containing guest
// physical address gpa onto the NUMA node of target_cpu.  Under the
// VM-wide barrier: allocates replacement pages on the target node, copies
// the region's contents, swaps host_addr/numa_id, invalidates every
// core's paging structures (shadow or nested), then frees the old pages.
// On paging-invalidate failure the old region is restored.
545 int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {
548 struct v3_mem_region *reg;
555 PrintError(VM_NONE, VCORE_NONE, "Asked to move memory of nonexistent VM\n");
559 old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);
562 PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);
566 new_node = v3_numa_cpu_to_node(target_cpu);
569 PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);
// Fast path: the data already lives on the target node.
573 if (new_node==old_node) {
574 PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");
578 // We are now going to change the universe, so
579 // we'll barrier everyone first
581 while (v3_raise_barrier(vm, NULL) == -1);
585 reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);
588 PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");
// Only allocated base regions can be physically relocated.
592 if (!(reg->flags.base) || !(reg->flags.alloced)) {
593 PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);
597 // we now have the allocated base region corresponding to - and not a copy
598 // we will rewrite this region after moving its contents
600 // first, let's double check that we are in fact changing the numa_id...
602 if (reg->numa_id==new_node) {
603 PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");
607 // region uses exclusive addressing [guest_start,guest_end)
608 num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;
610 new_hpa = V3_AllocPagesExtended(num_pages,
// elided args presumably include PAGE_SIZE_4KB and the target NUMA node -- TODO confirm
613 0); // no constraints given new shadow pager impl
616 PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");
620 // Note, assumes virtual contiguity in the host OS...
621 memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);
// Remember the old backing so we can free it (or roll back) afterwards.
623 old_hpa = (void*)(reg->host_addr);
624 old_node = (int)(reg->numa_id);
626 reg->host_addr = (addr_t)new_hpa;
627 reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);
629 // flush all page tables / kill all humans
631 for (i=0;i<vm->num_cores;i++) {
632 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
633 v3_invalidate_shadow_pts(&(vm->cores[i]));
634 } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
635 // nested invalidator uses inclusive addressing [start,end], not [start,end)
636 v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);
638 PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
639 // We'll restore things...
640 reg->host_addr = (addr_t) old_hpa;
641 reg->numa_id = old_node;
642 V3_FreePages(new_hpa,num_pages);
647 // Now the old region can go away...
648 V3_FreePages(old_hpa,num_pages);
650 PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
651 (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));
654 v3_lower_barrier(vm);
// NOTE(review): this second lower_barrier is presumably on an error path
// whose surrounding branch is elided -- TODO confirm.
659 v3_lower_barrier(vm);
// v3_stop_vm() - stop a running (or simulating) VM.  Marks the VM stopped,
// then waits for every vcore to observe the state change and reach
// CORE_STOPPED, nudging the scheduler for any core still running.
// NOTE(review): loop structure, sleeps/yields, error returns and closing
// braces are elided in this view.  Code left byte-identical.
663 int v3_stop_vm(struct v3_vm_info * vm) {
665 struct guest_info * running_core;
668 PrintError(VM_NONE, VCORE_NONE, "Asked to stop nonexistent VM\n");
672 if ((vm->run_state != VM_RUNNING) &&
673 (vm->run_state != VM_SIMULATING)) {
674 PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);
// Cores poll run_state and stop themselves once they see VM_STOPPED.
678 vm->run_state = VM_STOPPED;
680 // Sanity check to catch any weird execution states
681 if (v3_wait_for_barrier(vm, NULL) == 0) {
682 v3_lower_barrier(vm);
685 // XXX force exit all cores via a cross call/IPI XXX
689 int still_running = 0;
691 for (i = 0; i < vm->num_cores; i++) {
692 if (vm->cores[i].core_run_state != CORE_STOPPED) {
693 running_core = &vm->cores[i];
698 if (still_running == 0) {
// Prod the scheduler so the still-running core gets a chance to notice.
702 v3_scheduler_stop_core(running_core);
705 V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
711 int v3_pause_vm(struct v3_vm_info * vm) {
714 PrintError(VM_NONE, VCORE_NONE, "Asked to pause nonexistent VM\n");
718 if (vm->run_state != VM_RUNNING) {
719 PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");
723 while (v3_raise_barrier(vm, NULL) == -1);
725 vm->run_state = VM_PAUSED;
731 int v3_continue_vm(struct v3_vm_info * vm) {
734 PrintError(VM_NONE, VCORE_NONE, "Asked to continue nonexistent VM\n");
738 if (vm->run_state != VM_PAUSED) {
739 PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");
743 vm->run_state = VM_RUNNING;
745 v3_lower_barrier(vm);
// sim_callback() - per-core timeout callback used by v3_simulate_vm().
// Sets this core's bit in the shared timeout bitmap (signalling "my
// simulation slice is done"), then spins until v3_simulate_vm() clears
// the bitmap, holding the core out of guest execution.
// NOTE(review): the loop body (presumably a yield call) and closing
// braces are elided in this view.  Code left byte-identical.
752 static int sim_callback(struct guest_info * core, void * private_data) {
753 struct v3_bitmap * timeout_map = private_data;
755 v3_bitmap_set(timeout_map, core->vcpu_id);
757 V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
759 while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
760 // We spin here if there is noone to yield to
// v3_simulate_vm() - run a paused VM for approximately msecs milliseconds,
// then re-pause it.  Arms a cycle-count timeout (sim_callback) on every
// core, lowers the barrier to let the VM run, waits until every core has
// set its bit in the timeout bitmap, then re-raises the barrier, resets
// the bitmap (releasing the cores spinning in sim_callback) and restores
// VM_PAUSED.  No floating point: cycles = msecs * cpu_khz.
// NOTE(review): declarations, error returns, yield/spin bodies and
// closing braces are elided in this view.  Code left byte-identical.
770 int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
771 struct v3_bitmap timeout_map;
775 uint64_t cpu_khz = V3_CPU_KHZ();
778 PrintError(VM_NONE, VCORE_NONE, "Asked to simulate nonexistent VM\n");
// Simulation only makes sense from the paused state (barrier already raised).
782 if (vm->run_state != VM_PAUSED) {
783 PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");
787 /* AT this point VM is paused */
790 v3_bitmap_init(&timeout_map, vm->num_cores);
795 // calculate cycles from msecs...
796 // IMPORTANT: Floating point not allowed.
797 cycles = (msecs * cpu_khz);
801 V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);
// Arm the per-core timeout; sim_callback fires when the cycle budget expires.
805 for (i = 0; i < vm->num_cores; i++) {
806 if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
807 PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);
812 V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");
815 // Run the simulation
816 // vm->run_state = VM_SIMULATING;
817 vm->run_state = VM_RUNNING;
818 v3_lower_barrier(vm);
821 V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");
823 // block until simulation is complete
824 while (all_blocked == 0) {
827 for (i = 0; i < vm->num_cores; i++) {
828 if (v3_bitmap_check(&timeout_map, i) == 0) {
831 if (all_blocked == 1) {
837 // Intentionally spin if there is no one to yield to
842 V3_Print(vm, VCORE_NONE,"Simulation is complete\n");
844 // Simulation is complete
845 // Reset back to PAUSED state
// Re-raise the barrier before releasing the cores from sim_callback.
847 v3_raise_barrier_nowait(vm, NULL);
848 vm->run_state = VM_PAUSED;
// Clearing the bitmap releases the cores spinning in sim_callback.
850 v3_bitmap_reset(&timeout_map);
852 v3_wait_for_barrier(vm, NULL);
// v3_get_state_vm() - snapshot VM, per-vcore and per-memory-region state
// into caller-supplied buffers, translating internal enums into the
// public V3_VM_* / V3_VCORE_* constants.
//   base - receives the VM run state
//   core - caller sets core->num_vcores to its buffer capacity; receives
//          per-vcore state (clamped to min(capacity, vm->num_cores))
//   mem  - caller sets mem->num_regions to its capacity; receives per-base-
//          region info (clamped likewise)
// Returns presumably 0 on success, -1 on NULL arguments (returns elided).
// NOTE(review): declarations, returns and closing braces are elided in
// this view.  Code left byte-identical.
858 int v3_get_state_vm(struct v3_vm_info *vm,
859 struct v3_vm_base_state *base,
860 struct v3_vm_core_state *core,
861 struct v3_vm_mem_state *mem)
866 extern uint64_t v3_mem_block_size;
868 if (!vm || !base || !core || !mem) {
869 PrintError(VM_NONE, VCORE_NONE, "Invalid rquest to v3_get_state_vm\n");
// Clamp output counts to the smaller of caller capacity and actual counts.
873 numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
874 numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
876 switch (vm->run_state) {
877 case VM_INVALID: base->state = V3_VM_INVALID; break;
878 case VM_RUNNING: base->state = V3_VM_RUNNING; break;
879 case VM_STOPPED: base->state = V3_VM_STOPPED; break;
880 case VM_RESETTING: base->state = V3_VM_RESETTING; break;
881 case VM_PAUSED: base->state = V3_VM_PAUSED; break;
882 case VM_ERROR: base->state = V3_VM_ERROR; break;
883 case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
884 default: base->state = V3_VM_UNKNOWN; break;
887 for (i=0;i<numcores;i++) {
888 switch (vm->cores[i].core_run_state) {
889 case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
890 case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
891 case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
892 default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
894 switch (vm->cores[i].cpu_mode) {
895 case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
896 case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
897 case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
898 case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
899 case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
900 case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
901 default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
903 switch (vm->cores[i].shdw_pg_mode) {
904 case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
905 case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
906 default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
908 switch (vm->cores[i].mem_mode) {
909 case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
910 case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
911 default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
914 core->vcore[i].pcore=vm->cores[i].pcpu_id;
915 core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
916 core->vcore[i].num_exits=vm->cores[i].num_exits;
// Report the clamped count actually written, not the caller's capacity.
919 core->num_vcores=numcores;
921 for (i=0;i<numregions;i++) {
922 mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
923 mem->region[i].size = v3_mem_block_size;
924 #ifdef V3_CONFIG_SWAPPING
925 mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
926 mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;
// #else branch (swapping disabled): report not-swapped / not-pinned.
928 mem->region[i].swapped = 0;
929 mem->region[i].pinned = 0;
934 mem->num_regions=numregions;
940 #ifdef V3_CONFIG_CHECKPOINT
941 #include <palacios/vmm_checkpoint.h>
943 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
944 if (!vm || !store || !url) {
945 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_save_vm\n");
948 return v3_chkpt_save_vm(vm, store, url, opts);
952 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
953 if (!vm || !store || !url) {
954 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_load_vm\n");
957 return v3_chkpt_load_vm(vm, store, url, opts);
960 #ifdef V3_CONFIG_LIVE_MIGRATION
961 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
962 if (!vm || !store || !url) {
963 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_send_vm\n");
966 return v3_chkpt_send_vm(vm, store, url, opts);
970 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
971 if (!vm || !store || !url) {
972 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_receive_vm\n");
975 return v3_chkpt_receive_vm(vm, store, url, opts);
// v3_free_vm() - release all resources of a stopped (or errored) VM:
// devices, per-core state, scheduler bookkeeping, and the VM structure
// itself.  Refuses to free a VM in any other run state.
// NOTE(review): error returns, memory-region teardown and closing braces
// are elided in this view.  Code left byte-identical.
982 int v3_free_vm(struct v3_vm_info * vm) {
984 // deinitialize guest (free memory, etc...)
987 PrintError(VM_NONE, VCORE_NONE, "Asked to free nonexistent VM\n");
// Only stopped or errored VMs may be torn down.
991 if ((vm->run_state != VM_STOPPED) &&
992 (vm->run_state != VM_ERROR)) {
993 PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);
997 v3_free_vm_devices(vm);
1000 for (i = 0; i < vm->num_cores; i++) {
1001 v3_scheduler_free_core(&(vm->cores[i]));
1002 v3_free_core(&(vm->cores[i]));
1006 v3_scheduler_free_vm(vm);
1007 v3_free_vm_internal(vm);
// v3_get_host_cpu_mode() - report the host CPU's paging/execution mode.
// This first variant reads CR4 (read elided here) and reports
// PROTECTED_PAE when CR4.PAE is set; presumably it is the 32-bit-host
// build, selected by an #ifdef that is elided in this view -- TODO confirm.
1019 v3_cpu_mode_t v3_get_host_cpu_mode() {
1021 struct cr4_32 * cr4;
// Reinterpret the raw CR4 value (read into cr4_val, elided) as bitfields.
1029 cr4 = (struct cr4_32 *)&(cr4_val);
1031 if (cr4->pae == 1) {
1032 return PROTECTED_PAE;
// Second variant of v3_get_host_cpu_mode(); its body (presumably
// "return LONG;" for 64-bit hosts) is elided in this view -- TODO confirm.
1040 v3_cpu_mode_t v3_get_host_cpu_mode() {
1046 void v3_print_cond(const char * fmt, ...) {
1047 if (v3_dbg_enable == 1) {
1052 vsnprintf(buf, 2048, fmt, ap);
1055 V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
1061 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
1062 extern struct v3_os_hooks * os_hooks;
1064 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
1065 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
// v3_vm_enter() - perform one guest entry on the calling core, dispatching
// to the arch-specific entry routine (SVM or VMX) based on the global
// v3_mach_type.  Returns the arch entry result.
// NOTE(review): additional case labels, #endif lines, error return and
// closing braces are elided in this view.  Code left byte-identical.
1071 int v3_vm_enter(struct guest_info * info) {
1072 switch (v3_mach_type) {
1073 #ifdef V3_CONFIG_SVM
1075 case V3_SVM_REV3_CPU:
1076 return v3_svm_enter(info);
1081 case V3_VMX_EPT_CPU:
1082 case V3_VMX_EPT_UG_CPU:
1083 return v3_vmx_enter(info);
1087 PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
1093 void *v3_get_host_vm(struct v3_vm_info *x)
1096 return x->host_priv_data;
1102 int v3_get_vcore(struct guest_info *x)