2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
36 #include <palacios/svm.h>
39 #include <palacios/vmx.h>
42 #ifdef V3_CONFIG_CHECKPOINT
43 #include <palacios/vmm_checkpoint.h>
// Per-physical-CPU virtualization capability, filled in by the arch backends
// during init_cpu (V3_INVALID_CPU until probed).
47 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
// Overall machine type; set in Init_V3 to the arch of the first CPU that
// initializes successfully, and used by start_core/v3_vm_enter to dispatch.
48 v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;
// Host-OS callback table handed to Init_V3; NULL until the host registers it.
50 struct v3_os_hooks * os_hooks = NULL;
// Global gate for conditional debug output (see v3_print_cond).
51 int v3_dbg_enable = 0;
// Per-CPU initialization callback, run on each target CPU via
// hooks->call_on_cpu from Init_V3. Probes for SVM or VMX support and
// initializes the matching backend, which records the arch in
// v3_cpu_types[cpu_id].
// NOTE(review): the SVM/VMX probes sit under elided #ifdef arch guards in
// the full file; this fragment omits those preprocessor lines.
55 static void init_cpu(void * arg) {
56 uint32_t cpu_id = (uint32_t)(addr_t)arg;
61 if (v3_is_svm_capable()) {
62 PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
63 v3_init_svm_cpu(cpu_id);
68 if (v3_is_vmx_capable()) {
69 PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
70 v3_init_vmx_cpu(cpu_id);
// Neither backend available: this CPU cannot host guest cores.
75 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
// Per-CPU teardown callback — inverse of init_cpu. Dispatches on the arch
// recorded in v3_cpu_types[cpu_id] and deinitializes that backend.
// NOTE(review): several SVM/VMX case labels are elided in this fragment.
80 static void deinit_cpu(void * arg) {
81 uint32_t cpu_id = (uint32_t)(addr_t)arg;
84 switch (v3_cpu_types[cpu_id]) {
88 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
89 v3_deinit_svm_cpu(cpu_id);
95 case V3_VMX_EPT_UG_CPU:
96 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
97 v3_deinit_vmx_cpu(cpu_id);
// No virtualization support was ever detected on this CPU; nothing to undo.
102 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/*
 * Check whether the host CPU is currently running in 64-bit long mode.
 *
 * Reads the EFER MSR (0xc0000080) and requires both LME (bit 8, 0x100)
 * and LMA (bit 10, 0x400) to be set, i.e. (low & 0x500) == 0x500.
 *
 * Returns: nonzero if the host is in long mode, 0 otherwise.
 */
static int in_long_mode(void)
{
    uint32_t high = 0;
    uint32_t low = 0;

    v3_get_msr(0xc0000080, &high, &low); // EFER

    return ((low & 0x500) == 0x500);     // LMA and LME both set
}
// Global one-time VMM initialization, called by the host-OS embedding shim.
// Order matters: host hooks and options first, then memory manager, device
// registry, shadow paging, cpu_mapper, scheduler, and extensions — and only
// then per-CPU backend init (init_cpu) on every CPU permitted by cpu_mask.
// cpu_mask is a byte-indexed bitmap over physical CPU ids; NULL means "all".
121 void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {
126 V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");
// 32-bit hosts are rejected at compile time; 64-bit hosts must already be
// in long mode (LME+LMA in EFER) or we refuse to continue.
131 #error Palacios does not support compilation for a 32 bit host OS!!!!
133 if (!in_long_mode()) {
134 PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");
139 // Set global variables.
// Clamp the host's CPU request to what Palacios was compiled for.
142 if (num_cpus>V3_CONFIG_MAX_CPUS) {
143 PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
146 // Determine the global machine type
147 v3_mach_type = V3_INVALID_CPU;
149 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
150 v3_cpu_types[i] = V3_INVALID_CPU;
153 // Parse host-os defined options into an easily-accessed format.
154 v3_parse_options(options);
156 // Memory manager initialization
159 // Register all the possible device types
162 // Register all shadow paging handlers
163 V3_init_shdw_paging();
165 // Initialize the cpu_mapper framework (must be before extensions)
166 V3_init_cpu_mapper();
168 // Initialize the scheduler framework (must be before extensions)
169 V3_init_scheduling();
171 // Register all extensions
172 V3_init_extensions();
174 // Enabling cpu_mapper
175 V3_enable_cpu_mapper();
177 // Enabling scheduler
178 V3_enable_scheduler();
181 #ifdef V3_CONFIG_SYMMOD
185 #ifdef V3_CONFIG_CHECKPOINT
186 V3_init_checkpoint();
// Probe/initialize virtualization on every permitted CPU. A NULL mask
// means every CPU; otherwise test bit 'minor' of byte 'major' of the mask.
189 if ((hooks) && (hooks->call_on_cpu)) {
191 for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
195 if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
196 V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
197 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
// The first successfully initialized CPU fixes the global machine type.
199 if (v3_mach_type == V3_INVALID_CPU) {
200 v3_mach_type = v3_cpu_types[i];
// VMM-wide teardown (body of the shutdown routine; its signature line is
// elided in this fragment). Releases subsystems in the reverse order of
// Init_V3: per-CPU backend deinit on every initialized CPU, then
// checkpointing, symmod, scheduler, cpu_mapper, extensions, and shadow
// paging.
212 // Reverse order of Init_V3
// Deinit only CPUs that were actually initialized (non-INVALID type).
216 if ((os_hooks) && (os_hooks->call_on_cpu)) {
217 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
218 if (v3_cpu_types[i] != V3_INVALID_CPU) {
219 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
220 //deinit_cpu((void *)(addr_t)i);
225 #ifdef V3_CONFIG_CHECKPOINT
226 V3_deinit_checkpoint();
229 #ifdef V3_CONFIG_SYMMOD
// Disable before deinit, mirroring the enable-after-init in Init_V3.
233 V3_disable_scheduler();
235 V3_disable_cpu_mapper();
237 V3_deinit_extensions();
239 V3_deinit_scheduling();
241 V3_deinit_cpu_mapper();
243 V3_deinit_shdw_paging();
255 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
256 return v3_cpu_types[cpu_id];
// Create (but do not start) a VM from a parsed configuration image.
// On success the VM is named (truncated to fit a fixed 128-byte buffer,
// always NUL-terminated) and registered with the cpu_mapper and scheduler
// frameworks. Error paths (elided returns) bail out after PrintError.
260 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
261 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
264 PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");
268 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
272 } else if (strlen(name) >= 128) {
273 PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");
// memset + strncpy(127) guarantees vm->name is NUL-terminated.
276 memset(vm->name, 0, 128);
277 strncpy(vm->name, name, 127);
279 if(v3_cpu_mapper_register_vm(vm) == -1) {
281 PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
285 * Register this VM with the palacios scheduler. It will ask for admission
288 if(v3_scheduler_register_vm(vm) == -1) {
290 PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
// Host-thread entry point for one virtual core. Registers the core with
// the scheduler, then dispatches into the arch-specific guest-run loop
// (SVM or VMX) selected by the global v3_mach_type; the backend's return
// value is the thread's result.
// NOTE(review): additional case labels sit under elided #ifdef guards.
299 static int start_core(void * p)
301 struct guest_info * core = (struct guest_info *)p;
303 if (v3_scheduler_register_core(core) == -1){
304 PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);
307 PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
308 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
310 switch (v3_mach_type) {
313 case V3_SVM_REV3_CPU:
314 return v3_start_svm_guest(core);
320 case V3_VMX_EPT_UG_CPU:
321 return v3_start_vmx_guest(core);
325 PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
// Launch (or resume) all virtual cores of a stopped VM.
// cpu_mask restricts which physical CPUs may host vcores; bits for CPUs
// with no detected virtualization support are stripped before admission
// control. After cpu_mapper and scheduler admission, one host thread
// running start_core is created per vcore on its mapped physical CPU.
332 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
335 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
336 uint32_t avail_cores = 0;
// Refuse to double-start: only VM_STOPPED VMs may be launched.
339 if (vm->run_state != VM_STOPPED) {
340 PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);
345 /// CHECK IF WE ARE MULTICORE ENABLED....
347 V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
348 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
351 // Check that enough cores are present in the mask to handle vcores
352 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
// Drop mask bits for CPUs that never initialized a virtualization backend.
356 if (core_mask[major] & (0x1 << minor)) {
357 if (v3_cpu_types[i] == V3_INVALID_CPU) {
358 core_mask[major] &= ~(0x1 << minor);
365 vm->avail_cores = avail_cores;
367 if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
368 PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
371 if (v3_scheduler_admit_vm(vm) != 0){
372 PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
375 vm->run_state = VM_RUNNING;
378 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
380 struct guest_info * core = &(vm->cores[vcore_id]);
382 PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
383 vcore_id, core->pcpu_id);
385 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
387 PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
388 core->pcpu_id, start_core, core, core->exec_name);
// Fresh cores start parked in CORE_STOPPED; a resume keeps prior state.
390 if (core->core_run_state==CORE_INVALID) {
391 // launch of a fresh VM
392 core->core_run_state = CORE_STOPPED;
393 // core zero will turn itself on
395 // this is a resume - use whatever its current run_state is
398 core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
400 if (core->core_thread == NULL) {
401 PrintError(vm, VCORE_NONE, "Thread launch failed\n");
// Reset a single virtual core to the given RIP, dispatching to the SVM or
// VMX reset path based on the arch of the physical CPU the core runs on.
// NOTE(review): additional case labels sit under elided #ifdef guards.
412 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
414 switch (v3_cpu_types[core->pcpu_id]) {
417 case V3_SVM_REV3_CPU:
418 PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
419 return v3_reset_svm_vm_core(core, rip);
424 case V3_VMX_EPT_UG_CPU:
425 PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
426 return v3_reset_vmx_vm_core(core, rip);
430 PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
439 /* move a virtual core to different physical core */
// Under the VM-wide barrier (no vcore executes guest code during the move):
// re-admit the vcore with the cpu_mapper for the target CPU, flush any
// per-CPU arch state (e.g. VMX state) on the old CPU, move the host thread,
// and finally update core->pcpu_id. Returns nonzero (elided returns) on
// invalid vcore, same-CPU request, missing thread, or admission failure.
440 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
441 struct guest_info * core = NULL;
443 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
444 PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);
448 core = &(vm->cores[vcore_id]);
450 if (target_cpu == core->pcpu_id) {
451 PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
452 // well that was pointless
456 if (core->core_thread == NULL) {
457 PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");
// Spin until the barrier is raised; all vcores park before we proceed.
461 while (v3_raise_barrier(vm, NULL) == -1);
463 V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
465 // Double check that we weren't preemptively migrated
466 if (target_cpu != core->pcpu_id) {
468 V3_Print(vm, core, "Moving Core\n");
470 if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
471 PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
// VMX keeps per-physical-CPU state that must be flushed on the old CPU
// before the thread moves (SVM cases are elided in this fragment).
477 switch (v3_cpu_types[core->pcpu_id]) {
480 case V3_VMX_EPT_UG_CPU:
481 PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
482 V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
489 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
490 PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
491 core->vcpu_id, target_cpu);
492 v3_lower_barrier(vm);
496 /* There will be a benign race window here:
497 core->pcpu_id will be set to the target core before its fully "migrated"
498 However the core will NEVER run on the old core again, its just in flight to the new core
500 core->pcpu_id = target_cpu;
502 V3_Print(vm, core, "core now at %d\n", core->pcpu_id);
505 v3_lower_barrier(vm);
510 /* move a memory region to memory with affinity for a specific physical core */
// Migrate the base memory region containing gpa so its backing pages live
// on the NUMA node of target_cpu. No-op if affinity already matches.
// Under the VM-wide barrier: allocate new backing on the target node, copy
// the contents, repoint the region, then invalidate every core's paging
// structures (shadow PTs or nested ranges). If a core's paging mode is
// unrecognized, the original backing is restored and the new pages freed.
511 int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {
514 struct v3_mem_region *reg;
520 old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);
523 PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);
527 new_node = v3_numa_cpu_to_node(target_cpu);
530 PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);
534 if (new_node==old_node) {
535 PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");
539 // We are now going to change the universe, so
540 // we'll barrier everyone first
542 while (v3_raise_barrier(vm, NULL) == -1);
546 reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);
549 PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");
// Only allocated base regions can be migrated (not aliases/copies).
553 if (!(reg->flags.base) || !(reg->flags.alloced)) {
554 PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);
558 // we now have the allocated base region corresponding to - and not a copy
559 // we will rewrite this region after moving its contents
561 // first, let's double check that we are in fact changing the numa_id...
563 if (reg->numa_id==new_node) {
564 PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");
568 // region uses exclusive addressing [guest_start,guest_end)
569 num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;
571 new_hpa = V3_AllocPagesExtended(num_pages,
574 0); // no constraints given new shadow pager impl
577 PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");
581 // Note, assumes virtual contiguity in the host OS...
582 memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);
// Remember the old backing so we can roll back on invalidation failure.
584 old_hpa = (void*)(reg->host_addr);
585 old_node = (int)(reg->numa_id);
587 reg->host_addr = (addr_t)new_hpa;
588 reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);
590 // flush all page tables / kill all humans
592 for (i=0;i<vm->num_cores;i++) {
593 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
594 v3_invalidate_shadow_pts(&(vm->cores[i]));
595 } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
596 // nested invalidator uses inclusive addressing [start,end], not [start,end)
597 v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);
// Unknown paging mode: roll back to the old backing and free the new pages.
599 PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
600 // We'll restore things...
601 reg->host_addr = (addr_t) old_hpa;
602 reg->numa_id = old_node;
603 V3_FreePages(new_hpa,num_pages);
608 // Now the old region can go away...
609 V3_FreePages(old_hpa,num_pages);
611 PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
612 (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));
615 v3_lower_barrier(vm);
// Error-path exit (elided label): the barrier is lowered here as well.
620 v3_lower_barrier(vm);
// Stop a running (or simulating) VM: set VM_STOPPED, then wait until every
// vcore observes the state change and parks in CORE_STOPPED, nudging any
// still-running core through the scheduler. The barrier check catches a
// core stuck inside a raised barrier.
624 int v3_stop_vm(struct v3_vm_info * vm) {
626 struct guest_info * running_core;
628 if ((vm->run_state != VM_RUNNING) &&
629 (vm->run_state != VM_SIMULATING)) {
630 PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);
634 vm->run_state = VM_STOPPED;
636 // Sanity check to catch any weird execution states
637 if (v3_wait_for_barrier(vm, NULL) == 0) {
638 v3_lower_barrier(vm);
641 // XXX force exit all cores via a cross call/IPI XXX
// Poll until every vcore has reached CORE_STOPPED.
645 int still_running = 0;
647 for (i = 0; i < vm->num_cores; i++) {
648 if (vm->cores[i].core_run_state != CORE_STOPPED) {
649 running_core = &vm->cores[i];
654 if (still_running == 0) {
// Ask the scheduler to stop the straggler before polling again.
658 v3_scheduler_stop_core(running_core);
661 V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
667 int v3_pause_vm(struct v3_vm_info * vm) {
669 if (vm->run_state != VM_RUNNING) {
670 PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");
674 while (v3_raise_barrier(vm, NULL) == -1);
676 vm->run_state = VM_PAUSED;
682 int v3_continue_vm(struct v3_vm_info * vm) {
684 if (vm->run_state != VM_PAUSED) {
685 PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");
689 vm->run_state = VM_RUNNING;
691 v3_lower_barrier(vm);
// Core-timeout callback used by v3_simulate_vm: sets this vcore's bit in
// the shared timeout bitmap, then spins (yield loop body elided in this
// fragment) until the simulation driver clears the bit, keeping the core
// out of guest execution while the simulation winds down.
698 static int sim_callback(struct guest_info * core, void * private_data) {
699 struct v3_bitmap * timeout_map = private_data;
701 v3_bitmap_set(timeout_map, core->vcpu_id);
703 V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
705 while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
706 // We spin here if there is noone to yield to
// Run a paused VM for approximately 'msecs' of wall time, then re-pause it.
// A cycle timeout (msecs * CPU kHz, integer math only) is armed on every
// core via sim_callback; the VM is released, and this thread blocks until
// every core has set its bit in timeout_map, then re-raises the barrier,
// restores VM_PAUSED, and clears the bitmap to release the spinning cores.
716 int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
717 struct v3_bitmap timeout_map;
721 uint64_t cpu_khz = V3_CPU_KHZ();
723 if (vm->run_state != VM_PAUSED) {
724 PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");
728 /* AT this point VM is paused */
731 v3_bitmap_init(&timeout_map, vm->num_cores);
736 // calculate cycles from msecs...
737 // IMPORTANT: Floating point not allowed.
738 cycles = (msecs * cpu_khz);
742 V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);
746 for (i = 0; i < vm->num_cores; i++) {
747 if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
748 PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);
753 V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");
756 // Run the simulation
757 // vm->run_state = VM_SIMULATING;
758 vm->run_state = VM_RUNNING;
759 v3_lower_barrier(vm);
762 V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");
764 // block until simulation is complete
// A core is "blocked" once sim_callback set its bit in the timeout map.
765 while (all_blocked == 0) {
768 for (i = 0; i < vm->num_cores; i++) {
769 if (v3_bitmap_check(&timeout_map, i) == 0) {
774 if (all_blocked == 1) {
778 // Intentionally spin if there is no one to yield to
783 V3_Print(vm, VCORE_NONE,"Simulation is complete\n");
785 // Simulation is complete
786 // Reset back to PAUSED state
788 v3_raise_barrier_nowait(vm, NULL);
789 vm->run_state = VM_PAUSED;
// Clearing the bitmap releases the cores spinning in sim_callback.
791 v3_bitmap_reset(&timeout_map);
793 v3_wait_for_barrier(vm, NULL);
799 int v3_get_state_vm(struct v3_vm_info *vm,
800 struct v3_vm_base_state *base,
801 struct v3_vm_core_state *core,
802 struct v3_vm_mem_state *mem)
805 uint32_t numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
806 uint32_t numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
807 extern uint64_t v3_mem_block_size;
809 switch (vm->run_state) {
810 case VM_INVALID: base->state = V3_VM_INVALID; break;
811 case VM_RUNNING: base->state = V3_VM_RUNNING; break;
812 case VM_STOPPED: base->state = V3_VM_STOPPED; break;
813 case VM_PAUSED: base->state = V3_VM_PAUSED; break;
814 case VM_ERROR: base->state = V3_VM_ERROR; break;
815 case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
816 default: base->state = V3_VM_UNKNOWN; break;
819 for (i=0;i<numcores;i++) {
820 switch (vm->cores[i].core_run_state) {
821 case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
822 case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
823 case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
824 default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
826 switch (vm->cores[i].cpu_mode) {
827 case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
828 case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
829 case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
830 case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
831 case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
832 case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
833 default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
835 switch (vm->cores[i].shdw_pg_mode) {
836 case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
837 case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
838 default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
840 switch (vm->cores[i].mem_mode) {
841 case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
842 case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
843 default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
846 core->vcore[i].pcore=vm->cores[i].pcpu_id;
847 core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
848 core->vcore[i].num_exits=vm->cores[i].num_exits;
851 core->num_vcores=numcores;
853 for (i=0;i<vm->mem_map.num_base_regions;i++) {
854 mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
855 mem->region[i].size = v3_mem_block_size;
858 mem->num_regions=numregions;
864 #ifdef V3_CONFIG_CHECKPOINT
865 #include <palacios/vmm_checkpoint.h>
867 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
868 return v3_chkpt_save_vm(vm, store, url, opts);
872 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
873 return v3_chkpt_load_vm(vm, store, url, opts);
876 #ifdef V3_CONFIG_LIVE_MIGRATION
877 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
878 return v3_chkpt_send_vm(vm, store, url, opts);
882 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
883 return v3_chkpt_receive_vm(vm, store, url, opts);
// Free all resources of a VM that is stopped or in an error state:
// devices, per-core scheduler registration and core state, VM-level
// scheduler bookkeeping, and internal VM structures. The remaining
// teardown lines (final frees / return) are elided in this fragment.
890 int v3_free_vm(struct v3_vm_info * vm) {
892 // deinitialize guest (free memory, etc...)
// Refuse to free a VM whose cores may still be executing.
894 if ((vm->run_state != VM_STOPPED) &&
895 (vm->run_state != VM_ERROR)) {
896 PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);
900 v3_free_vm_devices(vm);
903 for (i = 0; i < vm->num_cores; i++) {
904 v3_scheduler_free_core(&(vm->cores[i]));
905 v3_free_core(&(vm->cores[i]));
909 v3_scheduler_free_vm(vm);
910 v3_free_vm_internal(vm);
// Determine the host CPU's execution mode. Two build variants exist under
// elided #ifdef guards: the 32-bit variant below inspects CR4 (most of its
// register-reading plumbing is elided in this fragment), and the 64-bit
// variant's one-line body is also elided.
922 v3_cpu_mode_t v3_get_host_cpu_mode() {
932 cr4 = (struct cr4_32 *)&(cr4_val);
// PAE enabled in CR4 implies the host runs 32-bit paging with PAE.
935 return PROTECTED_PAE;
943 v3_cpu_mode_t v3_get_host_cpu_mode() {
949 void v3_print_cond(const char * fmt, ...) {
950 if (v3_dbg_enable == 1) {
955 vsnprintf(buf, 2048, fmt, ap);
958 V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
964 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
965 extern struct v3_os_hooks * os_hooks;
967 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
968 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
// Enter guest execution on the calling core for one entry/exit cycle,
// dispatching to the SVM or VMX entry path based on the global machine
// type. Additional case labels sit under elided #ifdef guards.
// NOTE(review): "Attemping" typo is in a runtime error string and is
// deliberately left untouched here.
974 int v3_vm_enter(struct guest_info * info) {
975 switch (v3_mach_type) {
978 case V3_SVM_REV3_CPU:
979 return v3_svm_enter(info);
985 case V3_VMX_EPT_UG_CPU:
986 return v3_vmx_enter(info);
990 PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
996 void *v3_get_host_vm(struct v3_vm_info *x)
999 return x->host_priv_data;
1005 int v3_get_vcore(struct guest_info *x)