2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
36 #include <palacios/svm.h>
39 #include <palacios/vmx.h>
42 #ifdef V3_CONFIG_CHECKPOINT
43 #include <palacios/vmm_checkpoint.h>
/* Per-physical-CPU virtualization architecture detected by init_cpu() */
v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
/* Overall machine type; set in Init_V3() from the first initialized CPU */
v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;

/* Host OS callback table handed to Init_V3(); consumed by the V3_* wrappers.
   NOTE(review): the assignment from the hooks parameter is not visible in
   this fragment -- verify against the full file. */
struct v3_os_hooks * os_hooks = NULL;

/* Gates v3_print_cond() output; nonzero enables conditional debug printing */
int v3_dbg_enable = 0;
55 static void init_cpu(void * arg) {
56 uint32_t cpu_id = (uint32_t)(addr_t)arg;
61 if (v3_is_svm_capable()) {
62 PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
63 v3_init_svm_cpu(cpu_id);
68 if (v3_is_vmx_capable()) {
69 PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
70 v3_init_vmx_cpu(cpu_id);
75 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
80 static void deinit_cpu(void * arg) {
81 uint32_t cpu_id = (uint32_t)(addr_t)arg;
84 switch (v3_cpu_types[cpu_id]) {
88 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
89 v3_deinit_svm_cpu(cpu_id);
95 case V3_VMX_EPT_UG_CPU:
96 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
97 v3_deinit_vmx_cpu(cpu_id);
102 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
111 static int in_long_mode()
115 v3_get_msr(0xc0000080,&high,&low); // EFER
117 return ((low & 0x500)== 0x500); // LMA and LME set
/*
 * Global, one-time VMM initialization, invoked by the host OS embedding
 * Palacios.
 *
 * hooks    - host OS callback table (call_on_cpu is used below to fan out
 *            per-CPU initialization)
 * cpu_mask - optional byte-array bitmask of host CPUs Palacios may use;
 *            NULL means "all CPUs"
 * num_cpus - number of host CPUs reported by the host OS
 * options  - host-supplied option string, handed to v3_parse_options()
 *
 * NOTE(review): this fragment is missing interleaved lines (#ifdef guards,
 * closing braces, early returns, local declarations); comments below flag
 * the visible gaps -- verify against the full file.
 */
void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {

    // Deliberate print; the message itself documents why it exists
    V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");

    // NOTE(review): in the full file this #error sits under a 32-bit-host
    // preprocessor guard that is not visible in this fragment
#error Palacios does not support compilation for a 32 bit host OS!!!!

    // Refuse to start unless the host CPU is already in 64-bit long mode
    if (!in_long_mode()) {
        PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");

    // Set global variables.

    // More CPUs requested than compiled-in capacity: warn, then use only
    // the first V3_CONFIG_MAX_CPUS
    if (num_cpus>V3_CONFIG_MAX_CPUS) {
        PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);

    // Determine the global machine type
    v3_mach_type = V3_INVALID_CPU;

    // Start every slot as invalid; per-CPU init below fills in real types
    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
        v3_cpu_types[i] = V3_INVALID_CPU;

    // Parse host-os defined options into an easily-accessed format.
    v3_parse_options(options);

    // Memory manager initialization

    // Register all the possible device types

    // Register all shadow paging handlers
    V3_init_shdw_paging();

    // Initialize the cpu_mapper framework (must be before extensions)
    V3_init_cpu_mapper();

    // Initialize the scheduler framework (must be before extensions)
    V3_init_scheduling();

    // Register all extensions
    V3_init_extensions();

    // Enabling cpu_mapper
    V3_enable_cpu_mapper();

    // Enabling scheduler
    V3_enable_scheduler();

#ifdef V3_CONFIG_SYMMOD

#ifdef V3_CONFIG_CHECKPOINT
    V3_init_checkpoint();

    // Fan per-CPU initialization out to every host CPU selected by cpu_mask
    if ((hooks) && (hooks->call_on_cpu)) {

        for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {

            // NOTE(review): major/minor are byte/bit indices derived from i;
            // the derivation lines are not visible in this fragment
            if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
                V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
                hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);

                // First CPU with a recorded type defines the machine type
                if (v3_mach_type == V3_INVALID_CPU) {
                    v3_mach_type = v3_cpu_types[i];
// Reverse order of Init_V3
// NOTE(review): the enclosing function's signature (Shutdown_V3 in the full
// file, presumably) is elided from this fragment; the lines below are its body.

    // Tear down per-CPU virtualization state on every CPU that was initialized
    if ((os_hooks) && (os_hooks->call_on_cpu)) {
        for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
            if (v3_cpu_types[i] != V3_INVALID_CPU) {
                V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
                //deinit_cpu((void *)(addr_t)i);

#ifdef V3_CONFIG_CHECKPOINT
    V3_deinit_checkpoint();

#ifdef V3_CONFIG_SYMMOD

    // Disable before deinit, mirroring the enable order used in Init_V3()
    V3_disable_scheduler();

    V3_disable_cpu_mapper();

    V3_deinit_extensions();

    V3_deinit_scheduling();

    V3_deinit_cpu_mapper();

    V3_deinit_shdw_paging();
255 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
256 return v3_cpu_types[cpu_id];
/*
 * Build a VM from a parsed configuration blob.
 *
 * cfg       - guest configuration image (format owned by v3_config_guest())
 * priv_data - host OS private pointer stashed with the VM
 * name      - human-readable VM name; truncated to 127 chars + NUL below
 *
 * Returns the new VM on success; error paths (presumably returning NULL)
 * are elided in this fragment -- verify against the full file.
 */
struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
    struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);

        PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");

    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    // NOTE(review): the name == NULL branch is elided in this fragment
    } else if (strlen(name) >= 128) {
        PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");

    // 128 is the size of vm->name; the memset guarantees NUL termination
    // even though strncpy(…, 127) alone would not pad the final byte
    memset(vm->name, 0, 128);
    strncpy(vm->name, name, 127);

    if(v3_cpu_mapper_register_vm(vm) == -1) {

        PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");

    /*
     * Register this VM with the palacios scheduler. It will ask for admission
     */
    if(v3_scheduler_register_vm(vm) == -1) {

        PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
/*
 * Thread entry point for one virtual core (threads created in v3_start_vm()).
 * p is the core's struct guest_info. Dispatches into the SVM or VMX run loop
 * based on the global machine type; the run loop only returns when the core
 * stops executing.
 *
 * NOTE(review): #ifdef V3_CONFIG_SVM/VMX guards and the remaining case
 * labels of the switch are elided in this fragment.
 */
static int start_core(void * p)
{
    struct guest_info * core = (struct guest_info *)p;

    if (v3_scheduler_register_core(core) == -1){
        PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);

    PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
               core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);

    switch (v3_mach_type) {

    case V3_SVM_REV3_CPU:
        return v3_start_svm_guest(core);

    case V3_VMX_EPT_UG_CPU:
        return v3_start_vmx_guest(core);

        PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
/*
 * Launch a previously-created, stopped VM.
 *
 * cpu_mask selects which host CPUs may run vcores (bit i = cpu i); CPUs
 * without virtualization support are stripped from the mask below. One host
 * thread is spawned per virtual core, pinned to its physical core.
 *
 * NOTE(review): error returns, the avail_cores accounting, and several
 * closing braces are elided in this fragment.
 */
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {

    uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
    uint32_t avail_cores = 0;

    // A VM may only be started out of the STOPPED state
    if (vm->run_state != VM_STOPPED) {
        PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);

    /// CHECK IF WE ARE MULTICORE ENABLED....

    V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    // Check that enough cores are present in the mask to handle vcores
    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {

        // NOTE(review): major/minor are byte/bit indices derived from i;
        // the derivation lines are not visible in this fragment
        if (core_mask[major] & (0x1 << minor)) {
            // Strip CPUs with no virtualization extensions from the mask
            if (v3_cpu_types[i] == V3_INVALID_CPU) {
                core_mask[major] &= ~(0x1 << minor);

    vm->avail_cores = avail_cores;

    if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
        PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);

    if (v3_scheduler_admit_vm(vm) != 0){
        PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);

    vm->run_state = VM_RUNNING;

    // Spawn one host thread per virtual core, pinned to its physical core
    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

        struct guest_info * core = &(vm->cores[vcore_id]);

        PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
                   vcore_id, core->pcpu_id);

        // NOTE(review): sprintf into core->exec_name is unbounded here; it
        // relies on the 127-char truncation of vm->name in v3_create_vm()
        // -- verify exec_name's size accommodates that plus "-%u"
        sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);

        PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
                   core->pcpu_id, start_core, core, core->exec_name);

        core->core_run_state = CORE_STOPPED;  // core zero will turn itself on
        core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);

        if (core->core_thread == NULL) {
            PrintError(vm, VCORE_NONE, "Thread launch failed\n");
/*
 * Reset one virtual core so it will resume execution at rip, dispatching on
 * the virtualization type of the physical CPU the core is mapped to.
 *
 * NOTE(review): #ifdef guards, the remaining case labels, and the error
 * return are elided in this fragment.
 */
int v3_reset_vm_core(struct guest_info * core, addr_t rip) {

    switch (v3_cpu_types[core->pcpu_id]) {

    case V3_SVM_REV3_CPU:
        PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
        return v3_reset_svm_vm_core(core, rip);

    case V3_VMX_EPT_UG_CPU:
        PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
        return v3_reset_vmx_vm_core(core, rip);

        PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
/* move a virtual core to different physical core */
/*
 * Migrate vcore vcore_id of vm onto host CPU target_cpu. The VM-wide
 * barrier is held across the move so no core runs during the transition.
 *
 * NOTE(review): several early returns and closing braces are elided in
 * this fragment.
 */
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
    struct guest_info * core = NULL;

    if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
        PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);

    core = &(vm->cores[vcore_id]);

    if (target_cpu == core->pcpu_id) {
        PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
        // well that was pointless

    if (core->core_thread == NULL) {
        PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");

    // Quiesce the whole VM before touching core state; retry until the
    // barrier is successfully raised
    while (v3_raise_barrier(vm, NULL) == -1);

    V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);

    // Double check that we weren't preemptively migrated
    if (target_cpu != core->pcpu_id) {

        V3_Print(vm, core, "Moving Core\n");

        if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
            PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);

        // Flush CPU-local guest state on the old CPU before the thread
        // moves (presumably the VMCS for VMX -- verify in vmx.c)
        switch (v3_cpu_types[core->pcpu_id]) {

        case V3_VMX_EPT_UG_CPU:
            PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
            V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);

        if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
            PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
                       core->vcpu_id, target_cpu);
            v3_lower_barrier(vm);

        /* There will be a benign race window here:
           core->pcpu_id will be set to the target core before its fully "migrated"
           However the core will NEVER run on the old core again, its just in flight to the new core
        */
        core->pcpu_id = target_cpu;

        V3_Print(vm, core, "core now at %d\n", core->pcpu_id);

    v3_lower_barrier(vm);
/* move a memory region to memory with affinity for a specific physical core */
/*
 * Re-home the base memory region containing guest physical address gpa onto
 * the NUMA node of target_cpu: allocate a replacement on the new node, copy
 * the contents, swap the region's host mapping, and invalidate paging state
 * on every vcore. The whole operation runs under the VM-wide barrier.
 *
 * NOTE(review): local declarations (reg aside), early returns, and several
 * closing braces are elided in this fragment.
 */
int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {

    struct v3_mem_region *reg;

    // Where does the data currently live?
    old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);

        PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);

    // ...and where should it live?
    new_node = v3_numa_cpu_to_node(target_cpu);

        PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);

    if (new_node==old_node) {
        PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");

    // We are now going to change the universe, so
    // we'll barrier everyone first

    while (v3_raise_barrier(vm, NULL) == -1);

    reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);

        PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");

    // Only allocated base regions can be migrated
    if (!(reg->flags.base) || !(reg->flags.alloced)) {
        PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);

    // we now have the allocated base region corresponding to - and not a copy
    // we will rewrite this region after moving its contents

    // first, let's double check that we are in fact changing the numa_id...

    if (reg->numa_id==new_node) {
        PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");

    // region uses exclusive addressing [guest_start,guest_end)
    num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;

    // NOTE(review): intermediate argument lines of this call are elided
    new_hpa = V3_AllocPagesExtended(num_pages,
                                    0);  // no constraints given new shadow pager impl

        PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");

    // Note, assumes virtual contiguity in the host OS...
    memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);

    // Remember the old mapping so it can be freed (or restored on failure)
    old_hpa = (void*)(reg->host_addr);
    old_node = (int)(reg->numa_id);

    reg->host_addr = (addr_t)new_hpa;
    reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);

    // flush all page tables / kill all humans

    for (i=0;i<vm->num_cores;i++) {
        if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
            v3_invalidate_shadow_pts(&(vm->cores[i]));
        } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
            // nested invalidator uses inclusive addressing [start,end], not [start,end)
            v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1);

            // Unknown paging mode: undo the swap and bail out
            PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
            // We'll restore things...
            reg->host_addr = (addr_t) old_hpa;
            reg->numa_id = old_node;
            V3_FreePages(new_hpa,num_pages);

    // Now the old region can go away...
    V3_FreePages(old_hpa,num_pages);

    PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
               (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));

    v3_lower_barrier(vm);

    v3_lower_barrier(vm);
/*
 * Stop a running (or simulating) VM: flip run_state to STOPPED, then poll
 * until every core thread observes the state change and reaches
 * CORE_STOPPED.
 *
 * NOTE(review): the polling loop's braces, still_running increment, and
 * yield/sleep calls are elided in this fragment.
 */
int v3_stop_vm(struct v3_vm_info * vm) {

    struct guest_info * running_core;

    if ((vm->run_state != VM_RUNNING) &&
        (vm->run_state != VM_SIMULATING)) {
        PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);

    vm->run_state = VM_STOPPED;

    // Sanity check to catch any weird execution states
    if (v3_wait_for_barrier(vm, NULL) == 0) {
        v3_lower_barrier(vm);

    // XXX force exit all cores via a cross call/IPI XXX

        int still_running = 0;

        // Scan for any core that has not yet halted
        for (i = 0; i < vm->num_cores; i++) {
            if (vm->cores[i].core_run_state != CORE_STOPPED) {
                running_core = &vm->cores[i];

        if (still_running == 0) {

        v3_scheduler_stop_core(running_core);

    V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
660 int v3_pause_vm(struct v3_vm_info * vm) {
662 if (vm->run_state != VM_RUNNING) {
663 PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");
667 while (v3_raise_barrier(vm, NULL) == -1);
669 vm->run_state = VM_PAUSED;
675 int v3_continue_vm(struct v3_vm_info * vm) {
677 if (vm->run_state != VM_PAUSED) {
678 PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");
682 vm->run_state = VM_RUNNING;
684 v3_lower_barrier(vm);
/*
 * Per-core timeout callback armed by v3_simulate_vm(): sets this core's bit
 * in the shared timeout bitmap, then spins until v3_simulate_vm() clears it
 * (signalling the end of the simulation window).
 *
 * NOTE(review): the yield call inside the spin loop and the return are
 * elided in this fragment.
 */
static int sim_callback(struct guest_info * core, void * private_data) {
    struct v3_bitmap * timeout_map = private_data;

    v3_bitmap_set(timeout_map, core->vcpu_id);

    V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);

    // Hold this core here until the simulation window ends
    while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
        // We spin here if there is noone to yield to
/*
 * Run a paused VM for approximately msecs milliseconds, then re-pause it.
 *
 * A cycle-count timeout (sim_callback) is armed on every core; the VM is
 * resumed, and this function blocks until every core's timeout has fired
 * (every bit of timeout_map set), then raises the barrier and restores the
 * PAUSED state.
 *
 * NOTE(review): local declarations (cycles, i, all_blocked), error returns,
 * and yield calls are elided in this fragment.
 */
int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
    struct v3_bitmap timeout_map;

    uint64_t cpu_khz = V3_CPU_KHZ();

    if (vm->run_state != VM_PAUSED) {
        PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");

    /* AT this point VM is paused */

    // One bit per core: set when that core's timeout fires
    v3_bitmap_init(&timeout_map, vm->num_cores);

    // calculate cycles from msecs...
    // IMPORTANT: Floating point not allowed.
    cycles = (msecs * cpu_khz);

    V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);

    // Arm the timeout on every core before resuming any of them
    for (i = 0; i < vm->num_cores; i++) {
        if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
            PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);

    V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");

    // Run the simulation
    //    vm->run_state = VM_SIMULATING;
    vm->run_state = VM_RUNNING;
    v3_lower_barrier(vm);

    V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");

    // block until simulation is complete
    while (all_blocked == 0) {

        for (i = 0; i < vm->num_cores; i++) {
            if (v3_bitmap_check(&timeout_map, i) == 0) {

        if (all_blocked == 1) {

        // Intentionally spin if there is no one to yield to

    V3_Print(vm, VCORE_NONE,"Simulation is complete\n");

    // Simulation is complete
    // Reset back to PAUSED state

    v3_raise_barrier_nowait(vm, NULL);
    vm->run_state = VM_PAUSED;

    // Clearing the bitmap releases the cores spinning in sim_callback()
    v3_bitmap_reset(&timeout_map);

    v3_wait_for_barrier(vm, NULL);
/*
 * Snapshot VM-level, per-core, and memory-map state into caller-provided
 * buffers, translating internal enums into the exported V3_* values.
 *
 * numcores / numregions clamp to the smaller of what the caller's buffers
 * hold (core->num_vcores / mem->num_regions) and what the VM actually has.
 *
 * NOTE(review): many closing braces are elided in this fragment.
 */
int v3_get_state_vm(struct v3_vm_info *vm,
                    struct v3_vm_base_state *base,
                    struct v3_vm_core_state *core,
                    struct v3_vm_mem_state *mem)
{
    uint32_t numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
    uint32_t numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
    extern uint64_t v3_mem_block_size;

    // Translate the internal run state into the exported enum
    switch (vm->run_state) {
        case VM_INVALID: base->state = V3_VM_INVALID; break;
        case VM_RUNNING: base->state = V3_VM_RUNNING; break;
        case VM_STOPPED: base->state = V3_VM_STOPPED; break;
        case VM_PAUSED: base->state = V3_VM_PAUSED; break;
        case VM_ERROR: base->state = V3_VM_ERROR; break;
        case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
        default: base->state = V3_VM_UNKNOWN; break;

    // Per-core state: run state, CPU mode, paging mode, memory mode
    for (i=0;i<numcores;i++) {
        switch (vm->cores[i].core_run_state) {
            case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
            case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
            case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
            default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;

        switch (vm->cores[i].cpu_mode) {
            case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
            case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
            case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
            case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
            case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
            case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
            default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;

        switch (vm->cores[i].shdw_pg_mode) {
            case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
            case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
            default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;

        switch (vm->cores[i].mem_mode) {
            case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
            case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
            default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;

        core->vcore[i].pcore=vm->cores[i].pcpu_id;
        core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
        core->vcore[i].num_exits=vm->cores[i].num_exits;

    core->num_vcores=numcores;

    // NOTE(review): BUG? this loop is bounded by num_base_regions rather
    // than the clamped numregions, so it can overrun the caller's
    // mem->region[] buffer when the VM has more base regions than the
    // caller allocated. It should loop to numregions -- verify against the
    // full file.
    for (i=0;i<vm->mem_map.num_base_regions;i++) {
        mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
        mem->region[i].size = v3_mem_block_size;

    mem->num_regions=numregions;
#ifdef V3_CONFIG_CHECKPOINT
// NOTE(review): vmm_checkpoint.h is already included under the same guard
// near the top of the file; this repeat is harmless (include guards) but
// redundant.
#include <palacios/vmm_checkpoint.h>

// Thin wrappers exporting the checkpoint subsystem's save/load (and, with
// live migration enabled, send/receive) operations to the host interface.

int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
  return v3_chkpt_save_vm(vm, store, url, opts);

int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
  return v3_chkpt_load_vm(vm, store, url, opts);

#ifdef V3_CONFIG_LIVE_MIGRATION
int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
  return v3_chkpt_send_vm(vm, store, url, opts);

int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
  return v3_chkpt_receive_vm(vm, store, url, opts);
/*
 * Tear down and free a VM that is stopped (or in the error state):
 * devices, per-core scheduler state and core state, VM-level scheduler
 * bookkeeping, then the VM structure itself.
 *
 * NOTE(review): memory-map teardown and the return statements are elided
 * in this fragment.
 */
int v3_free_vm(struct v3_vm_info * vm) {

    // deinitialize guest (free memory, etc...)

    // Only a stopped or errored VM may be freed
    if ((vm->run_state != VM_STOPPED) &&
        (vm->run_state != VM_ERROR)) {
        PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);

    v3_free_vm_devices(vm);

    for (i = 0; i < vm->num_cores; i++) {
        v3_scheduler_free_core(&(vm->cores[i]));
        v3_free_core(&(vm->cores[i]));

    v3_scheduler_free_vm(vm);

    v3_free_vm_internal(vm);
/*
 * 32-bit-host variant: classify the host CPU's execution mode by inspecting
 * control registers (CR4.PAE check visible below; the surrounding reads and
 * branches are elided -- this fragment is heavily truncated).
 */
v3_cpu_mode_t v3_get_host_cpu_mode() {

    cr4 = (struct cr4_32 *)&(cr4_val);

        return PROTECTED_PAE;

/* 64-bit-host variant; body elided in this fragment (presumably returns
   LONG unconditionally -- verify against the full file) */
v3_cpu_mode_t v3_get_host_cpu_mode() {
/*
 * Conditional debug print: emits only when the global v3_dbg_enable flag is
 * exactly 1. Formats into a fixed local buffer (declaration elided in this
 * fragment; the vsnprintf bound below is 2048 bytes -- verify it matches
 * the buffer's declared size).
 */
void v3_print_cond(const char * fmt, ...) {
    if (v3_dbg_enable == 1) {

        vsnprintf(buf, 2048, fmt, ap);

        V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
/*
 * Ask the host OS to deliver an interrupt with the given vector to a
 * logical CPU, if the host supplied an interrupt_cpu hook.
 *
 * NOTE(review): the local extern declaration is redundant -- os_hooks is
 * already defined at file scope near the top of this file.
 */
void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
    extern struct v3_os_hooks * os_hooks;

    if ((os_hooks) && (os_hooks)->interrupt_cpu) {
        (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
/*
 * Perform one guest entry on this core, dispatching to the SVM or VMX entry
 * path based on the global machine type.
 *
 * NOTE(review): #ifdef guards, the remaining case labels, and the error
 * return are elided in this fragment.
 */
int v3_vm_enter(struct guest_info * info) {
    switch (v3_mach_type) {

    case V3_SVM_REV3_CPU:
        return v3_svm_enter(info);

    case V3_VMX_EPT_UG_CPU:
        return v3_vmx_enter(info);

        // NOTE(review): typo in message -- "Attemping" should be
        // "Attempting" (compare the same message in start_core())
        PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
989 void *v3_get_host_vm(struct v3_vm_info *x)
992 return x->host_priv_data;
998 int v3_get_vcore(struct guest_info *x)