 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
#include <palacios/vmm.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_intr.h>
#include <palacios/vmm_config.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_extensions.h>
#include <palacios/vmm_timeout.h>
#include <palacios/vmm_options.h>
#include <palacios/vmm_cpu_mapper.h>
#include <palacios/vmm_direct_paging.h>
#include <interfaces/vmm_numa.h>
#include <interfaces/vmm_file.h>

#include <palacios/svm.h>

#include <palacios/vmx.h>

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;

struct v3_os_hooks * os_hooks = NULL;
int v3_dbg_enable = 0;
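
/*
 * v3_cpu_types[] records the virtualization support detected on each host CPU
 * during Init_V3(); v3_mach_type caches the first valid entry and is used as
 * the machine-wide type when entering guests.
 *
 * Illustrative host-embedding sketch (assumed glue code, not part of this file):
 * a host OS typically drives the public entry points defined below roughly as
 *
 *     Init_V3(&hooks, cpu_mask, num_cpus, options);
 *     struct v3_vm_info * vm = v3_create_vm(cfg_blob, host_priv, "vm-0");
 *     v3_start_vm(vm, 0xf);          // run on the first four host CPUs
 *     ...
 *     v3_stop_vm(vm);
 *     v3_free_vm(vm);
 *
 * where hooks, cpu_mask, cfg_blob, and host_priv are supplied by the host OS.
 */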
static void init_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    if (v3_is_svm_capable()) {
        PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
        v3_init_svm_cpu(cpu_id);

    if (v3_is_vmx_capable()) {
        PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
        v3_init_vmx_cpu(cpu_id);

    PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization extensions\n");
static void deinit_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    switch (v3_cpu_types[cpu_id]) {

            PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
            v3_deinit_svm_cpu(cpu_id);

        case V3_VMX_EPT_UG_CPU:
            PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
            v3_deinit_vmx_cpu(cpu_id);

            PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization extensions\n");
static int in_long_mode()

    v3_get_msr(0xc0000080, &high, &low); // EFER
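    // EFER.LME is bit 8 (0x100) and EFER.LMA is bit 10 (0x400), both in the low
    // 32 bits of MSR 0xc0000080, so masking the low word with 0x500 checks that
    // long mode is both enabled and currently active.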
    return ((low & 0x500) == 0x500); // LMA and LME set


void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {

    V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");

#error Palacios does not support compilation for a 32 bit host OS!!!!

    if (!in_long_mode()) {
        PrintError(VM_NONE, VCORE_NONE, "Palacios supports execution only in long mode (64 bit).\n");

    // Set global variables.

    if (num_cpus > V3_CONFIG_MAX_CPUS) {
        PrintError(VM_NONE, VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
    // Determine the global machine type
    v3_mach_type = V3_INVALID_CPU;

    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
        v3_cpu_types[i] = V3_INVALID_CPU;

    // Parse host-os defined options into an easily-accessed format.
    v3_parse_options(options);

    // Memory manager initialization

    // Register all the possible device types

    // Register all shadow paging handlers
    V3_init_shdw_paging();

#ifdef V3_CONFIG_SWAPPING

    // Initialize the cpu_mapper framework (must be before extensions)
    V3_init_cpu_mapper();

    // Initialize the scheduler framework (must be before extensions)
    V3_init_scheduling();

    // Register all extensions
    V3_init_extensions();

    // Enabling cpu_mapper
    V3_enable_cpu_mapper();

    // Enabling scheduler
    V3_enable_scheduler();

#ifdef V3_CONFIG_SYMMOD

#ifdef V3_CONFIG_CHECKPOINT
    V3_init_checkpoint();

    if ((hooks) && (hooks->call_on_cpu)) {

        for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
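            // cpu_mask is treated as a byte-granular bitmap: `major` selects the
            // byte and `minor` the bit corresponding to cpu i, and a NULL mask
            // means every cpu is eligible.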
            if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
                V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
                hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);

                if (v3_mach_type == V3_INVALID_CPU) {
                    v3_mach_type = v3_cpu_types[i];
    // Reverse order of Init_V3

    if ((os_hooks) && (os_hooks->call_on_cpu)) {
        for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
            if (v3_cpu_types[i] != V3_INVALID_CPU) {
                V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
                //deinit_cpu((void *)(addr_t)i);

#ifdef V3_CONFIG_CHECKPOINT
    V3_deinit_checkpoint();

#ifdef V3_CONFIG_SYMMOD

    V3_disable_scheduler();

    V3_disable_cpu_mapper();

    V3_deinit_extensions();

    V3_deinit_scheduling();

    V3_deinit_cpu_mapper();

#ifdef V3_CONFIG_SWAPPING
    v3_deinit_swapping();

    V3_deinit_shdw_paging();


v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
    return v3_cpu_types[cpu_id];
struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
    struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);

        PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");

    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    } else if (strlen(name) >= 128) {
        PrintError(vm, VCORE_NONE, "VM name is too long. It will be truncated to 127 chars.\n");

    memset(vm->name, 0, 128);
    strncpy(vm->name, name, 127);

    if (v3_cpu_mapper_register_vm(vm) == -1) {

        PrintError(vm, VCORE_NONE, "Error registering VM with cpu_mapper\n");

     * Register this VM with the Palacios scheduler. It will ask for admission

    if (v3_scheduler_register_vm(vm) == -1) {

        PrintError(vm, VCORE_NONE, "Error registering VM with scheduler\n");
static int start_core(void * p)
    struct guest_info * core = (struct guest_info *)p;

    if (v3_scheduler_register_core(core) == -1) {
        PrintError(core->vm_info, core, "Error initializing scheduling in core %d\n", core->vcpu_id);

    PrintDebug(core->vm_info, core, "virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
               core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);

    switch (v3_mach_type) {

        case V3_SVM_REV3_CPU:
            return v3_start_svm_guest(core);

        case V3_VMX_EPT_UG_CPU:
            return v3_start_vmx_guest(core);

    PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {

    uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
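    // Reinterpreting the unsigned int mask as bytes lets the byte/bit
    // (major/minor) indexing below match the byte-granular cpu_mask handling
    // used in Init_V3.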
    uint32_t avail_cores = 0;

    if (vm->run_state != VM_STOPPED) {
        PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);

    /// CHECK IF WE ARE MULTICORE ENABLED....

    V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    // Check that enough cores are present in the mask to handle vcores
    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {

        if (core_mask[major] & (0x1 << minor)) {
            if (v3_cpu_types[i] == V3_INVALID_CPU) {
                core_mask[major] &= ~(0x1 << minor);
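                // CPUs that never initialized a virtualization context are
                // dropped from the mask here, so only usable cpus end up
                // counted toward avail_cores.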
    vm->avail_cores = avail_cores;

    if (v3_cpu_mapper_admit_vm(vm, cpu_mask) != 0) {
        PrintError(vm, VCORE_NONE, "Error admitting VM %s for mapping", vm->name);

    if (v3_scheduler_admit_vm(vm) != 0) {
        PrintError(vm, VCORE_NONE, "Error admitting VM %s for scheduling", vm->name);

    vm->run_state = VM_RUNNING;

    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

        struct guest_info * core = &(vm->cores[vcore_id]);

        PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
                   vcore_id, core->pcpu_id);

        sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);

        PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
                   core->pcpu_id, start_core, core, core->exec_name);

        if (core->core_run_state == CORE_INVALID) {
            // launch of a fresh VM
            core->core_run_state = CORE_STOPPED;
            // core zero will turn itself on
            // this is a resume - use whatever its current run_state is

        core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);

        if (core->core_thread == NULL) {
            PrintError(vm, VCORE_NONE, "Thread launch failed\n");
int v3_reset_vm_core(struct guest_info * core, addr_t rip) {

    switch (v3_cpu_types[core->pcpu_id]) {

        case V3_SVM_REV3_CPU:
            PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
            return v3_reset_svm_vm_core(core, rip);

        case V3_VMX_EPT_UG_CPU:
            PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
            return v3_reset_vmx_vm_core(core, rip);

            PrintError(core->vm_info, core, "CPU has no virtualization extensions\n");
/* move a virtual core to a different physical core */
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
    struct guest_info * core = NULL;

    if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
        PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);

    core = &(vm->cores[vcore_id]);

    if (target_cpu == core->pcpu_id) {
        PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
        // well that was pointless

    if (core->core_thread == NULL) {
        PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");

    while (v3_raise_barrier(vm, NULL) == -1);
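    // Retry until the barrier is actually raised; a -1 return means this
    // attempt to acquire the barrier failed and we simply try again.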
    V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);

    // Double check that we weren't preemptively migrated
    if (target_cpu != core->pcpu_id) {

        V3_Print(vm, core, "Moving Core\n");

        if (v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1) {
            PrintError(vm, core, "Core %d cannot be admitted on cpu %d\n", vcore_id, target_cpu);

        switch (v3_cpu_types[core->pcpu_id]) {

            case V3_VMX_EPT_UG_CPU:
                PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
                V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);

        if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
            PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
                       core->vcpu_id, target_cpu);
            v3_lower_barrier(vm);

        /* There will be a benign race window here:
           core->pcpu_id is set to the target core before it is fully "migrated".
           However, the core will NEVER run on the old core again; it is just in flight to the new core.
        */

        core->pcpu_id = target_cpu;

        V3_Print(vm, core, "core now at %d\n", core->pcpu_id);

    v3_lower_barrier(vm);
/* move a memory region to memory with affinity for a specific physical core */
int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {

    struct v3_mem_region *reg;

    old_node = v3_numa_gpa_to_node(vm, (addr_t)gpa);

        PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n", gpa);

    new_node = v3_numa_cpu_to_node(target_cpu);

        PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n", target_cpu);

    if (new_node == old_node) {
        PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");

    // We are now going to change the universe, so
    // we'll barrier everyone first

    while (v3_raise_barrier(vm, NULL) == -1);

    reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);

        PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");

    if (!(reg->flags.base) || !(reg->flags.alloced)) {
        PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);

    // we now have the allocated base region itself - not a copy -
    // and we will rewrite this region after moving its contents

    // first, let's double check that we are in fact changing the numa_id...

    if (reg->numa_id == new_node) {
        PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");

    // region uses exclusive addressing [guest_start, guest_end)
    num_pages = (reg->guest_end - reg->guest_start) / PAGE_SIZE;
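    // guest_start/guest_end are expected to be page-aligned, so the
    // exclusive-range difference divides evenly; e.g. a 2 MB base region
    // yields 512 4 KB pages.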
    new_hpa = V3_AllocPagesExtended(num_pages,
                                    0); // no constraints given new shadow pager impl

        PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");

    // Note, assumes virtual contiguity in the host OS...
    memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages * PAGE_SIZE);

    old_hpa = (void*)(reg->host_addr);
    old_node = (int)(reg->numa_id);

    reg->host_addr = (addr_t)new_hpa;
    reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);

    // flush all page tables / kill all humans

    for (i = 0; i < vm->num_cores; i++) {
        if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
            v3_invalidate_shadow_pts(&(vm->cores[i]));
        } else if (vm->cores[i].shdw_pg_mode == NESTED_PAGING) {
            // nested invalidator uses inclusive addressing [start, end], not [start, end)
            v3_invalidate_nested_addr_range(&(vm->cores[i]), reg->guest_start, reg->guest_end - 1, NULL, NULL);

            PrintError(vm, VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
            // We'll restore things...
            reg->host_addr = (addr_t) old_hpa;
            reg->numa_id = old_node;
            V3_FreePages(new_hpa, num_pages);

    // Now the old region can go away...
    V3_FreePages(old_hpa, num_pages);

    PrintDebug(vm, VCORE_NONE, "Migration of memory complete - new region is %p to %p\n",
               (void*)(reg->host_addr), (void*)(reg->host_addr + num_pages * PAGE_SIZE - 1));

    v3_lower_barrier(vm);

    v3_lower_barrier(vm);
int v3_stop_vm(struct v3_vm_info * vm) {

    struct guest_info * running_core;

    if ((vm->run_state != VM_RUNNING) &&
        (vm->run_state != VM_SIMULATING)) {
        PrintError(vm, VCORE_NONE, "Tried to stop VM in invalid runstate (%d)\n", vm->run_state);

    vm->run_state = VM_STOPPED;

    // Sanity check to catch any weird execution states
    if (v3_wait_for_barrier(vm, NULL) == 0) {
        v3_lower_barrier(vm);

    // XXX force exit all cores via a cross call/IPI XXX

        int still_running = 0;

        for (i = 0; i < vm->num_cores; i++) {
            if (vm->cores[i].core_run_state != CORE_STOPPED) {
                running_core = &vm->cores[i];

        if (still_running == 0) {

        v3_scheduler_stop_core(running_core);

    V3_Print(vm, VCORE_NONE, "VM stopped. Returning\n");
int v3_pause_vm(struct v3_vm_info * vm) {

    if (vm->run_state != VM_RUNNING) {
        PrintError(vm, VCORE_NONE, "Tried to pause a VM that was not running\n");

    while (v3_raise_barrier(vm, NULL) == -1);

    vm->run_state = VM_PAUSED;


int v3_continue_vm(struct v3_vm_info * vm) {

    if (vm->run_state != VM_PAUSED) {
        PrintError(vm, VCORE_NONE, "Tried to continue a VM that was not paused\n");

    vm->run_state = VM_RUNNING;

    v3_lower_barrier(vm);
static int sim_callback(struct guest_info * core, void * private_data) {
    struct v3_bitmap * timeout_map = private_data;

    v3_bitmap_set(timeout_map, core->vcpu_id);

    V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);

    while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
        // We spin here if there is no one to yield to
int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
    struct v3_bitmap timeout_map;

    uint64_t cpu_khz = V3_CPU_KHZ();

    if (vm->run_state != VM_PAUSED) {
        PrintError(vm, VCORE_NONE, "VM must be paused before simulation begins\n");

    /* At this point the VM is paused */

    v3_bitmap_init(&timeout_map, vm->num_cores);

    // calculate cycles from msecs...
    // IMPORTANT: Floating point not allowed.
    cycles = (msecs * cpu_khz);
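    // cpu_khz is cycles per millisecond (kHz = 1000 cycles/s), so the product
    // is exact in integer math; e.g. at 2,400,000 kHz (2.4 GHz), 10 msecs
    // corresponds to 24,000,000 cycles.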
    V3_Print(vm, VCORE_NONE, "Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);

    for (i = 0; i < vm->num_cores; i++) {
        if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
            PrintError(vm, VCORE_NONE, "Could not register simulation timeout for core %d\n", i);

    V3_Print(vm, VCORE_NONE, "Timeouts set on all cores\n");

    // Run the simulation
    // vm->run_state = VM_SIMULATING;
    vm->run_state = VM_RUNNING;
    v3_lower_barrier(vm);

    V3_Print(vm, VCORE_NONE, "Barrier lowered: We are now Simulating!!\n");

    // block until simulation is complete
    while (all_blocked == 0) {

        for (i = 0; i < vm->num_cores; i++) {
            if (v3_bitmap_check(&timeout_map, i) == 0) {

        if (all_blocked == 1) {

        // Intentionally spin if there is no one to yield to

    V3_Print(vm, VCORE_NONE, "Simulation is complete\n");

    // Simulation is complete
    // Reset back to PAUSED state

    v3_raise_barrier_nowait(vm, NULL);
    vm->run_state = VM_PAUSED;

    v3_bitmap_reset(&timeout_map);

    v3_wait_for_barrier(vm, NULL);
int v3_get_state_vm(struct v3_vm_info *vm,
                    struct v3_vm_base_state *base,
                    struct v3_vm_core_state *core,
                    struct v3_vm_mem_state *mem)

    uint32_t numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
    uint32_t numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
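    // Clamp to whichever is smaller: the space the caller provided
    // (core->num_vcores / mem->num_regions) or what the VM actually has.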
    extern uint64_t v3_mem_block_size;

    switch (vm->run_state) {
        case VM_INVALID: base->state = V3_VM_INVALID; break;
        case VM_RUNNING: base->state = V3_VM_RUNNING; break;
        case VM_STOPPED: base->state = V3_VM_STOPPED; break;
        case VM_PAUSED: base->state = V3_VM_PAUSED; break;
        case VM_ERROR: base->state = V3_VM_ERROR; break;
        case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
        default: base->state = V3_VM_UNKNOWN; break;

    for (i = 0; i < numcores; i++) {
        switch (vm->cores[i].core_run_state) {
            case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
            case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
            case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
            default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;

        switch (vm->cores[i].cpu_mode) {
            case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
            case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
            case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
            case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
            case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
            case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
            default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;

        switch (vm->cores[i].shdw_pg_mode) {
            case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
            case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
            default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;

        switch (vm->cores[i].mem_mode) {
            case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
            case VIRTUAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_VIRTUAL; break;
            default: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_UNKNOWN; break;

        core->vcore[i].pcore = vm->cores[i].pcpu_id;
        core->vcore[i].last_rip = (void*)(vm->cores[i].rip);
        core->vcore[i].num_exits = vm->cores[i].num_exits;

    core->num_vcores = numcores;
    for (i = 0; i < numregions; i++) {
        mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
        mem->region[i].size = v3_mem_block_size;
#ifdef V3_CONFIG_SWAPPING
        mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
        mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;
#else
        mem->region[i].swapped = 0;
        mem->region[i].pinned = 0;
#endif

    mem->num_regions = numregions;
#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>

int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    return v3_chkpt_save_vm(vm, store, url, opts);


int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    return v3_chkpt_load_vm(vm, store, url, opts);

#ifdef V3_CONFIG_LIVE_MIGRATION
int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    return v3_chkpt_send_vm(vm, store, url, opts);


int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    return v3_chkpt_receive_vm(vm, store, url, opts);
int v3_free_vm(struct v3_vm_info * vm) {

    // deinitialize guest (free memory, etc...)

    if ((vm->run_state != VM_STOPPED) &&
        (vm->run_state != VM_ERROR)) {
        PrintError(vm, VCORE_NONE, "Tried to free VM in invalid runstate (%d)\n", vm->run_state);

    v3_free_vm_devices(vm);

    for (i = 0; i < vm->num_cores; i++) {
        v3_scheduler_free_core(&(vm->cores[i]));
        v3_free_core(&(vm->cores[i]));

    v3_scheduler_free_vm(vm);
    v3_free_vm_internal(vm);
v3_cpu_mode_t v3_get_host_cpu_mode() {

    cr4 = (struct cr4_32 *)&(cr4_val);

        return PROTECTED_PAE;


v3_cpu_mode_t v3_get_host_cpu_mode() {
void v3_print_cond(const char * fmt, ...) {
    if (v3_dbg_enable == 1) {

        vsnprintf(buf, 2048, fmt, ap);

        V3_Print(VM_NONE, VCORE_NONE, "%s", buf);
void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
    extern struct v3_os_hooks * os_hooks;

    if ((os_hooks) && (os_hooks)->interrupt_cpu) {
        (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
int v3_vm_enter(struct guest_info * info) {
    switch (v3_mach_type) {

        case V3_SVM_REV3_CPU:
            return v3_svm_enter(info);

        case V3_VMX_EPT_CPU:
        case V3_VMX_EPT_UG_CPU:
            return v3_vmx_enter(info);

    PrintError(info->vm_info, info, "Attempting to enter a guest on an invalid CPU\n");
void *v3_get_host_vm(struct v3_vm_info *x)
    return x->host_priv_data;


int v3_get_vcore(struct guest_info *x)