 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".

#include <palacios/vmm.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_intr.h>
#include <palacios/vmm_config.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_extensions.h>
#include <palacios/vmm_timeout.h>
#include <palacios/vmm_options.h>
#include <palacios/vmm_cpu_mapper.h>
#include <palacios/vmm_direct_paging.h>
#include <interfaces/vmm_numa.h>

#include <palacios/svm.h>

#include <palacios/vmx.h>

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>

v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;

struct v3_os_hooks * os_hooks = NULL;
int v3_dbg_enable = 0;
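
/* Per-CPU initialization callback, invoked on each host CPU through the
 * host OS's call_on_cpu hook. It probes for SVM or VMX support and
 * initializes the matching virtualization extensions on that CPU; the
 * detected capability ends up recorded in v3_cpu_types[] for later dispatch. */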
static void init_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    if (v3_is_svm_capable()) {
        PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
        v3_init_svm_cpu(cpu_id);

    if (v3_is_vmx_capable()) {
        PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
        v3_init_vmx_cpu(cpu_id);

        PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
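
/* Per-CPU teardown callback: undoes init_cpu for a single host CPU,
 * dispatching on the capability previously recorded in v3_cpu_types[]. */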
static void deinit_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    switch (v3_cpu_types[cpu_id]) {

            PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
            v3_deinit_svm_cpu(cpu_id);

        case V3_VMX_EPT_UG_CPU:
            PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
            v3_deinit_vmx_cpu(cpu_id);

            PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
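
/* Global VMM initialization, called once by the host OS embedding layer.
 * It records the OS hook table, parses the host-supplied option string,
 * brings up the memory, paging, cpu_mapper, scheduler, extension, and
 * checkpoint frameworks, and then runs init_cpu on every host CPU allowed
 * by cpu_mask. The first successfully probed CPU determines the global
 * machine type (v3_mach_type). */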
void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char * options) {

    V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");

    // Set global variables.

    if (num_cpus > V3_CONFIG_MAX_CPUS) {
        PrintError(VM_NONE, VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);

    // Determine the global machine type
    v3_mach_type = V3_INVALID_CPU;

    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
        v3_cpu_types[i] = V3_INVALID_CPU;

    // Parse host-os defined options into an easily-accessed format.
    v3_parse_options(options);

    // Memory manager initialization

    // Register all the possible device types

    // Register all shadow paging handlers
    V3_init_shdw_paging();

    // Initialize the cpu_mapper framework (must be before extensions)
    V3_init_cpu_mapper();

    // Initialize the scheduler framework (must be before extensions)
    V3_init_scheduling();

    // Register all extensions
    V3_init_extensions();

    // Enabling cpu_mapper
    V3_enable_cpu_mapper();

    // Enabling scheduler
    V3_enable_scheduler();

#ifdef V3_CONFIG_SYMMOD

#ifdef V3_CONFIG_CHECKPOINT
    V3_init_checkpoint();

    if ((hooks) && (hooks->call_on_cpu)) {

        for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {

            if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
                V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
                hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);

                if (v3_mach_type == V3_INVALID_CPU) {
                    v3_mach_type = v3_cpu_types[i];
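
/* Global VMM teardown: reverses Init_V3, running deinit_cpu on every CPU
 * that was successfully initialized and then shutting down the checkpoint,
 * extension, scheduler, cpu_mapper, and shadow paging frameworks. */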
    // Reverse order of Init_V3

    if ((os_hooks) && (os_hooks->call_on_cpu)) {
        for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
            if (v3_cpu_types[i] != V3_INVALID_CPU) {
                V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
                //deinit_cpu((void *)(addr_t)i);

#ifdef V3_CONFIG_CHECKPOINT
    V3_deinit_checkpoint();

#ifdef V3_CONFIG_SYMMOD

    V3_disable_scheduler();

    V3_disable_cpu_mapper();

    V3_deinit_extensions();

    V3_deinit_scheduling();

    V3_deinit_cpu_mapper();

    V3_deinit_shdw_paging();

v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
    return v3_cpu_types[cpu_id];
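
/* Create (but do not start) a VM from a host-supplied configuration image.
 * The guest is built via v3_config_guest(), given a (length-limited) name,
 * and registered with the cpu_mapper and scheduler frameworks; failures at
 * any of these steps are reported and abort creation. */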
struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
    struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);

        PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");

    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    } else if (strlen(name) >= 128) {
        PrintError(vm, VCORE_NONE, "VM name is too long. It will be truncated to 127 chars.\n");

    memset(vm->name, 0, 128);
    strncpy(vm->name, name, 127);

    if (v3_cpu_mapper_register_vm(vm) == -1) {

        PrintError(vm, VCORE_NONE, "Error registering VM with cpu_mapper\n");

    /*
     * Register this VM with the palacios scheduler. It will ask for admission
     */
    if (v3_scheduler_register_vm(vm) == -1) {

        PrintError(vm, VCORE_NONE, "Error registering VM with scheduler\n");
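
/* Thread body for a single virtual core. It registers the core with the
 * scheduler and then enters the architecture-specific run loop
 * (v3_start_svm_guest or v3_start_vmx_guest) selected by the global
 * machine type. */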
static int start_core(void * p)
{
    struct guest_info * core = (struct guest_info *)p;

    if (v3_scheduler_register_core(core) == -1) {
        PrintError(core->vm_info, core, "Error initializing scheduling in core %d\n", core->vcpu_id);

    PrintDebug(core->vm_info, core, "virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
               core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);

    switch (v3_mach_type) {

        case V3_SVM_REV3_CPU:
            return v3_start_svm_guest(core);

        case V3_VMX_EPT_UG_CPU:
            return v3_start_vmx_guest(core);

            PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
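
/* Launch a configured VM: sanity-check the run state and the shadow-paging
 * vs. >4GB memory restriction, mask out host CPUs without virtualization
 * support, admit the VM to the cpu_mapper and scheduler, and finally create
 * one host thread per virtual core running start_core. Each core thread is
 * created in the CORE_STOPPED state; core zero turns itself on. */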
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {

    uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
    uint32_t avail_cores = 0;

    extern uint64_t v3_mem_block_size;

    if (vm->run_state != VM_STOPPED) {
        PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);

    // Do not run if any core is using shadow paging and we are out of 4 GB bounds
    for (i = 0; i < vm->num_cores; i++) {
        if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
            for (j = 0; j < vm->mem_map.num_base_regions; j++) {
                if ((vm->mem_map.base_regions[j].host_addr + v3_mem_block_size) >= 0x100000000ULL) {
                    PrintError(vm, VCORE_NONE, "Base memory region %d exceeds 4 GB boundary with shadow paging enabled on core %d.\n", j, i);
                    PrintError(vm, VCORE_NONE, "Any use of non-64 bit mode in the guest is likely to fail in this configuration.\n");
                    PrintError(vm, VCORE_NONE, "If you would like to proceed anyway, remove this check and recompile Palacios.\n");
                    PrintError(vm, VCORE_NONE, "Alternatively, change this VM to use nested paging.\n");

    /// CHECK IF WE ARE MULTICORE ENABLED....

    V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    // Check that enough cores are present in the mask to handle vcores
    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {

        if (core_mask[major] & (0x1 << minor)) {
            if (v3_cpu_types[i] == V3_INVALID_CPU) {
                core_mask[major] &= ~(0x1 << minor);

    vm->avail_cores = avail_cores;

    if (v3_cpu_mapper_admit_vm(vm, cpu_mask) != 0) {
        PrintError(vm, VCORE_NONE, "Error admitting VM %s for mapping\n", vm->name);

    if (v3_scheduler_admit_vm(vm) != 0) {
        PrintError(vm, VCORE_NONE, "Error admitting VM %s for scheduling\n", vm->name);

    vm->run_state = VM_RUNNING;

    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

        struct guest_info * core = &(vm->cores[vcore_id]);

        PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
                   vcore_id, core->pcpu_id);

        sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);

        PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
                   core->pcpu_id, start_core, core, core->exec_name);

        core->core_run_state = CORE_STOPPED;  // core zero will turn itself on
        core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);

        if (core->core_thread == NULL) {
            PrintError(vm, VCORE_NONE, "Thread launch failed\n");
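
/* Reset a single virtual core to the given RIP, dispatching to the
 * architecture-specific reset routine for the physical CPU the core is
 * currently bound to. */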
int v3_reset_vm_core(struct guest_info * core, addr_t rip) {

    switch (v3_cpu_types[core->pcpu_id]) {

        case V3_SVM_REV3_CPU:
            PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
            return v3_reset_svm_vm_core(core, rip);

        case V3_VMX_EPT_UG_CPU:
            PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
            return v3_reset_vmx_vm_core(core, rip);

            PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");

/* move a virtual core to a different physical core */
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
    struct guest_info * core = NULL;

    if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
        PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);

    core = &(vm->cores[vcore_id]);

    if (target_cpu == core->pcpu_id) {
        PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
        // well that was pointless

    if (core->core_thread == NULL) {
        PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");

    while (v3_raise_barrier(vm, NULL) == -1);

    V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);

    // Double-check that we weren't preemptively migrated
    if (target_cpu != core->pcpu_id) {

        V3_Print(vm, core, "Moving Core\n");

        if (v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1) {
            PrintError(vm, core, "Core %d cannot be admitted on cpu %d\n", vcore_id, target_cpu);

        switch (v3_cpu_types[core->pcpu_id]) {

            case V3_VMX_EPT_UG_CPU:
                PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
                V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);

        if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
            PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
                       core->vcpu_id, target_cpu);
            v3_lower_barrier(vm);

        /* There is a benign race window here:
           core->pcpu_id will be set to the target core before it is fully "migrated".
           However, the core will NEVER run on the old core again; it is just in flight to the new core.
        */
        core->pcpu_id = target_cpu;

        V3_Print(vm, core, "core now at %d\n", core->pcpu_id);

    v3_lower_barrier(vm);

/* move a memory region to memory with affinity for a specific physical core */
int v3_move_vm_mem(struct v3_vm_info * vm, void * gpa, int target_cpu) {

    struct v3_mem_region * reg;

    old_node = v3_numa_gpa_to_node(vm, (addr_t)gpa);

        PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n", gpa);

    new_node = v3_numa_cpu_to_node(target_cpu);

        PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n", target_cpu);

    if (new_node == old_node) {
        PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");

    // We are now going to change the universe, so
    // we'll barrier everyone first

    while (v3_raise_barrier(vm, NULL) == -1);

    reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t)gpa);

        PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");

    if (!(reg->flags.base) || !(reg->flags.alloced)) {
        PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);

    // we now have the allocated base region itself - not a copy -
    // and we will rewrite this region after moving its contents

    // first, let's double-check that we are in fact changing the numa_id...

    if (reg->numa_id == new_node) {
        PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");

    // region uses exclusive addressing [guest_start, guest_end)
    num_pages = (reg->guest_end - reg->guest_start) / PAGE_SIZE;

    // Now we allocate space for the new region with the same constraints as the old one
    new_hpa = V3_AllocPagesExtended(num_pages,
                                    reg->flags.limit32 ? V3_ALLOC_PAGES_CONSTRAINT_4GB : 0);

        PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");

    // Note, assumes virtual contiguity in the host OS...
    memcpy(V3_VAddr((void *)new_hpa), V3_VAddr((void *)(reg->host_addr)), num_pages * PAGE_SIZE);

    old_hpa = (void *)(reg->host_addr);
    old_node = (int)(reg->numa_id);

    reg->host_addr = (addr_t)new_hpa;
    reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);

    // flush all page tables / kill all humans

    for (i = 0; i < vm->num_cores; i++) {
        if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
            v3_invalidate_shadow_pts(&(vm->cores[i]));
        } else if (vm->cores[i].shdw_pg_mode == NESTED_PAGING) {
            // nested invalidator uses inclusive addressing [start, end], not [start, end)
            v3_invalidate_nested_addr_range(&(vm->cores[i]), reg->guest_start, reg->guest_end - 1);

            PrintError(vm, VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
            // We'll restore things...
            reg->host_addr = (addr_t)old_hpa;
            reg->numa_id = old_node;
            V3_FreePages(new_hpa, num_pages);

    // Now the old region can go away...
    V3_FreePages(old_hpa, num_pages);

    PrintDebug(vm, VCORE_NONE, "Migration of memory complete - new region is %p to %p\n",
               (void *)(reg->host_addr), (void *)(reg->host_addr + num_pages * PAGE_SIZE - 1));

    v3_lower_barrier(vm);

    v3_lower_barrier(vm);
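
/* Stop a running (or simulating) VM. The run state is moved to VM_STOPPED
 * and the routine then waits for every core to notice the change and stop,
 * nudging any still-running core through the scheduler. */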
int v3_stop_vm(struct v3_vm_info * vm) {

    struct guest_info * running_core;

    if ((vm->run_state != VM_RUNNING) &&
        (vm->run_state != VM_SIMULATING)) {
        PrintError(vm, VCORE_NONE, "Tried to stop VM in invalid runstate (%d)\n", vm->run_state);

    vm->run_state = VM_STOPPED;

    // Sanity check to catch any weird execution states
    if (v3_wait_for_barrier(vm, NULL) == 0) {
        v3_lower_barrier(vm);

    // XXX force exit all cores via a cross call/IPI XXX

        int still_running = 0;

        for (i = 0; i < vm->num_cores; i++) {
            if (vm->cores[i].core_run_state != CORE_STOPPED) {
                running_core = &vm->cores[i];

        if (still_running == 0) {

        v3_scheduler_stop_core(running_core);

    V3_Print(vm, VCORE_NONE, "VM stopped. Returning\n");
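
/* Pause/continue are built on the core barrier: pausing raises the barrier
 * and leaves it up, holding all cores outside the guest, while continuing
 * restores VM_RUNNING and lowers the barrier again. */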
int v3_pause_vm(struct v3_vm_info * vm) {

    if (vm->run_state != VM_RUNNING) {
        PrintError(vm, VCORE_NONE, "Tried to pause a VM that was not running\n");

    while (v3_raise_barrier(vm, NULL) == -1);

    vm->run_state = VM_PAUSED;


int v3_continue_vm(struct v3_vm_info * vm) {

    if (vm->run_state != VM_PAUSED) {
        PrintError(vm, VCORE_NONE, "Tried to continue a VM that was not paused\n");

    vm->run_state = VM_RUNNING;

    v3_lower_barrier(vm);
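
/* Simulation support: v3_simulate_vm() lets a paused VM run for a bounded
 * number of guest cycles. A per-core timeout fires sim_callback(), which
 * marks the core in a shared bitmap and spins until the simulation window
 * is torn down; once every core has blocked, the VM is returned to the
 * paused state. */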
static int sim_callback(struct guest_info * core, void * private_data) {
    struct v3_bitmap * timeout_map = private_data;

    v3_bitmap_set(timeout_map, core->vcpu_id);

    V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);

    while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {

int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
    struct v3_bitmap timeout_map;

    uint64_t cpu_khz = V3_CPU_KHZ();

    if (vm->run_state != VM_PAUSED) {
        PrintError(vm, VCORE_NONE, "VM must be paused before simulation begins\n");

    /* At this point the VM is paused */

    v3_bitmap_init(&timeout_map, vm->num_cores);

    // calculate cycles from msecs...
    // IMPORTANT: Floating point not allowed.
    cycles = (msecs * cpu_khz);

    V3_Print(vm, VCORE_NONE, "Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);

    for (i = 0; i < vm->num_cores; i++) {
        if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
            PrintError(vm, VCORE_NONE, "Could not register simulation timeout for core %d\n", i);

    V3_Print(vm, VCORE_NONE, "Timeouts set on all cores\n");

    // Run the simulation
    //    vm->run_state = VM_SIMULATING;
    vm->run_state = VM_RUNNING;
    v3_lower_barrier(vm);

    V3_Print(vm, VCORE_NONE, "Barrier lowered: We are now Simulating!!\n");

    // block until simulation is complete
    while (all_blocked == 0) {

        for (i = 0; i < vm->num_cores; i++) {
            if (v3_bitmap_check(&timeout_map, i) == 0) {

        if (all_blocked == 1) {

    V3_Print(vm, VCORE_NONE, "Simulation is complete\n");

    // Simulation is complete
    // Reset back to PAUSED state

    v3_raise_barrier_nowait(vm, NULL);
    vm->run_state = VM_PAUSED;

    v3_bitmap_reset(&timeout_map);

    v3_wait_for_barrier(vm, NULL);
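
/* Export a snapshot of VM, per-core, and memory-region state into the
 * host-visible v3_vm_base_state / v3_vm_core_state / v3_vm_mem_state
 * structures, translating the internal enums into their public
 * counterparts. */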
int v3_get_state_vm(struct v3_vm_info * vm,
                    struct v3_vm_base_state * base,
                    struct v3_vm_core_state * core,
                    struct v3_vm_mem_state * mem)
{
    uint32_t numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
    uint32_t numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
    extern uint64_t v3_mem_block_size;

    switch (vm->run_state) {
        case VM_INVALID:    base->state = V3_VM_INVALID; break;
        case VM_RUNNING:    base->state = V3_VM_RUNNING; break;
        case VM_STOPPED:    base->state = V3_VM_STOPPED; break;
        case VM_PAUSED:     base->state = V3_VM_PAUSED; break;
        case VM_ERROR:      base->state = V3_VM_ERROR; break;
        case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
        default:            base->state = V3_VM_UNKNOWN; break;

    for (i = 0; i < numcores; i++) {
        switch (vm->cores[i].core_run_state) {
            case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
            case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
            case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
            default:           core->vcore[i].state = V3_VCORE_UNKNOWN; break;

        switch (vm->cores[i].cpu_mode) {
            case REAL:           core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
            case PROTECTED:      core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
            case PROTECTED_PAE:  core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
            case LONG:           core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
            case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
            case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
            default:             core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;

        switch (vm->cores[i].shdw_pg_mode) {
            case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
            case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
            default:            core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;

        switch (vm->cores[i].mem_mode) {
            case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
            case VIRTUAL_MEM:  core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_VIRTUAL; break;
            default:           core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_UNKNOWN; break;

        core->vcore[i].pcore = vm->cores[i].pcpu_id;
        core->vcore[i].last_rip = (void *)(vm->cores[i].rip);
        core->vcore[i].num_exits = vm->cores[i].num_exits;

    core->num_vcores = numcores;

    for (i = 0; i < numregions; i++) {
        mem->region[i].host_paddr = (void *)(vm->mem_map.base_regions[i].host_addr);
        mem->region[i].size = v3_mem_block_size;

    mem->num_regions = numregions;

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>

int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    return v3_chkpt_save_vm(vm, store, url, opts);

int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    return v3_chkpt_load_vm(vm, store, url, opts);

#ifdef V3_CONFIG_LIVE_MIGRATION
int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    return v3_chkpt_send_vm(vm, store, url, opts);

int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
    return v3_chkpt_receive_vm(vm, store, url, opts);
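
/* Tear down a stopped (or errored) VM: free its devices, per-core state,
 * and scheduler bookkeeping, then release the remaining VM structures. */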
int v3_free_vm(struct v3_vm_info * vm) {

    // deinitialize guest (free memory, etc...)

    if ((vm->run_state != VM_STOPPED) &&
        (vm->run_state != VM_ERROR)) {
        PrintError(vm, VCORE_NONE, "Tried to Free VM in invalid runstate (%d)\n", vm->run_state);

    v3_free_vm_devices(vm);

    for (i = 0; i < vm->num_cores; i++) {
        v3_scheduler_free_core(&(vm->cores[i]));
        v3_free_core(&(vm->cores[i]));

    v3_scheduler_free_vm(vm);
    v3_free_vm_internal(vm);

v3_cpu_mode_t v3_get_host_cpu_mode() {

    cr4 = (struct cr4_32 *)&(cr4_val);

        return PROTECTED_PAE;

v3_cpu_mode_t v3_get_host_cpu_mode() {

void v3_print_cond(const char * fmt, ...) {
    if (v3_dbg_enable == 1) {

        vsnprintf(buf, 2048, fmt, ap);

        V3_Print(VM_NONE, VCORE_NONE, "%s", buf);

void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
    extern struct v3_os_hooks * os_hooks;

    if ((os_hooks) && (os_hooks)->interrupt_cpu) {
        (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
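
/* Enter the guest on the calling core, dispatching to the SVM or VMX entry
 * path selected by the global machine type. */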
int v3_vm_enter(struct guest_info * info) {
    switch (v3_mach_type) {

        case V3_SVM_REV3_CPU:
            return v3_svm_enter(info);

        case V3_VMX_EPT_UG_CPU:
            return v3_vmx_enter(info);

            PrintError(info->vm_info, info, "Attempting to enter a guest on an invalid CPU\n");

void *v3_get_host_vm(struct v3_vm_info *x)
{
    return x->host_priv_data;

int v3_get_vcore(struct guest_info *x)