2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
36 #include <palacios/svm.h>
39 #include <palacios/vmx.h>
42 #ifdef V3_CONFIG_CHECKPOINT
43 #include <palacios/vmm_checkpoint.h>
47 v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
48 v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;
50 struct v3_os_hooks * os_hooks = NULL;
51 int v3_dbg_enable = 0;
/* Per-CPU virtualization init, run on the target CPU via call_on_cpu().
 * Probes for SVM then VMX support and initializes whichever is present;
 * arg is the logical cpu_id smuggled through a void*. */
55 static void init_cpu(void * arg) {
56 uint32_t cpu_id = (uint32_t)(addr_t)arg;
// AMD SVM path: records the CPU type and sets up per-CPU SVM state.
61 if (v3_is_svm_capable()) {
62 PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
63 v3_init_svm_cpu(cpu_id);
// Intel VMX path.
68 if (v3_is_vmx_capable()) {
69 PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
70 v3_init_vmx_cpu(cpu_id);
// Neither SVM nor VMX: this CPU cannot host guests.
75 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/* Per-CPU virtualization teardown, run on the target CPU.
 * Dispatches on the type recorded by init_cpu() in v3_cpu_types[]. */
80 static void deinit_cpu(void * arg) {
81 uint32_t cpu_id = (uint32_t)(addr_t)arg;
84 switch (v3_cpu_types[cpu_id]) {
// SVM variants fall through to a single SVM teardown.
88 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
89 v3_deinit_svm_cpu(cpu_id);
// VMX variants (EPT, unrestricted-guest, ...) share the VMX teardown.
95 case V3_VMX_EPT_UG_CPU:
96 PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
97 v3_deinit_vmx_cpu(cpu_id);
// No virtualization extensions were found at init time; nothing to undo.
102 PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/* Global Palacios initialization, called once by the host OS.
 * hooks:    host OS callback table (printing, allocation, threads, ...).
 * cpu_mask: bitmap of physical CPUs Palacios may use; NULL means all.
 * num_cpus: number of CPUs the host reports.
 * options:  host-supplied option string, parsed by v3_parse_options().
 * Initializes the global frameworks (paging, cpu_mapper, scheduler,
 * extensions, checkpoint) and then runs init_cpu() on each allowed CPU. */
110 void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {
// Comment on the original line below says this print works around a Kitten page-fault bug.
115 V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");
118 // Set global variables.
// Clamp: CPUs beyond the compile-time maximum are ignored with a warning.
121 if (num_cpus>V3_CONFIG_MAX_CPUS) {
122 PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);
125 // Determine the global machine type
126 v3_mach_type = V3_INVALID_CPU;
128 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
129 v3_cpu_types[i] = V3_INVALID_CPU;
132 // Parse host-os defined options into an easily-accessed format.
133 v3_parse_options(options);
135 // Memory manager initialization
138 // Register all the possible device types
141 // Register all shadow paging handlers
142 V3_init_shdw_paging();
144 // Initialize the cpu_mapper framework (must be before extensions)
145 V3_init_cpu_mapper();
147 // Initialize the scheduler framework (must be before extensions)
148 V3_init_scheduling();
150 // Register all extensions
151 V3_init_extensions();
153 // Enabling cpu_mapper
154 V3_enable_cpu_mapper();
156 // Enabling scheduler
157 V3_enable_scheduler();
160 #ifdef V3_CONFIG_SYMMOD
164 #ifdef V3_CONFIG_CHECKPOINT
165 V3_init_checkpoint();
// Bring up virtualization on each CPU permitted by cpu_mask
// (mask indexed as byte `major`, bit `minor`; NULL mask = all CPUs).
168 if ((hooks) && (hooks->call_on_cpu)) {
170 for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
174 if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
175 V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
176 hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
// The machine type is taken from the first successfully initialized CPU.
178 if (v3_mach_type == V3_INVALID_CPU) {
179 v3_mach_type = v3_cpu_types[i];
/* Global teardown (function header not visible in this view — presumably
 * Shutdown_V3). Undoes Init_V3 in reverse order: per-CPU deinit first,
 * then checkpoint/symmod, then disable and deinit the frameworks. */
191 // Reverse order of Init_V3
// Tear down virtualization only on CPUs that were actually initialized.
195 if ((os_hooks) && (os_hooks->call_on_cpu)) {
196 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
197 if (v3_cpu_types[i] != V3_INVALID_CPU) {
198 V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
199 //deinit_cpu((void *)(addr_t)i);
204 #ifdef V3_CONFIG_CHECKPOINT
205 V3_deinit_checkpoint();
208 #ifdef V3_CONFIG_SYMMOD
// Disable before deinit, mirroring the enable-after-init order in Init_V3.
212 V3_disable_scheduler();
214 V3_disable_cpu_mapper();
216 V3_deinit_extensions();
218 V3_deinit_scheduling();
220 V3_deinit_cpu_mapper();
222 V3_deinit_shdw_paging();
/* Return the virtualization architecture detected on physical CPU cpu_id.
 * NOTE(review): cpu_id is not range-checked against V3_CONFIG_MAX_CPUS —
 * callers are trusted to pass a valid index. */
234 v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
235 return v3_cpu_types[cpu_id];
/* Build a VM from a parsed configuration blob.
 * cfg:       guest configuration image, consumed by v3_config_guest().
 * priv_data: opaque host-side handle stored with the VM.
 * name:      VM name; truncated to fit the fixed 128-byte name field.
 * Registers the new VM with the cpu_mapper and scheduler frameworks. */
239 struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name) {
240 struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);
243 PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");
247 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
// Names longer than the 128-byte buffer are truncated (with warning), not rejected.
251 } else if (strlen(name) >= 128) {
252 PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");
// memset + strncpy(,,127) guarantees NUL termination of vm->name.
255 memset(vm->name, 0, 128);
256 strncpy(vm->name, name, 127);
258 if(v3_cpu_mapper_register_vm(vm) == -1) {
260 PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");
264 * Register this VM with the palacios scheduler. It will ask for admission
267 if(v3_scheduler_register_vm(vm) == -1) {
269 PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");
/* Thread entry point for one virtual core (spawned by v3_start_vm).
 * Registers the core with the scheduler, then transfers control into the
 * architecture-specific run loop; only returns when the guest core exits. */
278 static int start_core(void * p)
280 struct guest_info * core = (struct guest_info *)p;
282 if (v3_scheduler_register_core(core) == -1){
283 PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);
286 PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
287 core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
// Dispatch on the machine-wide type, not the per-CPU type.
289 switch (v3_mach_type) {
292 case V3_SVM_REV3_CPU:
293 return v3_start_svm_guest(core);
299 case V3_VMX_EPT_UG_CPU:
300 return v3_start_vmx_guest(core);
304 PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
/* Launch all virtual cores of a stopped VM.
 * cpu_mask restricts which physical CPUs may host vcores (bit i = CPU i).
 * Validates shadow-paging memory constraints, prunes unusable CPUs from
 * the mask, gets cpu_mapper/scheduler admission, then spawns one host
 * thread per vcore running start_core(). */
311 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
314 uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
315 uint32_t avail_cores = 0;
317 extern uint64_t v3_mem_block_size;
// Only a stopped VM may be (re)started.
320 if (vm->run_state != VM_STOPPED) {
321 PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);
326 // Do not run if any core is using shadow paging and we are out of 4 GB bounds
327 for (i=0;i<vm->num_cores;i++) {
328 if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
329 for (j=0;j<vm->mem_map.num_base_regions;j++) {
// NOTE(review): the inner loop walks regions with j, but this indexes
// base_regions[i] (the core index) — looks like a copy/paste bug; the
// error message below also prints j as the region. Confirm against upstream.
330 if ((vm->mem_map.base_regions[i].host_addr + v3_mem_block_size) >= 0x100000000ULL) {
331 PrintError(vm, VCORE_NONE, "Base memory region %d exceeds 4 GB boundary with shadow paging enabled on core %d.\n",j, i);
332 PrintError(vm, VCORE_NONE, "Any use of non-64 bit mode in the guest is likely to fail in this configuration.\n");
333 PrintError(vm, VCORE_NONE, "If you would like to proceed anyway, remove this check and recompile Palacios.\n");
334 PrintError(vm, VCORE_NONE, "Alternatively, change this VM to use nested paging.\n");
341 /// CHECK IF WE ARE MULTICORE ENABLED....
343 V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
344 V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));
347 // Check that enough cores are present in the mask to handle vcores
348 for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
// Drop masked-in CPUs that never initialized virtualization support.
352 if (core_mask[major] & (0x1 << minor)) {
353 if (v3_cpu_types[i] == V3_INVALID_CPU) {
354 core_mask[major] &= ~(0x1 << minor);
361 vm->avail_cores = avail_cores;
// Admission control: both frameworks must accept the VM before launch.
363 if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
364 PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);
367 if (v3_scheduler_admit_vm(vm) != 0){
368 PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);
371 vm->run_state = VM_RUNNING;
374 for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {
376 struct guest_info * core = &(vm->cores[vcore_id]);
378 PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
379 vcore_id, core->pcpu_id);
// Thread name "<vmname>-<vcore>"; exec_name is assumed large enough — TODO confirm.
381 sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);
383 PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
384 core->pcpu_id, start_core, core, core->exec_name);
386 core->core_run_state = CORE_STOPPED; // core zero will turn itself on
387 core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);
389 if (core->core_thread == NULL) {
390 PrintError(vm, VCORE_NONE, "Thread launch failed\n");
/* Reset a single virtual core to begin execution at rip.
 * Dispatches on the type of the core's current *physical* CPU. */
401 int v3_reset_vm_core(struct guest_info * core, addr_t rip) {
403 switch (v3_cpu_types[core->pcpu_id]) {
406 case V3_SVM_REV3_CPU:
407 PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
408 return v3_reset_svm_vm_core(core, rip);
413 case V3_VMX_EPT_UG_CPU:
414 PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
415 return v3_reset_vmx_vm_core(core, rip);
419 PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
428 /* move a virtual core to different physical core */
/* Migrate vcore vcore_id of vm onto physical CPU target_cpu.
 * Raises the VM-wide barrier to quiesce all cores, flushes arch state
 * (VMX needs an explicit on-CPU flush), moves the host thread, then
 * updates pcpu_id and lowers the barrier. */
429 int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
430 struct guest_info * core = NULL;
432 if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
433 PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);
437 core = &(vm->cores[vcore_id]);
// Migrating to the core it already runs on is a no-op error.
439 if (target_cpu == core->pcpu_id) {
440 PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
441 // well that was pointless
445 if (core->core_thread == NULL) {
446 PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");
// Spin until the barrier is raised: all vcores must be quiescent first.
450 while (v3_raise_barrier(vm, NULL) == -1);
452 V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
454 // Double check that we weren't preemptively migrated
455 if (target_cpu != core->pcpu_id) {
457 V3_Print(vm, core, "Moving Core\n");
// The cpu_mapper must admit the core on the destination CPU.
459 if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
460 PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);
466 switch (v3_cpu_types[core->pcpu_id]) {
// VMX keeps per-CPU VMCS state; flush it on the *old* CPU before moving.
469 case V3_VMX_EPT_UG_CPU:
470 PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
471 V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);
478 if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
479 PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
480 core->vcpu_id, target_cpu);
481 v3_lower_barrier(vm);
485 /* There will be a benign race window here:
486 core->pcpu_id will be set to the target core before its fully "migrated"
487 However the core will NEVER run on the old core again, its just in flight to the new core
489 core->pcpu_id = target_cpu;
491 V3_Print(vm, core, "core now at %d\n", core->pcpu_id);
494 v3_lower_barrier(vm);
499 /* move a memory region to memory with affinity for a specific physical core */
/* Re-home the base memory region containing guest physical address gpa
 * onto the NUMA node of target_cpu. Under the VM-wide barrier: allocate a
 * replacement region on the new node (honoring the original 4GB constraint),
 * copy the contents, swap the region's host_addr, invalidate all cores'
 * paging structures, then free the old pages. On invalidation failure the
 * old region is restored. */
500 int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {
503 struct v3_mem_region *reg;
509 old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);
512 PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);
516 new_node = v3_numa_cpu_to_node(target_cpu);
519 PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);
// Fast path: memory is already on the destination node.
523 if (new_node==old_node) {
524 PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");
528 // We are now going to change the universe, so
529 // we'll barrier everyone first
531 while (v3_raise_barrier(vm, NULL) == -1);
535 reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);
538 PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");
// Only allocated base regions can be migrated (not overlays/hooks).
542 if (!(reg->flags.base) || !(reg->flags.alloced)) {
543 PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);
547 // we now have the allocated base region corresponding to - and not a copy
548 // we will rewrite this region after moving its contents
550 // first, let's double check that we are in fact changing the numa_id...
552 if (reg->numa_id==new_node) {
553 PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");
557 // region uses exclusive addressing [guest_start,guest_end)
558 num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;
560 // Now we allocate space for the new region with the same constraints as
// Preserve the below-4GB constraint if the original region had it.
562 new_hpa = V3_AllocPagesExtended(num_pages,
565 reg->flags.limit32 ? V3_ALLOC_PAGES_CONSTRAINT_4GB : 0);
568 PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");
572 // Note, assumes virtual contiguity in the host OS...
573 memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);
// Remember the old placement so we can roll back on failure below.
575 old_hpa = (void*)(reg->host_addr);
576 old_node = (int)(reg->numa_id);
578 reg->host_addr = (addr_t)new_hpa;
579 reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);
581 // flush all page tables / kill all humans
583 for (i=0;i<vm->num_cores;i++) {
584 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
585 v3_invalidate_shadow_pts(&(vm->cores[i]));
586 } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
587 // nested invalidator uses inclusive addressing [start,end], not [start,end)
588 v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1);
590 PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
591 // We'll restore things...
592 reg->host_addr = (addr_t) old_hpa;
593 reg->numa_id = old_node;
594 V3_FreePages(new_hpa,num_pages);
599 // Now the old region can go away...
600 V3_FreePages(old_hpa,num_pages);
602 PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
603 (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));
606 v3_lower_barrier(vm);
// NOTE(review): this second lower appears to be the error-path epilogue
// (cleanup label); the barrier should be lowered exactly once per raise.
611 v3_lower_barrier(vm);
/* Stop a running (or simulating) VM: set run_state to VM_STOPPED and
 * then wait until every core observes the state change and halts. */
615 int v3_stop_vm(struct v3_vm_info * vm) {
617 struct guest_info * running_core;
619 if ((vm->run_state != VM_RUNNING) &&
620 (vm->run_state != VM_SIMULATING)) {
621 PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);
// Cores poll run_state; setting it is what actually requests the stop.
625 vm->run_state = VM_STOPPED;
627 // Sanity check to catch any weird execution states
628 if (v3_wait_for_barrier(vm, NULL) == 0) {
629 v3_lower_barrier(vm);
632 // XXX force exit all cores via a cross call/IPI XXX
// Poll until every core reports CORE_STOPPED, nudging the scheduler
// toward a still-running core each pass.
636 int still_running = 0;
638 for (i = 0; i < vm->num_cores; i++) {
639 if (vm->cores[i].core_run_state != CORE_STOPPED) {
640 running_core = &vm->cores[i];
645 if (still_running == 0) {
649 v3_scheduler_stop_core(running_core);
652 V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
/* Pause a running VM: raise the barrier so all cores block, then mark
 * the VM paused. The barrier stays raised until v3_continue_vm(). */
658 int v3_pause_vm(struct v3_vm_info * vm) {
660 if (vm->run_state != VM_RUNNING) {
661 PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");
// Spin until the barrier is successfully raised (all cores checked in).
665 while (v3_raise_barrier(vm, NULL) == -1);
667 vm->run_state = VM_PAUSED;
/* Resume a paused VM: flip the state back to running and lower the
 * barrier left raised by v3_pause_vm(), releasing all cores. */
673 int v3_continue_vm(struct v3_vm_info * vm) {
675 if (vm->run_state != VM_PAUSED) {
676 PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");
680 vm->run_state = VM_RUNNING;
682 v3_lower_barrier(vm);
/* Core-timeout callback used by v3_simulate_vm(): marks this vcore as
 * having hit its simulation deadline, then spins until the controller
 * (v3_simulate_vm) clears the bit to release it. */
689 static int sim_callback(struct guest_info * core, void * private_data) {
690 struct v3_bitmap * timeout_map = private_data;
// Signal "this core is done simulating" to the controller.
692 v3_bitmap_set(timeout_map, core->vcpu_id);
694 V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);
696 while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
697 // We spin here if there is noone to yield to
/* Run a paused VM for approximately msecs milliseconds, then re-pause.
 * Installs a cycle-count timeout (sim_callback) on every core, releases
 * the VM, waits until every core's timeout bit is set, and restores the
 * paused state via the barrier. */
707 int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
708 struct v3_bitmap timeout_map;
712 uint64_t cpu_khz = V3_CPU_KHZ();
714 if (vm->run_state != VM_PAUSED) {
715 PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");
719 /* AT this point VM is paused */
// One bit per vcore; a set bit means that core hit its timeout.
722 v3_bitmap_init(&timeout_map, vm->num_cores);
727 // calculate cycles from msecs...
728 // IMPORTANT: Floating point not allowed.
// khz * ms = cycles (kHz is cycles/ms), so no scaling factor is needed.
729 cycles = (msecs * cpu_khz);
733 V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);
737 for (i = 0; i < vm->num_cores; i++) {
738 if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
739 PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);
744 V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");
747 // Run the simulation
748 // vm->run_state = VM_SIMULATING;
749 vm->run_state = VM_RUNNING;
750 v3_lower_barrier(vm);
753 V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");
755 // block until simulation is complete
756 while (all_blocked == 0) {
759 for (i = 0; i < vm->num_cores; i++) {
760 if (v3_bitmap_check(&timeout_map, i) == 0) {
765 if (all_blocked == 1) {
769 // Intentionally spin if there is no one to yield to
774 V3_Print(vm, VCORE_NONE,"Simulation is complete\n");
776 // Simulation is complete
777 // Reset back to PAUSED state
// Raise the barrier, then clear the bitmap to release the cores spinning
// in sim_callback so they can check in at the barrier.
779 v3_raise_barrier_nowait(vm, NULL);
780 vm->run_state = VM_PAUSED;
782 v3_bitmap_reset(&timeout_map);
784 v3_wait_for_barrier(vm, NULL);
/* Snapshot VM state into caller-provided structures.
 * base: overall run state; core: per-vcore state (caller sets num_vcores
 * to its buffer capacity); mem: base-region list (caller sets num_regions
 * to its capacity). Counts are clamped to the VM's actual sizes and
 * written back so the caller learns how many entries were filled. */
790 int v3_get_state_vm(struct v3_vm_info *vm,
791 struct v3_vm_base_state *base,
792 struct v3_vm_core_state *core,
793 struct v3_vm_mem_state *mem)
// Clamp to the smaller of caller capacity and actual VM size.
796 uint32_t numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
797 uint32_t numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;
798 extern uint64_t v3_mem_block_size;
800 switch (vm->run_state) {
801 case VM_INVALID: base->state = V3_VM_INVALID; break;
802 case VM_RUNNING: base->state = V3_VM_RUNNING; break;
803 case VM_STOPPED: base->state = V3_VM_STOPPED; break;
804 case VM_PAUSED: base->state = V3_VM_PAUSED; break;
805 case VM_ERROR: base->state = V3_VM_ERROR; break;
806 case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
807 default: base->state = V3_VM_UNKNOWN; break;
810 for (i=0;i<numcores;i++) {
811 switch (vm->cores[i].core_run_state) {
812 case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
813 case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
814 case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
815 default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
817 switch (vm->cores[i].cpu_mode) {
818 case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
819 case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
820 case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
821 case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
822 case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
823 case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
824 default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
826 switch (vm->cores[i].shdw_pg_mode) {
827 case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
828 case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
829 default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
831 switch (vm->cores[i].mem_mode) {
832 case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
833 case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
834 default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;
837 core->vcore[i].pcore=vm->cores[i].pcpu_id;
838 core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
839 core->vcore[i].num_exits=vm->cores[i].num_exits;
842 core->num_vcores=numcores;
// NOTE(review): this loop is bounded by num_base_regions, not numregions —
// if the caller's buffer is smaller than the VM's region count this can
// overrun mem->region[]. Confirm and consider using numregions here.
844 for (i=0;i<vm->mem_map.num_base_regions;i++) {
845 mem->region[i].host_paddr = (void*)(vm->mem_map.base_regions[i].host_addr);
846 mem->region[i].size = v3_mem_block_size;
849 mem->num_regions=numregions;
/* Checkpoint/migration entry points: thin wrappers around the
 * vmm_checkpoint implementation, compiled in only when the
 * corresponding config options are enabled. */
855 #ifdef V3_CONFIG_CHECKPOINT
856 #include <palacios/vmm_checkpoint.h>
// Save VM state to `store` at `url` (e.g. a file or network target).
858 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
859 return v3_chkpt_save_vm(vm, store, url, opts);
// Restore VM state previously written by v3_save_vm.
863 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
864 return v3_chkpt_load_vm(vm, store, url, opts);
867 #ifdef V3_CONFIG_LIVE_MIGRATION
// Live-migration sender and receiver sides.
868 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
869 return v3_chkpt_send_vm(vm, store, url, opts);
873 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
874 return v3_chkpt_receive_vm(vm, store, url, opts);
/* Destroy a VM and release its resources. Only legal once the VM is
 * STOPPED or in ERROR state; frees devices, per-core state, scheduler
 * registration, and finally the VM structure itself. */
881 int v3_free_vm(struct v3_vm_info * vm) {
883 // deinitialize guest (free memory, etc...)
885 if ((vm->run_state != VM_STOPPED) &&
886 (vm->run_state != VM_ERROR)) {
887 PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);
891 v3_free_vm_devices(vm);
// Per-core teardown before freeing the VM-wide structures.
894 for (i = 0; i < vm->num_cores; i++) {
895 v3_scheduler_free_core(&(vm->cores[i]));
896 v3_free_core(&(vm->cores[i]));
900 v3_scheduler_free_vm(vm);
901 v3_free_vm_internal(vm);
/* Determine the host CPU's paging/execution mode by inspecting control
 * registers (CR4.PAE => PROTECTED_PAE here); a second arch-conditional
 * definition follows for the other build target. */
913 v3_cpu_mode_t v3_get_host_cpu_mode() {
923 cr4 = (struct cr4_32 *)&(cr4_val);
926 return PROTECTED_PAE;
934 v3_cpu_mode_t v3_get_host_cpu_mode() {
/* Conditional debug printf: formats into a bounded local buffer and
 * prints only when the global v3_dbg_enable flag is set. */
940 void v3_print_cond(const char * fmt, ...) {
941 if (v3_dbg_enable == 1) {
// vsnprintf bounds the expansion to the 2048-byte buffer.
946 vsnprintf(buf, 2048, fmt, ap);
949 V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
/* Ask the host OS to deliver interrupt `vector` to `logical_cpu` on
 * behalf of `vm`; silently a no-op if the hook is not provided. */
955 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
956 extern struct v3_os_hooks * os_hooks;
958 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
959 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
/* Enter the guest on the calling core for one execution slice,
 * dispatching to the SVM or VMX world-switch based on the global
 * machine type. Returns the arch entry function's result. */
965 int v3_vm_enter(struct guest_info * info) {
966 switch (v3_mach_type) {
969 case V3_SVM_REV3_CPU:
970 return v3_svm_enter(info);
976 case V3_VMX_EPT_UG_CPU:
977 return v3_vmx_enter(info);
981 PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
/* Accessor: return the host-private handle stored in the VM at creation. */
987 void *v3_get_host_vm(struct v3_vm_info *x)
990 return x->host_priv_data;
996 int v3_get_vcore(struct guest_info *x)