2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
34 #include <interfaces/vmm_file.h>
37 #include <palacios/svm.h>
40 #include <palacios/vmx.h>
43 #ifdef V3_CONFIG_CHECKPOINT
44 #include <palacios/vmm_checkpoint.h>
/* Per-physical-CPU virtualization capability (SVM/VMX variant or
 * V3_INVALID_CPU), filled in by init_cpu() on each CPU. */
v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
/* Global machine type; set in Init_V3() from the first CPU that
 * initializes successfully. */
v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;

/* Host-OS callback table registered via Init_V3(). */
struct v3_os_hooks * os_hooks = NULL;
/* When set to 1, v3_print_cond() emits its output. */
int v3_dbg_enable = 0;
/* Per-CPU initialization: probe this CPU for SVM or VMX support and
 * initialize the matching backend. Runs on the target CPU via
 * hooks->call_on_cpu(); arg carries the cpu_id encoded as a pointer.
 * NOTE(review): the SVM/VMX probes are presumably guarded by
 * V3_CONFIG_SVM / V3_CONFIG_VMX #ifdef arms elided from this listing —
 * confirm against the full source. */
static void init_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    if (v3_is_svm_capable()) {
        PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
        v3_init_svm_cpu(cpu_id);

    if (v3_is_vmx_capable()) {
        PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
        v3_init_vmx_cpu(cpu_id);

    /* Neither SVM nor VMX: this CPU cannot run guests. */
    PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/* Per-CPU teardown: undo init_cpu() for the given CPU, dispatching on
 * the capability recorded in v3_cpu_types[]. arg carries the cpu_id
 * encoded as a pointer. */
static void deinit_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    switch (v3_cpu_types[cpu_id]) {
        /* SVM-family case labels elided from this listing */
        PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
        v3_deinit_svm_cpu(cpu_id);
    /* VMX-family case labels (earlier ones elided from this listing) */
    case V3_VMX_EPT_UG_CPU:
        PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
        v3_deinit_vmx_cpu(cpu_id);
        /* default: CPU was never initialized for virtualization */
        PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/* Returns nonzero iff the host CPU is executing in long mode.
 * Reads EFER (MSR 0xc0000080) and requires both LME (bit 8) and
 * LMA (bit 10) to be set — together, mask 0x500.
 * NOTE(review): declarations of 'high'/'low' are elided from this
 * listing. */
static int in_long_mode()
{
    v3_get_msr(0xc0000080,&high,&low); // EFER
    return ((low & 0x500)== 0x500); // LMA and LME set
/* Global Palacios initialization. Records the host-OS hook table,
 * verifies the host is in 64-bit long mode, initializes every VMM
 * subsystem (memory, devices, paging, cpu_mapper, scheduler,
 * extensions, checkpointing), and then runs init_cpu() on every CPU
 * selected by cpu_mask (NULL mask == all CPUs).
 *
 * hooks    - host-OS callback table (stored in the global os_hooks)
 * cpu_mask - bitmask of CPUs to initialize, indexed major/minor by byte
 * num_cpus - number of CPUs the host offers (clamped to V3_CONFIG_MAX_CPUS)
 * options  - host-provided option string, parsed by v3_parse_options()
 */
void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {

    V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");

/* 32-bit hosts are rejected at compile time (guard elided in listing). */
#error Palacios does not support compilation for a 32 bit host OS!!!!

    if (!in_long_mode()) {
        PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");

    // Set global variables.

    if (num_cpus>V3_CONFIG_MAX_CPUS) {
        PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);

    // Determine the global machine type
    v3_mach_type = V3_INVALID_CPU;

    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
        v3_cpu_types[i] = V3_INVALID_CPU;

    // Parse host-os defined options into an easily-accessed format.
    v3_parse_options(options);

#ifdef V3_CONFIG_MULTIBOOT

    // Memory manager initialization

    // Register all the possible device types

    // Register all shadow paging handlers
    V3_init_shdw_paging();

#ifdef V3_CONFIG_SWAPPING

    // Initialize the cpu_mapper framework (must be before extensions)
    V3_init_cpu_mapper();

    // Initialize the scheduler framework (must be before extensions)
    V3_init_scheduling();

    // Register all extensions
    V3_init_extensions();

    // Enabling cpu_mapper
    V3_enable_cpu_mapper();

    // Enabling scheduler
    V3_enable_scheduler();

#ifdef V3_CONFIG_SYMMOD

#ifdef V3_CONFIG_CHECKPOINT
    V3_init_checkpoint();

    /* Bring up virtualization on each selected CPU; the first CPU to
     * initialize determines the global v3_mach_type. */
    if ((hooks) && (hooks->call_on_cpu)) {

        for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {
            /* major/minor index into cpu_mask by byte and bit
             * (their computation is elided from this listing). */
            if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
                V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
                hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);

                if (v3_mach_type == V3_INVALID_CPU) {
                    v3_mach_type = v3_cpu_types[i];
    // Reverse order of Init_V3
    /* NOTE(review): the enclosing function's signature is elided from
     * this listing (presumably Shutdown_V3) — confirm against the full
     * source. It tears down per-CPU state and every subsystem in the
     * inverse order of Init_V3(). */

    if ((os_hooks) && (os_hooks->call_on_cpu)) {
        for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
            if (v3_cpu_types[i] != V3_INVALID_CPU) {
                V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
                //deinit_cpu((void *)(addr_t)i);

#ifdef V3_CONFIG_CHECKPOINT
    V3_deinit_checkpoint();

#ifdef V3_CONFIG_SYMMOD

    V3_disable_scheduler();

    V3_disable_cpu_mapper();

    V3_deinit_extensions();

    V3_deinit_scheduling();

    V3_deinit_cpu_mapper();

#ifdef V3_CONFIG_SWAPPING
    v3_deinit_swapping();

    V3_deinit_shdw_paging();

#ifdef V3_CONFIG_MULTIBOOT
    v3_deinit_multiboot();
/* Returns the virtualization capability recorded for the given
 * physical CPU (V3_INVALID_CPU if it was never initialized).
 * cpu_id is not bounds-checked; callers must pass a valid index. */
v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
    return v3_cpu_types[cpu_id];
/* Thread entry point for one virtual core. Registers the core with
 * the scheduler, then enters the architecture-specific run loop
 * (SVM or VMX) chosen by the global v3_mach_type. Returns the run
 * loop's result, or an error if the machine type is invalid.
 * p is the core's struct guest_info *. */
static int start_core(void * p)
{
    struct guest_info * core = (struct guest_info *)p;

    if (v3_scheduler_register_core(core) == -1){
        PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);

    PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
               core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);

    switch (v3_mach_type) {
        /* SVM case labels (earlier ones elided from this listing) */
    case V3_SVM_REV3_CPU:
        return v3_start_svm_guest(core);
        /* VMX case labels (earlier ones elided from this listing) */
    case V3_VMX_EPT_UG_CPU:
        return v3_start_vmx_guest(core);
        /* default: no virtualization backend available */
        PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
/* Create (but do not start) a VM from a guest configuration image.
 * Configures the guest, names it, registers it with the cpu_mapper
 * and scheduler, admits it for CPU mapping, and spawns one host
 * thread per virtual core (threads start stopped; v3_start_vm()
 * launches them). Returns the new VM, or NULL-ish failure paths
 * elided from this listing.
 *
 * cfg       - guest configuration blob handed to v3_config_guest()
 * priv_data - host-private pointer stored with the VM
 * name      - VM name; copied, truncated to 127 chars + NUL
 * cpu_mask  - mask of physical CPUs the VM may be mapped onto
 */
struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name, unsigned int cpu_mask) {
    struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);

        PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");

    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    } else if (strlen(name) >= 128) {
        /* NOTE(review): strncpy below copies at most 127 chars, so the
         * message's "128 chars" slightly overstates the kept length. */
        PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");

    memset(vm->name, 0, 128);
    strncpy(vm->name, name, 127);

    if(v3_cpu_mapper_register_vm(vm) == -1) {

        PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");

    /*
     * Register this VM with the palacios scheduler. It will ask for admission
     */
    if(v3_scheduler_register_vm(vm) == -1) {

        PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");

    if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
        PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);

    /* One host thread per virtual core, pinned to its mapped pcpu. */
    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

        struct guest_info * core = &(vm->cores[vcore_id]);

        PrintDebug(vm, VCORE_NONE, "Creating virtual core %u on logical core %u\n",
                   vcore_id, core->pcpu_id);

        sprintf(core->exec_name, "%s-%u", vm->name, vcore_id);

        PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
                   core->pcpu_id, start_core, core, core->exec_name);

        core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name);

        if (core->core_thread == NULL) {
            PrintError(vm, VCORE_NONE, "Thread creation failed\n");
/* Launch a previously created (or stopped) VM. Validates run state,
 * performs multiboot/HVM boot setup, prunes the CPU mask of
 * non-virtualization-capable CPUs, asks the scheduler for admission,
 * marks the VM running, and starts every core thread.
 *
 * vm       - VM created by v3_create_vm(); must be VM_STOPPED
 * cpu_mask - bitmask of physical CPUs the cores may run on
 * Returns 0 on success / -1 on the (elided) failure paths. */
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {

    uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
    uint32_t avail_cores = 0;

        PrintError(VM_NONE, VCORE_NONE, "Asked to start nonexistent VM\n");

    if (vm->run_state != VM_STOPPED) {
        PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);

/* NOTE(review): '#if' here vs '#ifdef' used elsewhere for
 * V3_CONFIG_MULTIBOOT — works if the macro is defined as 1, but is
 * inconsistent with the rest of the file. */
#if V3_CONFIG_MULTIBOOT
    if (v3_setup_multiboot_vm_for_boot(vm)) {
        PrintError(vm, VCORE_NONE, "Multiboot setup for boot failed\n");

    if (v3_setup_hvm_vm_for_boot(vm)) {
        PrintError(vm, VCORE_NONE, "HVM setup for boot failed\n");

    /// CHECK IF WE ARE MULTICORE ENABLED....

    V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    // Check that enough cores are present in the mask to handle vcores
    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
        /* major/minor byte/bit indices (computation elided here). */
        if (core_mask[major] & (0x1 << minor)) {
            if (v3_cpu_types[i] == V3_INVALID_CPU) {
                /* Strip CPUs that never initialized virtualization. */
                core_mask[major] &= ~(0x1 << minor);

    vm->avail_cores = avail_cores;

    if (v3_scheduler_admit_vm(vm) != 0){
        PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);

    vm->run_state = VM_RUNNING;

    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

        struct guest_info * core = &(vm->cores[vcore_id]);

        PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
                   vcore_id, core->pcpu_id);

        if (core->core_run_state==CORE_INVALID) {
            // launch of a fresh VM
            core->core_run_state = CORE_STOPPED;
            // core zero will turn itself on
            // this is a resume - use whatever its current run_state is

        V3_START_THREAD(core->core_thread);
/* Reset a single virtual core to the given RIP, dispatching on the
 * virtualization type of the physical CPU the core sits on.
 * Returns the backend reset result, or the (elided) error path if
 * the CPU has no virtualization support. */
int v3_reset_vm_core(struct guest_info * core, addr_t rip) {

    switch (v3_cpu_types[core->pcpu_id]) {
        /* SVM case labels (earlier ones elided from this listing) */
    case V3_SVM_REV3_CPU:
        PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
        return v3_reset_svm_vm_core(core, rip);
        /* VMX case labels (earlier ones elided from this listing) */
    case V3_VMX_EPT_UG_CPU:
        PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
        return v3_reset_vmx_vm_core(core, rip);
        /* default: no backend to reset with */
        PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
// resets the whole VM (non-HVM) or the ROS (HVM)
/* Convenience wrapper over v3_reset_vm_extended(): an HVM guest gets
 * a ROS-only reset, everything else gets a full reset. The trailing
 * V3_VM_RESET_ALL return presumably covers the non-HVM build
 * (#ifdef arms elided from this listing). */
int v3_reset_vm(struct v3_vm_info *vm)
{
    if (vm->hvm_state.is_hvm) {
        return v3_reset_vm_extended(vm,V3_VM_RESET_ROS,0);
        return v3_reset_vm_extended(vm,V3_VM_RESET_ALL,0);

    return v3_reset_vm_extended(vm,V3_VM_RESET_ALL,0);
/* Reset a range of cores on a running VM. The range [start,end] is
 * derived from the reset type:
 *   V3_VM_RESET_ALL        - every core (rejected for HVM guests)
 *   V3_VM_RESET_HRT / _ROS - the HRT or ROS partition of an HVM guest
 *   V3_VM_RESET_CORE_RANGE - explicit {start,end} passed via data
 * Each targeted core must be running or stopped. The caller sets up a
 * counting barrier, flips the cores to CORE_RESETTING, and kicks them
 * with an IPI; the cores themselves complete the reset. Does not wait
 * for completion (a core may have initiated the reset itself). */
int v3_reset_vm_extended(struct v3_vm_info *vm, v3_vm_reset_type t, void *data)
{
    uint32_t start, end, i;

    if (vm->run_state != VM_RUNNING) {
        PrintError(vm,VCORE_NONE,"Attempt to reset VM in state %d (must be in running state)\n",vm->run_state);

    case V3_VM_RESET_ALL:
        /* Full reset is only meaningful for non-HVM guests. */
        if (vm->hvm_state.is_hvm) {
            PrintError(vm,VCORE_NONE,"Attempt to do ALL reset of HVM (not allowed)\n");
        start=0; end=vm->num_cores-1;

    case V3_VM_RESET_HRT:
    case V3_VM_RESET_ROS:
        if (vm->hvm_state.is_hvm) {
            if (t==V3_VM_RESET_HRT) {
                start = vm->hvm_state.first_hrt_core;
                end = vm->num_cores-1;
                /* else branch (ROS): start elided from this listing */
                end = vm->hvm_state.first_hrt_core-1;
            PrintError(vm,VCORE_NONE,"This is not an HVM and so HVM-specific resets do not apply\n");

    case V3_VM_RESET_CORE_RANGE:
        /* data points at two uint32_t values: {start, end}. */
        start = ((uint32_t*)data)[0];
        end = ((uint32_t*)data)[1];

        PrintError(vm,VCORE_NONE,"Unsupported reset type %d for this VM\n",t);

    PrintDebug(vm,VCORE_NONE,"Resetting cores %d through %d\n",start,end);

    newcount = end-start+1;

    for (i=start;i<=end;i++) {
        if (!(vm->cores[i].core_run_state == CORE_RUNNING || vm->cores[i].core_run_state == CORE_STOPPED)) {
            PrintError(vm,VCORE_NONE,"Cannot reset VM as core %u is in state %d (must be running or stopped)\n",i,vm->cores[i].core_run_state);

    // This had better be the only thread using the barrier at this point...
    v3_init_counting_barrier(&vm->reset_barrier,newcount);

    // OK, I am the reseter, tell the relevant cores what to do
    // each will atomically decrement the reset countdown and then
    // spin waiting for it to hit zero.

    for (i=start;i<=end;i++) {
        vm->cores[i].core_run_state = CORE_RESETTING;
        // force exit of core
        v3_interrupt_cpu(vm, vm->cores[i].pcpu_id, 0);

    // we don't wait for reset to finish
    // because reset could have been initiated by a core
/* move a virtual core to different physical core */
/* Migrates vcore_id of vm onto target_cpu: validates arguments,
 * raises the VM-wide barrier so no core is executing guest code,
 * re-admits the core with the cpu_mapper, flushes
 * architecture-specific hardware state tied to the old CPU, moves the
 * host thread, and finally updates core->pcpu_id. Barrier is lowered
 * on every exit path (elided success paths included). */
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
    struct guest_info * core = NULL;

        PrintError(VM_NONE, VCORE_NONE, "Asked to move core of nonexistent VM\n");

    if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
        PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);

    core = &(vm->cores[vcore_id]);

    if (target_cpu == core->pcpu_id) {
        PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
        // well that was pointless

    if (core->core_thread == NULL) {
        PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");

    /* Quiesce the whole VM before touching per-core state. */
    while (v3_raise_barrier(vm, NULL) == -1);

    V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);

    // Double check that we weren't preemptively migrated
    if (target_cpu != core->pcpu_id) {

        V3_Print(vm, core, "Moving Core\n");

        if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
            PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);

        switch (v3_cpu_types[core->pcpu_id]) {
            /* SVM cases and earlier VMX cases elided from this listing */
        case V3_VMX_EPT_UG_CPU:
            PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
            /* Flush must run on the CPU currently owning the VMCS. */
            V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);

        if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
            PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
                       core->vcpu_id, target_cpu);
            v3_lower_barrier(vm);

        /* There will be a benign race window here:
           core->pcpu_id will be set to the target core before its fully "migrated"
           However the core will NEVER run on the old core again, its just in flight to the new core
        */
        core->pcpu_id = target_cpu;

        V3_Print(vm, core, "core now at %d\n", core->pcpu_id);

    v3_lower_barrier(vm);
/* move a memory region to memory with affinity for a specific physical core */
/* Re-homes the base memory region containing gpa onto the NUMA node
 * of target_cpu: raises the VM barrier, allocates a replacement region
 * on the new node, copies the contents, rewrites the region's
 * host_addr/numa_id, invalidates every core's paging structures
 * (shadow or nested), and frees the old pages. If any core's paging
 * mode is unrecognized, the original region is restored and the new
 * allocation freed. No-ops (successfully) when affinity already
 * matches. Barrier is lowered on all exit paths. */
int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {

    struct v3_mem_region *reg;

        PrintError(VM_NONE, VCORE_NONE, "Asked to move memory of nonexistent VM\n");

    old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);

        PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);

    new_node = v3_numa_cpu_to_node(target_cpu);

        PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);

    if (new_node==old_node) {
        PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");

    // We are now going to change the universe, so
    // we'll barrier everyone first

    while (v3_raise_barrier(vm, NULL) == -1);

    reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);

        PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");

    if (!(reg->flags.base) || !(reg->flags.alloced)) {
        PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);

    // we now have the allocated base region corresponding to - and not a copy
    // we will rewrite this region after moving its contents

    // first, let's double check that we are in fact changing the numa_id...

    if (reg->numa_id==new_node) {
        PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");

    // region uses exclusive addressing [guest_start,guest_end)
    num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;

    new_hpa = V3_AllocPagesExtended(num_pages,
                                    /* node/flag args elided from this listing */
                                    0, 0); // no constraints given new shadow pager impl

        PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");

    // Note, assumes virtual contiguity in the host OS...
    memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);

    old_hpa = (void*)(reg->host_addr);
    old_node = (int)(reg->numa_id);

    reg->host_addr = (addr_t)new_hpa;
    reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);

    // flush all page tables / kill all humans

    for (i=0;i<vm->num_cores;i++) {
        if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
            v3_invalidate_shadow_pts(&(vm->cores[i]));
        } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
            // nested invalidator uses inclusive addressing [start,end], not [start,end)
            v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);
            /* else branch: unknown paging mode — undo the move */
            PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
            // We'll restore things...
            reg->host_addr = (addr_t) old_hpa;
            reg->numa_id = old_node;
            V3_FreePages(new_hpa,num_pages);

    // Now the old region can go away...
    V3_FreePages(old_hpa,num_pages);

    PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
               (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));

    v3_lower_barrier(vm);

    v3_lower_barrier(vm);
/* Stop a running (or simulating) VM: set run_state to VM_STOPPED and
 * then poll, via the barrier and the scheduler, until every core has
 * observed the state change and reached CORE_STOPPED. Returns 0 on
 * success / -1 on the (elided) error paths. */
int v3_stop_vm(struct v3_vm_info * vm) {

    struct guest_info * running_core;

        PrintError(VM_NONE, VCORE_NONE, "Asked to stop nonexistent VM\n");

    if ((vm->run_state != VM_RUNNING) &&
        (vm->run_state != VM_SIMULATING)) {
        PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);

    /* Cores see this state and stop themselves. */
    vm->run_state = VM_STOPPED;

    // Sanity check to catch any weird execution states
    if (v3_wait_for_barrier(vm, NULL) == 0) {
        v3_lower_barrier(vm);

    // XXX force exit all cores via a cross call/IPI XXX

    /* Wait loop: keep kicking the scheduler for any core that is not
     * yet stopped (loop construct partially elided from this listing). */
        int still_running = 0;

        for (i = 0; i < vm->num_cores; i++) {
            if (vm->cores[i].core_run_state != CORE_STOPPED) {
                running_core = &vm->cores[i];

        if (still_running == 0) {

        v3_scheduler_stop_core(running_core);

    V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
/* Pause a running VM: raise the VM-wide barrier (parking every core)
 * and mark the VM paused. The barrier stays raised until
 * v3_continue_vm() lowers it. Returns 0 / -1 (paths elided). */
int v3_pause_vm(struct v3_vm_info * vm) {

        PrintError(VM_NONE, VCORE_NONE, "Asked to pause nonexistent VM\n");

    if (vm->run_state != VM_RUNNING) {
        PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");

    while (v3_raise_barrier(vm, NULL) == -1);

    vm->run_state = VM_PAUSED;
/* Resume a paused VM: mark it running again and lower the barrier
 * raised by v3_pause_vm(), releasing the cores. Returns 0 / -1
 * (paths elided). */
int v3_continue_vm(struct v3_vm_info * vm) {

        PrintError(VM_NONE, VCORE_NONE, "Asked to continue nonexistent VM\n");

    if (vm->run_state != VM_PAUSED) {
        PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");

    vm->run_state = VM_RUNNING;

    v3_lower_barrier(vm);
/* Core timeout callback used by v3_simulate_vm(): marks this vcore's
 * bit in the shared timeout bitmap and then spins until the
 * simulation controller clears it. private_data is the
 * struct v3_bitmap * shared with v3_simulate_vm(). */
static int sim_callback(struct guest_info * core, void * private_data) {
    struct v3_bitmap * timeout_map = private_data;

    v3_bitmap_set(timeout_map, core->vcpu_id);

    V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);

    while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
        // We spin here if there is noone to yield to
/* Run a paused VM for approximately msecs milliseconds, then return
 * it to the paused state. Arms a per-core cycle timeout (msecs
 * converted via CPU_KHZ — note cycles = msecs * khz yields
 * ms * cycles/ms), lowers the barrier to let the cores run, and waits
 * until every core has hit its timeout (sim_callback sets a bit per
 * core). Finally re-raises the barrier, restores VM_PAUSED, and
 * clears the bitmap to release the spinning cores. */
int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
    struct v3_bitmap timeout_map;

    uint64_t cpu_khz = V3_CPU_KHZ();

        PrintError(VM_NONE, VCORE_NONE, "Asked to simulate nonexistent VM\n");

    if (vm->run_state != VM_PAUSED) {
        PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");

    /* AT this point VM is paused */

    v3_bitmap_init(&timeout_map, vm->num_cores);

    // calculate cycles from msecs...
    // IMPORTANT: Floating point not allowed.
    cycles = (msecs * cpu_khz);

    V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);

    for (i = 0; i < vm->num_cores; i++) {
        if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
            PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);

    V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");

    // Run the simulation
    //    vm->run_state = VM_SIMULATING;
    vm->run_state = VM_RUNNING;
    v3_lower_barrier(vm);

    V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");

    // block until simulation is complete
    while (all_blocked == 0) {

        for (i = 0; i < vm->num_cores; i++) {
            if (v3_bitmap_check(&timeout_map, i) == 0) {

        if (all_blocked == 1) {

        // Intentionally spin if there is no one to yield to

    V3_Print(vm, VCORE_NONE,"Simulation is complete\n");

    // Simulation is complete
    // Reset back to PAUSED state

    v3_raise_barrier_nowait(vm, NULL);
    vm->run_state = VM_PAUSED;

    /* Clearing the bitmap releases the cores spinning in sim_callback. */
    v3_bitmap_reset(&timeout_map);

    v3_wait_for_barrier(vm, NULL);
/* Snapshot the externally visible state of a VM into caller-supplied
 * structures: overall run state and VM type (base), per-vcore run
 * state / CPU mode / paging mode / location (core), and the base
 * memory-region map (mem). The caller pre-sets core->num_vcores and
 * mem->num_regions to its buffer capacities; both are clamped to the
 * VM's actual counts and written back with the number filled in.
 * All four pointers must be non-NULL. */
int v3_get_state_vm(struct v3_vm_info        *vm,
                    struct v3_vm_base_state  *base,
                    struct v3_vm_core_state  *core,
                    struct v3_vm_mem_state   *mem)
{
    extern uint64_t v3_mem_block_size;

    if (!vm || !base || !core || !mem)  {
        PrintError(VM_NONE, VCORE_NONE, "Invalid request to v3_get_state_vm\n");

    /* Clamp to the smaller of caller capacity and actual counts. */
    numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
    numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;

    /* Map internal run state to the exported enumeration. */
    switch (vm->run_state) {
    case VM_INVALID: base->state = V3_VM_INVALID; break;
    case VM_RUNNING: base->state = V3_VM_RUNNING; break;
    case VM_STOPPED: base->state = V3_VM_STOPPED; break;
    case VM_PAUSED: base->state = V3_VM_PAUSED; break;
    case VM_ERROR: base->state = V3_VM_ERROR; break;
    case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
    case VM_RESETTING: base->state = V3_VM_RESETTING; break;
    default: base->state = V3_VM_UNKNOWN; break;

    base->vm_type = V3_VM_GENERAL;

#ifdef V3_CONFIG_HVM
    if (vm->hvm_state.is_hvm) {
        base->vm_type = V3_VM_HVM;

    /* Per-vcore state translation. */
    for (i=0;i<numcores;i++) {
        switch (vm->cores[i].core_run_state) {
        case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
        case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
        case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
        case CORE_RESETTING: core->vcore[i].state = V3_VCORE_RESETTING; break;
        default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
        switch (vm->cores[i].cpu_mode) {
        case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
        case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
        case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
        case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
        case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
        case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
        default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
        switch (vm->cores[i].shdw_pg_mode) {
        case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
        case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
        default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
        switch (vm->cores[i].mem_mode) {
        case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
        case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
        default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;

        core->vcore[i].vcore_type = V3_VCORE_GENERAL;

#ifdef V3_CONFIG_HVM
        if (vm->hvm_state.is_hvm) {
            if (v3_is_hvm_ros_core(&vm->cores[i])) {
                core->vcore[i].vcore_type = V3_VCORE_ROS;
                core->vcore[i].vcore_type = V3_VCORE_HRT;

        core->vcore[i].pcore=vm->cores[i].pcpu_id;
        core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
        core->vcore[i].num_exits=vm->cores[i].num_exits;

    core->num_vcores=numcores;

    /* Base regions are laid out contiguously in GPA space, each
     * v3_mem_block_size bytes. */
    for (i=0;i<numregions;i++) {
        mem->region[i].guest_paddr = cur_gpa;
        mem->region[i].host_paddr =  (void*)(vm->mem_map.base_regions[i].host_addr);
        mem->region[i].size = v3_mem_block_size;
#ifdef V3_CONFIG_SWAPPING
        mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
        mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;
        /* non-swapping build: regions are never swapped or pinned */
        mem->region[i].swapped = 0;
        mem->region[i].pinned = 0;

        cur_gpa += mem->region[i].size;

    mem->num_regions=numregions;

    mem->mem_size=vm->mem_size;
    mem->ros_mem_size=vm->mem_size;

#ifdef V3_CONFIG_HVM
    if (vm->hvm_state.is_hvm) {
        mem->ros_mem_size=v3_get_hvm_ros_memsize(vm);
/* Report the buffer sizes a caller needs before calling
 * v3_get_state_vm(): the VM's vcore count and base-region count.
 * All pointers must be non-NULL. Returns 0 / -1 (error return
 * elided from this listing). */
int v3_get_state_sizes_vm(struct v3_vm_info        *vm,
                          unsigned long long       *num_vcores,
                          unsigned long long       *num_regions)
{
    if (!vm || !num_vcores || !num_regions) {
        PrintError(VM_NONE, VCORE_NONE, "Invalid request to v3_get_state_sizes\n");

    *num_vcores = vm->num_cores;
    *num_regions = vm->mem_map.num_base_regions;
1104 #ifdef V3_CONFIG_CHECKPOINT
1105 #include <palacios/vmm_checkpoint.h>
1107 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1108 if (!vm || !store || !url) {
1109 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_save_vm\n");
1112 return v3_chkpt_save_vm(vm, store, url, opts);
1116 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1117 if (!vm || !store || !url) {
1118 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_load_vm\n");
1121 return v3_chkpt_load_vm(vm, store, url, opts);
1124 #ifdef V3_CONFIG_LIVE_MIGRATION
1125 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1126 if (!vm || !store || !url) {
1127 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_send_vm\n");
1130 return v3_chkpt_send_vm(vm, store, url, opts);
1134 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1135 if (!vm || !store || !url) {
1136 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_receive_vm\n");
1139 return v3_chkpt_receive_vm(vm, store, url, opts);
/* Destroy a VM that is stopped or errored: free its devices, each
 * core's scheduler state and core resources, the VM's scheduler
 * registration, and finally the VM structure itself. Returns 0 / -1
 * (paths elided from this listing). */
int v3_free_vm(struct v3_vm_info * vm) {

    // deinitialize guest (free memory, etc...)

        PrintError(VM_NONE, VCORE_NONE, "Asked to free nonexistent VM\n");

    if ((vm->run_state != VM_STOPPED) &&
        (vm->run_state != VM_ERROR)) {
        PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);

    v3_free_vm_devices(vm);

    for (i = 0; i < vm->num_cores; i++) {
        v3_scheduler_free_core(&(vm->cores[i]));
        v3_free_core(&(vm->cores[i]));

    v3_scheduler_free_vm(vm);
    v3_free_vm_internal(vm);
/* Determine the host CPU's operating mode by inspecting CR4
 * (32-bit build variant; the surrounding #ifdef arms are elided from
 * this listing). PAE set => PROTECTED_PAE. */
v3_cpu_mode_t v3_get_host_cpu_mode() {

    struct cr4_32 * cr4;

    cr4 = (struct cr4_32 *)&(cr4_val);

    if (cr4->pae == 1) {
        return PROTECTED_PAE;

/* 64-bit build variant of the same query (body elided from this
 * listing — presumably returns LONG). */
v3_cpu_mode_t v3_get_host_cpu_mode() {
/* Conditional debug print: formats into a local buffer and emits it
 * via V3_Print, but only when the global v3_dbg_enable flag is 1.
 * (va_start/va_end and the buffer declaration are elided from this
 * listing.) */
void v3_print_cond(const char * fmt, ...) {
    if (v3_dbg_enable == 1) {

        vsnprintf(buf, 2048, fmt, ap);

        V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
/* Send an interrupt (IPI) with the given vector to a physical CPU,
 * delegating to the host OS's interrupt_cpu hook if one was
 * registered. No-op when the hook is absent. */
void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
    extern struct v3_os_hooks * os_hooks;

    if ((os_hooks) && (os_hooks)->interrupt_cpu) {
        (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
/* Enter the guest on the calling core for one execution burst,
 * dispatching to the SVM or VMX entry path according to the global
 * v3_mach_type. Returns the backend's result, or the (elided) error
 * path for an invalid machine type. */
int v3_vm_enter(struct guest_info * info) {
    switch (v3_mach_type) {
#ifdef V3_CONFIG_SVM
        /* earlier SVM case labels elided from this listing */
    case V3_SVM_REV3_CPU:
        return v3_svm_enter(info);
        /* VMX arm (#ifdef and earlier cases elided from this listing) */
    case V3_VMX_EPT_CPU:
    case V3_VMX_EPT_UG_CPU:
        return v3_vmx_enter(info);
        /* default: no usable virtualization backend */
        PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
/* Accessor: return the host-private data pointer stored with the VM
 * at creation time (the priv_data argument of v3_create_vm()). */
void *v3_get_host_vm(struct v3_vm_info *x)
{
    return x->host_priv_data;
1266 int v3_get_vcore(struct guest_info *x)