2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
34 #include <interfaces/vmm_file.h>
37 #include <palacios/svm.h>
40 #include <palacios/vmx.h>
43 #ifdef V3_CONFIG_CHECKPOINT
44 #include <palacios/vmm_checkpoint.h>
/* Per-physical-CPU virtualization capability, filled in by init_cpu()
 * via the per-arch init routines; V3_INVALID_CPU until initialized. */
v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
/* Machine-wide virtualization type; set in Init_V3 from the first
 * successfully initialized CPU's entry in v3_cpu_types. */
v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;

/* Host-OS callback table; registered by the host embedding Palacios. */
struct v3_os_hooks * os_hooks = NULL;
/* Global debug-output enable flag. */
int v3_dbg_enable = 0;
/*
 * Per-CPU init callback, run on each physical CPU from Init_V3 via
 * hooks->call_on_cpu.  Probes for SVM then VMX support and initializes
 * the matching backend for this cpu_id; logs an error if the CPU has
 * neither extension.
 *
 * NOTE(review): the #ifdef guards, braces, and else-arms between these
 * lines are elided in this excerpt.
 */
static void init_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    if (v3_is_svm_capable()) {
	PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
	v3_init_svm_cpu(cpu_id);

    if (v3_is_vmx_capable()) {
	PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
	v3_init_vmx_cpu(cpu_id);

    PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/*
 * Per-CPU teardown callback (inverse of init_cpu), run on each physical
 * CPU during shutdown.  Dispatches on the recorded v3_cpu_types entry
 * to the SVM or VMX deinit routine.
 *
 * NOTE(review): case labels, breaks, and closing braces between these
 * lines are elided in this excerpt.
 */
static void deinit_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    switch (v3_cpu_types[cpu_id]) {

	PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
	v3_deinit_svm_cpu(cpu_id);

	case V3_VMX_EPT_UG_CPU:
	    PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
	    v3_deinit_vmx_cpu(cpu_id);

	    PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/*
 * Returns nonzero iff the host CPU is executing in long mode.
 * Reads the EFER MSR (0xc0000080) and requires both LME (bit 8) and
 * LMA (bit 10) to be set — hence the 0x500 mask.
 * NOTE(review): the declarations of 'high' and 'low' are elided here.
 */
static int in_long_mode()

    v3_get_msr(0xc0000080,&high,&low); // EFER

    return ((low & 0x500)== 0x500); // LMA and LME set
/*
 * Global Palacios initialization.  Must be called once by the host OS
 * before any VM is created.
 *
 * hooks    - host OS callback table (stored in the os_hooks global)
 * cpu_mask - bitmap of physical CPUs Palacios may use; NULL means all
 * num_cpus - number of CPUs the host reports (clamped to
 *            V3_CONFIG_MAX_CPUS with a warning)
 * options  - host-supplied option string, parsed by v3_parse_options
 *
 * Initializes, in order: option parsing, memory manager, device types,
 * shadow paging, cpu_mapper, scheduler, extensions, (optionally)
 * checkpointing — then runs init_cpu on every permitted CPU and derives
 * the global v3_mach_type from the first initialized CPU.
 *
 * NOTE(review): many lines (declarations of i/major/minor, #endif
 * markers, closing braces, and several subsystem init calls) are elided
 * in this excerpt.
 */
void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {

    V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");

#error Palacios does not support compilation for a 32 bit host OS!!!!

    // Palacios requires a 64-bit host running in long mode.
    if (!in_long_mode()) {
	PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");

    // Set global variables.

    if (num_cpus>V3_CONFIG_MAX_CPUS) {
	PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);

    // Determine the global machine type
    v3_mach_type = V3_INVALID_CPU;

    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
	v3_cpu_types[i] = V3_INVALID_CPU;

#ifdef V3_CONFIG_CACHEPART

    // Parse host-os defined options into an easily-accessed format.
    v3_parse_options(options);

#ifdef V3_CONFIG_MULTIBOOT

    // Memory manager initialization

    // Register all the possible device types

    // Register all shadow paging handlers
    V3_init_shdw_paging();

#ifdef V3_CONFIG_SWAPPING

    // Initialize the cpu_mapper framework (must be before extensions)
    V3_init_cpu_mapper();

    // Initialize the scheduler framework (must be before extensions)
    V3_init_scheduling();

    // Register all extensions
    V3_init_extensions();

    // Enabling cpu_mapper
    V3_enable_cpu_mapper();

    // Enabling scheduler
    V3_enable_scheduler();

#ifdef V3_CONFIG_SYMMOD

#ifdef V3_CONFIG_CHECKPOINT
    V3_init_checkpoint();

    // Bring up virtualization on each permitted CPU and record the
    // machine type from the first one that initializes.
    if ((hooks) && (hooks->call_on_cpu)) {

	for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {

	    if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
		V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
		hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);

		if (v3_mach_type == V3_INVALID_CPU) {
		    v3_mach_type = v3_cpu_types[i];
    /* Global teardown sequence — deinitializes subsystems in the
     * reverse order of Init_V3, after first running deinit_cpu on every
     * CPU that was successfully initialized.
     * NOTE(review): the enclosing function's header is elided in this
     * excerpt (presumably Shutdown_V3 — confirm against full source). */
    // Reverse order of Init_V3

    if ((os_hooks) && (os_hooks->call_on_cpu)) {
	for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
	    if (v3_cpu_types[i] != V3_INVALID_CPU) {
		V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
		//deinit_cpu((void *)(addr_t)i);

#ifdef V3_CONFIG_CHECKPOINT
    V3_deinit_checkpoint();

#ifdef V3_CONFIG_SYMMOD

    V3_disable_scheduler();

    V3_disable_cpu_mapper();

    V3_deinit_extensions();

    V3_deinit_scheduling();

    V3_deinit_cpu_mapper();

#ifdef V3_CONFIG_SWAPPING
    v3_deinit_swapping();

    V3_deinit_shdw_paging();

#ifdef V3_CONFIG_MULTIBOOT
    v3_deinit_multiboot();

#ifdef V3_CONFIG_CACHEPART
    v3_deinit_cachepart();
/* Return the detected virtualization architecture of physical CPU cpu_id.
 * NOTE(review): cpu_id is not bounds-checked against V3_CONFIG_MAX_CPUS;
 * callers must pass a valid index. */
v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
    return v3_cpu_types[cpu_id];
/*
 * Thread entry point for one virtual core (passed to
 * V3_CREATE_THREAD_ON_CPU in v3_create_vm).  Registers the core with
 * the scheduler, then enters the guest via the SVM or VMX start routine
 * selected by the global v3_mach_type; does not return while the guest
 * runs.  Returns -1 style error codes on failure paths.
 * NOTE(review): case labels, #ifdef guards, and braces are elided here.
 */
static int start_core(void * p)

    struct guest_info * core = (struct guest_info *)p;

    if (v3_scheduler_register_core(core) == -1){
	PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);

    PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
	       core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);

    switch (v3_mach_type) {

	case V3_SVM_REV3_CPU:
	    return v3_start_svm_guest(core);

	case V3_VMX_EPT_UG_CPU:
	    return v3_start_vmx_guest(core);

	    PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
/*
 * Create (but do not start) a VM from a parsed configuration blob.
 *
 * cfg       - configuration image, consumed by v3_config_guest
 * priv_data - host-private pointer stored with the VM
 * name      - VM name, copied (truncated) into vm->name[128]
 * cpu_mask  - mask of physical CPUs the VM's vcores may map onto
 *
 * Registers the VM with the cpu_mapper and scheduler, admits it for
 * mapping, then creates one (not yet started) host thread per vcore
 * running start_core.  Returns the new VM, or NULL-path on error
 * (error returns elided in this excerpt).
 *
 * NOTE(review): strncpy(vm->name, name, 128) does NOT NUL-terminate
 * when strlen(name) >= 128 — the preceding memset is fully overwritten.
 * Consider strncpy(..., 127) or snprintf.  Confirm vm->name is 128 bytes.
 */
struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name, unsigned int cpu_mask) {
    struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);

	PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");

    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    } else if (strlen(name) >= 128) {
	PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");

    memset(vm->name, 0, 128);
    strncpy(vm->name, name, 128);

    if(v3_cpu_mapper_register_vm(vm) == -1) {

	PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");

    /*
     * Register this VM with the palacios scheduler. It will ask for admission
     */
    if(v3_scheduler_register_vm(vm) == -1) {

	PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");

    if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
	PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);

    // Create one host thread per virtual core, pinned to its mapped
    // physical core; threads are started later by v3_start_vm.
    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

	struct guest_info * core = &(vm->cores[vcore_id]);

	PrintDebug(vm, VCORE_NONE, "Creating virtual core %u on logical core %u\n",
		   vcore_id, core->pcpu_id);

	sprintf(core->exec_name, "%s-%d", vm->name, vcore_id);

	PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
		   core->pcpu_id, start_core, core, core->exec_name);

	// Resource controls for cores can be independent, but
	// currently are not, hence this copy.
	core->resource_control = vm->resource_control;

	core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name, &core->resource_control);

	if (core->core_thread == NULL) {
	    PrintError(vm, VCORE_NONE, "Thread creation failed\n");
/*
 * Launch (or resume) a previously created VM.
 *
 * vm       - VM created by v3_create_vm; must be in VM_STOPPED state
 * cpu_mask - bitmap of physical CPUs usable for this launch; cores on
 *            CPUs without virtualization support are masked out
 *
 * Performs optional multiboot/HVM boot setup, counts available cores,
 * asks the scheduler for admission, marks the VM VM_RUNNING, then
 * starts every vcore's host thread.  Return statements are elided in
 * this excerpt.
 *
 * NOTE(review): the multiboot guard uses "#if V3_CONFIG_MULTIBOOT"
 * while the rest of this file uses "#ifdef V3_CONFIG_MULTIBOOT" —
 * works only if the macro is defined to a value; worth unifying.
 */
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {

    uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
    uint32_t avail_cores = 0;

	PrintError(VM_NONE, VCORE_NONE, "Asked to start nonexistent VM\n");

    if (vm->run_state != VM_STOPPED) {
	PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);

#if V3_CONFIG_MULTIBOOT
    if (v3_setup_multiboot_vm_for_boot(vm)) {
	PrintError(vm, VCORE_NONE, "Multiboot setup for boot failed\n");

    if (v3_setup_hvm_vm_for_boot(vm)) {
	PrintError(vm, VCORE_NONE, "HVM setup for boot failed\n");

    /// CHECK IF WE ARE MULTICORE ENABLED....

    V3_Print(vm, VCORE_NONE, "V3 --  Starting VM (%u cores)\n", vm->num_cores);
    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    // Check that enough cores are present in the mask to handle vcores
    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {

	if (core_mask[major] & (0x1 << minor)) {
	    if (v3_cpu_types[i] == V3_INVALID_CPU) {
		// Strip CPUs without virtualization support from the mask.
		core_mask[major] &= ~(0x1 << minor);

    vm->avail_cores = avail_cores;

    if (v3_scheduler_admit_vm(vm) != 0){
	PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);

    vm->run_state = VM_RUNNING;

    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

	struct guest_info * core = &(vm->cores[vcore_id]);

	PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
		   vcore_id, core->pcpu_id);

	if (core->core_run_state==CORE_INVALID) {
	    // launch of a fresh VM
	    core->core_run_state = CORE_STOPPED;
	    // core zero will turn itself on

	// this is a resume - use whatever its current run_state is

	V3_START_THREAD(core->core_thread);
/*
 * Reset a single virtual core to the given RIP, dispatching to the
 * SVM or VMX reset routine based on the physical CPU's detected type.
 * Returns the backend's result; error paths elided in this excerpt.
 */
int v3_reset_vm_core(struct guest_info * core, addr_t rip) {

    switch (v3_cpu_types[core->pcpu_id]) {

	case V3_SVM_REV3_CPU:
	    PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
	    return v3_reset_svm_vm_core(core, rip);

	case V3_VMX_EPT_UG_CPU:
	    PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
	    return v3_reset_vmx_vm_core(core, rip);

	    PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
// resets the whole VM (non-HVM) or the ROS (HVM)
/* Convenience wrapper around v3_reset_vm_extended: for an HVM, resets
 * only the ROS partition; otherwise resets all cores. */
int v3_reset_vm(struct v3_vm_info *vm)

    if (vm->hvm_state.is_hvm) {
	return v3_reset_vm_extended(vm,V3_VM_RESET_ROS,0);

	return v3_reset_vm_extended(vm,V3_VM_RESET_ALL,0);

    return v3_reset_vm_extended(vm,V3_VM_RESET_ALL,0);
/*
 * Reset a range of a running VM's cores.
 *
 * t    - reset type: ALL (non-HVM only), HRT/ROS (HVM partitions), or
 *        CORE_RANGE with data = uint32_t[2]{start,end} (inclusive)
 * data - extra argument for CORE_RANGE, otherwise unused
 *
 * Validates the VM is VM_RUNNING and every targeted core is RUNNING or
 * STOPPED, initializes the reset counting barrier, flips each core to
 * CORE_RESETTING, and kicks it with an IPI.  Does not wait for the
 * reset to complete (the initiator may itself be a target core).
 *
 * NOTE(review): the CORE_RANGE path dereferences 'data' without a NULL
 * check; callers must supply a valid two-element array.
 */
int v3_reset_vm_extended(struct v3_vm_info *vm, v3_vm_reset_type t, void *data)

    uint32_t start, end, i;

    if (vm->run_state != VM_RUNNING) {
	PrintError(vm,VCORE_NONE,"Attempt to reset VM in state %d (must be in running state)\n",vm->run_state);

	case V3_VM_RESET_ALL:
	    // A full reset is only meaningful for a non-HVM VM.
	    if (vm->hvm_state.is_hvm) {
		PrintError(vm,VCORE_NONE,"Attempt to do ALL reset of HVM (not allowed)\n");

	    start=0; end=vm->num_cores-1;

	case V3_VM_RESET_HRT:
	case V3_VM_RESET_ROS:
	    if (vm->hvm_state.is_hvm) {
		if (t==V3_VM_RESET_HRT) {
		    // HRT cores occupy the tail of the core array.
		    start = vm->hvm_state.first_hrt_core;
		    end = vm->num_cores-1;

		    end = vm->hvm_state.first_hrt_core-1;

		PrintError(vm,VCORE_NONE,"This is not an HVM and so HVM-specific resets do not apply\n");

	case V3_VM_RESET_CORE_RANGE:

	    start = ((uint32_t*)data)[0];
	    end = ((uint32_t*)data)[1];

	    PrintError(vm,VCORE_NONE,"Unsupported reset type %d for this VM\n",t);

    PrintDebug(vm,VCORE_NONE,"Resetting cores %d through %d\n",start,end);

    newcount = end-start+1;

    // Refuse unless every targeted core is in a resettable state.
    for (i=start;i<=end;i++) {
	if (!(vm->cores[i].core_run_state == CORE_RUNNING || vm->cores[i].core_run_state == CORE_STOPPED)) {
	    PrintError(vm,VCORE_NONE,"Cannot reset VM as core %u is in state %d (must be running or stopped)\n",i,vm->cores[i].core_run_state);

    // This had better be the only thread using the barrier at this point...
    v3_init_counting_barrier(&vm->reset_barrier,newcount);

    // OK, I am the reseter, tell the relevant cores what to do
    // each will atomically decrement the reset countdown and then
    // spin waiting for it to hit zero.

    for (i=start;i<=end;i++) {
	vm->cores[i].core_run_state = CORE_RESETTING;
	// force exit of core
	v3_interrupt_cpu(vm, vm->cores[i].pcpu_id, 0);

    // we don't wait for reset to finish
    // because reset could have been initiated by a core
/* move a virtual core to different physical core */
/*
 * Migrate vcore vcore_id of vm to physical CPU target_cpu.
 *
 * Validates vm/vcore/thread, raises the VM-wide barrier to quiesce all
 * cores, re-checks the target (the core may have been preemptively
 * migrated), admits the core with the cpu_mapper, flushes per-arch
 * hardware state (VMX flush shown; SVM arm elided in this excerpt),
 * moves the host thread, updates core->pcpu_id, and lowers the barrier.
 * Return statements are elided in this excerpt.
 */
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
    struct guest_info * core = NULL;

	PrintError(VM_NONE, VCORE_NONE, "Asked to move core of nonexistent VM\n");

    if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
	PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);

    core = &(vm->cores[vcore_id]);

    if (target_cpu == core->pcpu_id) {
	PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
	// well that was pointless

    if (core->core_thread == NULL) {
	PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");

    // Quiesce the whole VM before touching core->pcpu_id.
    while (v3_raise_barrier(vm, NULL) == -1);

    V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);

    // Double check that we weren't preemptively migrated
    if (target_cpu != core->pcpu_id) {

	V3_Print(vm, core, "Moving Core\n");

	if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
	    PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);

	switch (v3_cpu_types[core->pcpu_id]) {

	    case V3_VMX_EPT_UG_CPU:
		PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
		// Flush must run on the CPU that currently owns the VMCS.
		V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);

	if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
	    PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
		       core->vcpu_id, target_cpu);
	    v3_lower_barrier(vm);

	/* There will be a benign race window here:
	   core->pcpu_id will be set to the target core before its fully "migrated"
	   However the core will NEVER run on the old core again, its just in flight to the new core
	*/
	core->pcpu_id = target_cpu;

	V3_Print(vm, core, "core now at %d\n", core->pcpu_id);

    v3_lower_barrier(vm);
/* move a memory region to memory with affinity for a specific physical core */
/*
 * Re-home the base memory region containing guest physical address gpa
 * onto the NUMA node of target_cpu.
 *
 * If the region is already on the target node this is a no-op.
 * Otherwise: barrier all cores, allocate a same-size region on the new
 * node, copy the contents, swap host_addr/numa_id in the region, flush
 * every core's paging structures (shadow PT invalidation or nested
 * address-range invalidation), free the old pages, and lower the
 * barrier.  On a paging mode it cannot handle, it reverts to the old
 * region and frees the new allocation.
 *
 * NOTE(review): declarations (old_node, new_node, num_pages, new_hpa,
 * old_hpa, i), some V3_AllocPagesExtended arguments, and several return
 * statements are elided in this excerpt.
 */
int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {

    struct v3_mem_region *reg;

	PrintError(VM_NONE, VCORE_NONE, "Asked to move memory of nonexistent VM\n");

    old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);

	PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);

    new_node = v3_numa_cpu_to_node(target_cpu);

	PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);

    if (new_node==old_node) {
	PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");

    // We are now going to change the universe, so
    // we'll barrier everyone first

    while (v3_raise_barrier(vm, NULL) == -1);

    reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);

	PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");

    // Only allocated base regions can be physically relocated.
    if (!(reg->flags.base) || !(reg->flags.alloced)) {
	PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);

    // we now have the allocated base region corresponding to - and not a copy
    // we will rewrite this region after moving its contents

    // first, let's double check that we are in fact changing the numa_id...

    if (reg->numa_id==new_node) {
	PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");

    // region uses exclusive addressing [guest_start,guest_end)
    num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;

    new_hpa = V3_AllocPagesExtended(num_pages,

				    vm->resource_control.pg_filter_func,
				    vm->resource_control.pg_filter_state);

	PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");

    // Note, assumes virtual contiguity in the host OS...
    memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);

    old_hpa = (void*)(reg->host_addr);
    old_node = (int)(reg->numa_id);

    reg->host_addr = (addr_t)new_hpa;
    reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);

    // flush all page tables / kill all humans

    for (i=0;i<vm->num_cores;i++) {
	if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
	    v3_invalidate_shadow_pts(&(vm->cores[i]));
	} else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
	    // nested invalidator uses inclusive addressing [start,end], not [start,end)
	    v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);

	    PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
	    // We'll restore things...
	    reg->host_addr = (addr_t) old_hpa;
	    reg->numa_id = old_node;
	    V3_FreePages(new_hpa,num_pages);

    // Now the old region can go away...
    V3_FreePages(old_hpa,num_pages);

    PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
	       (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));

    v3_lower_barrier(vm);

    v3_lower_barrier(vm);
/*
 * Stop a running (or simulating) VM.
 *
 * Marks the VM VM_STOPPED, then waits (with yields — elided in this
 * excerpt) until every core has observed the state change and entered
 * CORE_STOPPED, asking the scheduler to deschedule any straggler.
 * Error returns are elided in this excerpt.
 */
int v3_stop_vm(struct v3_vm_info * vm) {

    struct guest_info * running_core;

	PrintError(VM_NONE, VCORE_NONE, "Asked to stop nonexistent VM\n");

    if ((vm->run_state != VM_RUNNING) &&
	(vm->run_state != VM_SIMULATING)) {
	PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);

    vm->run_state = VM_STOPPED;

    // Sanity check to catch any weird execution states
    if (v3_wait_for_barrier(vm, NULL) == 0) {
	v3_lower_barrier(vm);

    // XXX force exit all cores via a cross call/IPI XXX

	int still_running = 0;

	for (i = 0; i < vm->num_cores; i++) {
	    if (vm->cores[i].core_run_state != CORE_STOPPED) {
		running_core = &vm->cores[i];

	if (still_running == 0) {

	v3_scheduler_stop_core(running_core);

    V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
/*
 * Pause a running VM by raising the VM-wide barrier (all cores block in
 * the barrier) and marking it VM_PAUSED.  The barrier stays raised
 * until v3_continue_vm lowers it.  Return statements elided here.
 */
int v3_pause_vm(struct v3_vm_info * vm) {

	PrintError(VM_NONE, VCORE_NONE, "Asked to pause nonexistent VM\n");

    if (vm->run_state != VM_RUNNING) {
	PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");

    while (v3_raise_barrier(vm, NULL) == -1);

    vm->run_state = VM_PAUSED;
/*
 * Resume a VM paused by v3_pause_vm: mark it VM_RUNNING and lower the
 * barrier so all cores proceed.  Return statements elided here.
 */
int v3_continue_vm(struct v3_vm_info * vm) {

	PrintError(VM_NONE, VCORE_NONE, "Asked to continue nonexistent VM\n");

    if (vm->run_state != VM_PAUSED) {
	PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");

    vm->run_state = VM_RUNNING;

    v3_lower_barrier(vm);
/*
 * Core timeout callback used by v3_simulate_vm.  Sets this vcore's bit
 * in the shared timeout bitmap and spins until the simulation driver
 * clears it, effectively parking the core when its time slice expires.
 */
static int sim_callback(struct guest_info * core, void * private_data) {
    struct v3_bitmap * timeout_map = private_data;

    v3_bitmap_set(timeout_map, core->vcpu_id);

    V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);

    while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
	// We spin here if there is noone to yield to
/*
 * Run a paused VM for approximately msecs milliseconds, then return it
 * to the paused state.
 *
 * Registers a cycle-count timeout (msecs * cpu_khz) with sim_callback
 * on every core, lowers the barrier to let the VM run, spins until all
 * cores have hit their timeout bit, then re-raises the barrier and
 * restores VM_PAUSED.  Return statements elided in this excerpt.
 * NOTE(review): msecs * cpu_khz can overflow for very large msecs —
 * confirm the operand types in the full source.
 */
int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
    struct v3_bitmap timeout_map;

    uint64_t cpu_khz = V3_CPU_KHZ();

	PrintError(VM_NONE, VCORE_NONE, "Asked to simulate nonexistent VM\n");

    if (vm->run_state != VM_PAUSED) {
	PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");

    /* AT this point VM is paused */

    v3_bitmap_init(&timeout_map, vm->num_cores);

    // calculate cycles from msecs...
    // IMPORTANT: Floating point not allowed.
    cycles = (msecs * cpu_khz);

    V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);

    for (i = 0; i < vm->num_cores; i++) {
	if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
	    PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);

    V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");

    // Run the simulation
    //    vm->run_state = VM_SIMULATING;
    vm->run_state = VM_RUNNING;
    v3_lower_barrier(vm);

    V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");

    // block until simulation is complete
    while (all_blocked == 0) {

	for (i = 0; i < vm->num_cores; i++) {
	    if (v3_bitmap_check(&timeout_map, i) == 0) {

	if (all_blocked == 1) {

	// Intentionally spin if there is no one to yield to

    V3_Print(vm, VCORE_NONE,"Simulation is complete\n");

    // Simulation is complete
    // Reset back to PAUSED state

    v3_raise_barrier_nowait(vm, NULL);
    vm->run_state = VM_PAUSED;

    v3_bitmap_reset(&timeout_map);

    v3_wait_for_barrier(vm, NULL);
/*
 * Snapshot a VM's externally visible state into caller-provided
 * structures.
 *
 * base - overall VM run state and type (GENERAL vs HVM)
 * core - per-vcore state; core->num_vcores on entry is the caller's
 *        array capacity, on exit the number actually filled
 * mem  - per-base-region info; mem->num_regions works the same way
 *
 * All four pointers must be non-NULL.  Return statements are elided in
 * this excerpt.
 */
int v3_get_state_vm(struct v3_vm_info        *vm,
		    struct v3_vm_base_state  *base,
		    struct v3_vm_core_state  *core,
		    struct v3_vm_mem_state   *mem)

    extern uint64_t v3_mem_block_size;

    if (!vm || !base || !core || !mem)  {
	PrintError(VM_NONE, VCORE_NONE, "Invalid request to v3_get_state_vm\n");

    // Clamp to the smaller of caller capacity and actual count.
    numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
    numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;

    // Map internal run state to the externally visible enum.
    switch (vm->run_state) {
	case VM_INVALID: base->state = V3_VM_INVALID; break;
	case VM_RUNNING: base->state = V3_VM_RUNNING; break;
	case VM_STOPPED: base->state = V3_VM_STOPPED; break;
	case VM_PAUSED: base->state = V3_VM_PAUSED; break;
	case VM_ERROR: base->state = V3_VM_ERROR; break;
	case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
	case VM_RESETTING: base->state = V3_VM_RESETTING; break;
	default: base->state = V3_VM_UNKNOWN; break;

    base->vm_type = V3_VM_GENERAL;

#ifdef V3_CONFIG_HVM
    if (vm->hvm_state.is_hvm) {
	base->vm_type = V3_VM_HVM;

    // Per-vcore: run state, CPU mode, paging mode, memory mode, type.
    for (i=0;i<numcores;i++) {
	switch (vm->cores[i].core_run_state) {
	    case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
	    case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
	    case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
	    case CORE_RESETTING: core->vcore[i].state = V3_VCORE_RESETTING; break;
	    default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;

	switch (vm->cores[i].cpu_mode) {
	    case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
	    case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
	    case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
	    case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
	    case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
	    case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
	    default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;

	switch (vm->cores[i].shdw_pg_mode) {
	    case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
	    case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
	    default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;

	switch (vm->cores[i].mem_mode) {
	    case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
	    case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
	    default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;

	core->vcore[i].vcore_type = V3_VCORE_GENERAL;

#ifdef V3_CONFIG_HVM
	if (vm->hvm_state.is_hvm) {
	    if (v3_is_hvm_ros_core(&vm->cores[i])) {
		core->vcore[i].vcore_type = V3_VCORE_ROS;

		core->vcore[i].vcore_type = V3_VCORE_HRT;

	core->vcore[i].pcore=vm->cores[i].pcpu_id;
	core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
	core->vcore[i].num_exits=vm->cores[i].num_exits;

    core->num_vcores=numcores;

    // Per-region: base regions are contiguous blocks of v3_mem_block_size.
    for (i=0;i<numregions;i++) {
	mem->region[i].guest_paddr = cur_gpa;
	mem->region[i].host_paddr =  (void*)(vm->mem_map.base_regions[i].host_addr);
	mem->region[i].size = v3_mem_block_size;
#ifdef V3_CONFIG_SWAPPING
	mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
	mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;

	mem->region[i].swapped = 0;
	mem->region[i].pinned = 0;

	cur_gpa += mem->region[i].size;

    mem->num_regions=numregions;

    mem->mem_size=vm->mem_size;
    mem->ros_mem_size=vm->mem_size;

#ifdef V3_CONFIG_HVM
    if (vm->hvm_state.is_hvm) {
	mem->ros_mem_size=v3_get_hvm_ros_memsize(vm);
/*
 * Report the array sizes a caller must allocate before calling
 * v3_get_state_vm: the VM's vcore count and base-region count.
 * All pointers must be non-NULL.  Return statements elided here.
 */
int v3_get_state_sizes_vm(struct v3_vm_info        *vm,
			  unsigned long long       *num_vcores,
			  unsigned long long       *num_regions)

    if (!vm || !num_vcores || !num_regions) {
	PrintError(VM_NONE, VCORE_NONE, "Invalid request to v3_get_state_sizes\n");

    *num_vcores = vm->num_cores;
    *num_regions = vm->mem_map.num_base_regions;
1123 #ifdef V3_CONFIG_CHECKPOINT
1124 #include <palacios/vmm_checkpoint.h>
1126 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1127 if (!vm || !store || !url) {
1128 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_save_vm\n");
1131 return v3_chkpt_save_vm(vm, store, url, opts);
1135 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1136 if (!vm || !store || !url) {
1137 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_load_vm\n");
1140 return v3_chkpt_load_vm(vm, store, url, opts);
1143 #ifdef V3_CONFIG_LIVE_MIGRATION
1144 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1145 if (!vm || !store || !url) {
1146 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_send_vm\n");
1149 return v3_chkpt_send_vm(vm, store, url, opts);
1153 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1154 if (!vm || !store || !url) {
1155 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_receive_vm\n");
1158 return v3_chkpt_receive_vm(vm, store, url, opts);
/*
 * Free a stopped (or errored) VM: release its devices, each core's
 * scheduler registration and per-core state, the VM's scheduler
 * registration, and finally the VM structure itself.  The VM must be
 * in VM_STOPPED or VM_ERROR state.  Return statements and memory-map
 * teardown lines are elided in this excerpt.
 */
int v3_free_vm(struct v3_vm_info * vm) {

    // deinitialize guest (free memory, etc...)

	PrintError(VM_NONE, VCORE_NONE, "Asked to free nonexistent VM\n");

    if ((vm->run_state != VM_STOPPED) &&
	(vm->run_state != VM_ERROR)) {
	PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);

    v3_free_vm_devices(vm);

    for (i = 0; i < vm->num_cores; i++) {
	v3_scheduler_free_core(&(vm->cores[i]));
	v3_free_core(&(vm->cores[i]));

    v3_scheduler_free_vm(vm);

    v3_free_vm_internal(vm);
/*
 * Determine the host CPU's paging mode (32-bit host variant):
 * reads CR4 and reports PROTECTED_PAE if the PAE bit is set.
 * NOTE(review): the CR4 read, the architecture #ifdef guards, and the
 * non-PAE return are elided in this excerpt.
 */
v3_cpu_mode_t v3_get_host_cpu_mode() {

    struct cr4_32 * cr4;

    cr4 = (struct cr4_32 *)&(cr4_val);

    if (cr4->pae == 1) {
	return PROTECTED_PAE;
/* 64-bit host variant of v3_get_host_cpu_mode (body elided in this
 * excerpt; presumably returns LONG — confirm against full source). */
v3_cpu_mode_t v3_get_host_cpu_mode() {
/*
 * Deliver an IPI with the given vector to a physical CPU via the host
 * OS hook; silently a no-op if the host did not register interrupt_cpu.
 */
void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
    extern struct v3_os_hooks * os_hooks;

    if ((os_hooks) && (os_hooks)->interrupt_cpu) {
	(os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
/*
 * Enter the guest on the calling core, dispatching on the global
 * v3_mach_type to the SVM or VMX entry routine.  Returns the backend's
 * result; the invalid-CPU path logs an error (return elided here).
 * NOTE(review): the error string misspells "Attempting" as "Attemping";
 * a runtime string cannot be altered in a comments-only edit.
 */
int v3_vm_enter(struct guest_info * info) {
    switch (v3_mach_type) {
#ifdef V3_CONFIG_SVM

	case V3_SVM_REV3_CPU:
	    return v3_svm_enter(info);

	case V3_VMX_EPT_CPU:
	case V3_VMX_EPT_UG_CPU:
	    return v3_vmx_enter(info);

	    PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
1264 void *v3_get_host_vm(struct v3_vm_info *x)
1267 return x->host_priv_data;
1273 int v3_get_vcore(struct guest_info *x)