2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_mem.h>
22 #include <palacios/vmm_intr.h>
23 #include <palacios/vmm_config.h>
24 #include <palacios/vm_guest.h>
25 #include <palacios/vmm_ctrl_regs.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <palacios/vmm_extensions.h>
29 #include <palacios/vmm_timeout.h>
30 #include <palacios/vmm_options.h>
31 #include <palacios/vmm_cpu_mapper.h>
32 #include <palacios/vmm_direct_paging.h>
33 #include <interfaces/vmm_numa.h>
34 #include <interfaces/vmm_file.h>
37 #include <palacios/svm.h>
40 #include <palacios/vmx.h>
43 #ifdef V3_CONFIG_CHECKPOINT
44 #include <palacios/vmm_checkpoint.h>
/* Virtualization arch detected on each physical CPU (indexed by cpu id);
   entries are V3_INVALID_CPU until the per-CPU init runs (see init_cpu). */
v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
/* Machine-wide arch type; set from the first initialized CPU's entry in
   v3_cpu_types[] during Init_V3, V3_INVALID_CPU before that. */
v3_cpu_arch_t v3_mach_type = V3_INVALID_CPU;

/* Host-OS callback table handed to Palacios by the embedding OS. */
struct v3_os_hooks * os_hooks = NULL;
/* When set to 1, v3_print_cond() actually prints; otherwise it is silent. */
int v3_dbg_enable = 0;
/*
 * Per-physical-CPU initialization callback.  Run on each CPU via
 * hooks->call_on_cpu() from Init_V3 (arg carries the cpu id through the
 * void*).  Probes the hardware for SVM or VMX support and initializes the
 * matching backend for this CPU; logs an error if neither is present.
 *
 * NOTE(review): this view is elided — the #ifdef V3_CONFIG_SVM/VMX guards
 * and the braces/else between the branches are not visible here.
 */
static void init_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    if (v3_is_svm_capable()) {
	PrintDebug(VM_NONE, VCORE_NONE, "Machine is SVM Capable\n");
	v3_init_svm_cpu(cpu_id);

    if (v3_is_vmx_capable()) {
	PrintDebug(VM_NONE, VCORE_NONE, "Machine is VMX Capable\n");
	v3_init_vmx_cpu(cpu_id);

	PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/*
 * Per-physical-CPU teardown callback, the inverse of init_cpu().
 * Dispatches on the arch recorded in v3_cpu_types[cpu_id] and tears down
 * the corresponding SVM or VMX per-CPU state.
 *
 * NOTE(review): elided view — other case labels, break statements and the
 * surrounding #ifdef guards of this switch are not visible here.
 */
static void deinit_cpu(void * arg) {
    uint32_t cpu_id = (uint32_t)(addr_t)arg;

    switch (v3_cpu_types[cpu_id]) {
	    PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing SVM CPU %d\n", cpu_id);
	    v3_deinit_svm_cpu(cpu_id);
	case V3_VMX_EPT_UG_CPU:
	    PrintDebug(VM_NONE, VCORE_NONE, "Deinitializing VMX CPU %d\n", cpu_id);
	    v3_deinit_vmx_cpu(cpu_id);
	    PrintError(VM_NONE, VCORE_NONE, "CPU has no virtualization Extensions\n");
/*
 * Returns nonzero iff the host CPU is executing in long mode.
 * Reads the EFER MSR (0xc0000080); 0x500 is the union of LME (bit 8,
 * 0x100) and LMA (bit 10, 0x400), both of which must be set.
 * (The declarations of high/low and the braces are elided in this view;
 * the low 32 bits of the MSR land in `low`.)
 */
static int in_long_mode()

    v3_get_msr(0xc0000080,&high,&low); // EFER

    return ((low & 0x500)== 0x500); // LMA and LME set
/*
 * One-time global initialization of the Palacios VMM.
 *
 *  hooks    - host-OS callback table; call_on_cpu is used below to run
 *             init_cpu() on each selected physical CPU
 *  cpu_mask - optional byte-array bitmask of physical CPUs to bring up
 *             (NULL means all)
 *  num_cpus - CPU count reported by the host, clamped to V3_CONFIG_MAX_CPUS
 *  options  - option string forwarded to v3_parse_options()
 *
 * NOTE(review): this view is elided — local declarations, several #endif
 * lines, and some subsystem init calls between the visible lines are not
 * shown here.
 */
void Init_V3(struct v3_os_hooks * hooks, char * cpu_mask, int num_cpus, char *options) {

    V3_Print(VM_NONE, VCORE_NONE, "V3 Print statement to fix a Kitten page fault bug\n");

    /* Guarded by an (elided) 32-bit-host #ifdef: 64-bit hosts only. */
#error Palacios does not support compilation for a 32 bit host OS!!!!

    /* Runtime check of the same invariant: refuse to run outside long mode. */
    if (!in_long_mode()) {
	PrintError(VM_NONE,VCORE_NONE,"Palacios supports execution only in long mode (64 bit).\n");

    // Set global variables.

    if (num_cpus>V3_CONFIG_MAX_CPUS) {
	PrintError(VM_NONE,VCORE_NONE, "Requesting as many as %d cpus, but Palacios is compiled for a maximum of %d. Only the first %d cpus will be considered\n", num_cpus, V3_CONFIG_MAX_CPUS, V3_CONFIG_MAX_CPUS);

    // Determine the global machine type
    v3_mach_type = V3_INVALID_CPU;

    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
	v3_cpu_types[i] = V3_INVALID_CPU;

#ifdef V3_CONFIG_CACHEPART

    // Parse host-os defined options into an easily-accessed format.
    v3_parse_options(options);

#ifdef V3_CONFIG_MULTIBOOT

    // Memory manager initialization

    // Register all the possible device types

    // Register all shadow paging handlers
    V3_init_shdw_paging();

#ifdef V3_CONFIG_SWAPPING

    // Initialize the cpu_mapper framework (must be before extensions)
    V3_init_cpu_mapper();

    // Initialize the scheduler framework (must be before extensions)
    V3_init_scheduling();

    // Register all extensions
    V3_init_extensions();

    // Enabling cpu_mapper
    V3_enable_cpu_mapper();

    // Enabling scheduler
    V3_enable_scheduler();

#ifdef V3_CONFIG_SYMMOD

#ifdef V3_CONFIG_CHECKPOINT
    V3_init_checkpoint();

    /* Bring up each requested physical CPU; the first successfully
       initialized CPU determines the global v3_mach_type. */
    if ((hooks) && (hooks->call_on_cpu)) {

	for (i = 0; i < num_cpus && i < V3_CONFIG_MAX_CPUS; i++) {

	    /* major/minor index into the cpu_mask byte array (elided decls). */
	    if ((cpu_mask == NULL) || (*(cpu_mask + major) & (0x1 << minor))) {
		V3_Print(VM_NONE, VCORE_NONE, "Initializing VMM extensions on cpu %d\n", i);
		hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);

		if (v3_mach_type == V3_INVALID_CPU) {
		    v3_mach_type = v3_cpu_types[i];
    /*
     * Global VMM teardown (the enclosing function header is elided from
     * this view — presumably the Shutdown counterpart of Init_V3).
     * Subsystems are torn down in the reverse order they were initialized.
     */
    // Reverse order of Init_V3

    /* First, deinit every physical CPU that was actually brought up. */
    if ((os_hooks) && (os_hooks->call_on_cpu)) {
	for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
	    if (v3_cpu_types[i] != V3_INVALID_CPU) {
		V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
		//deinit_cpu((void *)(addr_t)i);

#ifdef V3_CONFIG_CHECKPOINT
    V3_deinit_checkpoint();

#ifdef V3_CONFIG_SYMMOD

    V3_disable_scheduler();

    V3_disable_cpu_mapper();

    V3_deinit_extensions();

    V3_deinit_scheduling();

    V3_deinit_cpu_mapper();

#ifdef V3_CONFIG_SWAPPING
    v3_deinit_swapping();

    V3_deinit_shdw_paging();

#ifdef V3_CONFIG_MULTIBOOT
    v3_deinit_multiboot();

#ifdef V3_CONFIG_CACHEPART
    v3_deinit_cachepart();
/* Return the virtualization arch recorded for physical CPU cpu_id.
   No bounds check — caller must pass cpu_id < V3_CONFIG_MAX_CPUS. */
v3_cpu_arch_t v3_get_cpu_type(int cpu_id) {
    return v3_cpu_types[cpu_id];
/*
 * Thread entry point for one virtual core (p is the guest_info*).
 * Registers the core with the scheduler, then dispatches into the
 * SVM or VMX guest-run loop according to the global v3_mach_type;
 * the return value is whatever the backend run loop returns.
 *
 * NOTE(review): elided view — other case labels, #ifdef guards and the
 * returns on the error paths are not visible here.
 */
static int start_core(void * p)

    struct guest_info * core = (struct guest_info *)p;

    if (v3_scheduler_register_core(core) == -1){
	PrintError(core->vm_info, core,"Error initializing scheduling in core %d\n", core->vcpu_id);

    PrintDebug(core->vm_info,core,"virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
	       core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);

    switch (v3_mach_type) {
	case V3_SVM_REV3_CPU:
	    return v3_start_svm_guest(core);
	case V3_VMX_EPT_UG_CPU:
	    return v3_start_vmx_guest(core);
	    PrintError(core->vm_info, core, "Attempting to enter a guest on an invalid CPU\n");
/*
 * Create (but do not start) a VM from configuration blob cfg.
 * Configures the guest, names it (truncated to 127 chars + NUL),
 * registers it with the cpu_mapper and scheduler, maps its virtual
 * cores to physical CPUs per cpu_mask, and spawns one (not-yet-started)
 * host thread per virtual core running start_core().
 * Returns the new vm, or (on the elided error paths) presumably NULL.
 *
 * NOTE(review): elided view — error-path returns, closing braces and
 * some declarations are not visible here.
 */
struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name, unsigned int cpu_mask) {

    struct v3_vm_info * vm = v3_config_guest(cfg, priv_data);

	PrintError(VM_NONE, VCORE_NONE, "Could not configure guest\n");

    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    } else if (strlen(name) >= 128) {
	PrintError(vm, VCORE_NONE,"VM name is too long. Will be truncated to 128 chars.\n");

    /* vm->name is a 128-byte buffer: zero it, then copy at most 127 chars
       so it is always NUL-terminated. */
    memset(vm->name, 0, 128);
    strncpy(vm->name, name, 127);

    if(v3_cpu_mapper_register_vm(vm) == -1) {

	PrintError(vm, VCORE_NONE,"Error registering VM with cpu_mapper\n");

    /* Register this VM with the palacios scheduler. It will ask for admission */

    if(v3_scheduler_register_vm(vm) == -1) {

	PrintError(vm, VCORE_NONE,"Error registering VM with scheduler\n");

    if (v3_cpu_mapper_admit_vm(vm,cpu_mask) != 0){
	PrintError(vm, VCORE_NONE,"Error admitting VM %s for mapping", vm->name);

    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

	struct guest_info * core = &(vm->cores[vcore_id]);

	PrintDebug(vm, VCORE_NONE, "Creating virtual core %u on logical core %u\n",
		   vcore_id, core->pcpu_id);

	sprintf(core->exec_name, "%s-%d", vm->name, vcore_id);

	PrintDebug(vm, VCORE_NONE, "run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
		   core->pcpu_id, start_core, core, core->exec_name);

	// Resource controls for cores can be independent, but
	// currently are not, hence this copy.
	core->resource_control = vm->resource_control;

	core->core_thread = V3_CREATE_THREAD_ON_CPU(core->pcpu_id, start_core, core, core->exec_name, &core->resource_control);

	if (core->core_thread == NULL) {
	    PrintError(vm, VCORE_NONE, "Thread creation failed\n");
/*
 * Launch a previously-created, stopped VM.  Validates the run state,
 * does multiboot/HVM boot setup, prunes cpu_mask down to CPUs that
 * actually have virtualization initialized, asks the scheduler for
 * admission, flips the VM to VM_RUNNING and starts every core thread.
 *
 * NOTE(review): elided view — declarations (i, vcore_id, major, minor),
 * error-path returns and closing braces are not visible here.
 */
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {

    uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier
    uint32_t avail_cores = 0;

	PrintError(VM_NONE, VCORE_NONE, "Asked to start nonexistent VM\n");

    if (vm->run_state != VM_STOPPED) {
	PrintError(vm, VCORE_NONE, "VM has already been launched (state=%d)\n", (int)vm->run_state);

/* NOTE(review): the rest of the file uses "#ifdef V3_CONFIG_MULTIBOOT";
   "#if" here works only while the macro is defined as a plain integer —
   confirm and consider making it consistent. */
#if V3_CONFIG_MULTIBOOT
    if (v3_setup_multiboot_vm_for_boot(vm)) {
	PrintError(vm, VCORE_NONE, "Multiboot setup for boot failed\n");

    if (v3_setup_hvm_vm_for_boot(vm)) {
	PrintError(vm, VCORE_NONE, "HVM setup for boot failed\n");

    /// CHECK IF WE ARE MULTICORE ENABLED....

    V3_Print(vm, VCORE_NONE, "V3 -- Starting VM (%u cores)\n", vm->num_cores);
    V3_Print(vm, VCORE_NONE, "CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip));

    // Check that enough cores are present in the mask to handle vcores
    for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {

	if (core_mask[major] & (0x1 << minor)) {
	    if (v3_cpu_types[i] == V3_INVALID_CPU) {
		/* CPU never got virtualization init — drop it from the mask. */
		core_mask[major] &= ~(0x1 << minor);

    vm->avail_cores = avail_cores;

    if (v3_scheduler_admit_vm(vm) != 0){
       PrintError(vm, VCORE_NONE,"Error admitting VM %s for scheduling", vm->name);

    vm->run_state = VM_RUNNING;

    for (vcore_id = 0; vcore_id < vm->num_cores; vcore_id++) {

	struct guest_info * core = &(vm->cores[vcore_id]);

	PrintDebug(vm, VCORE_NONE, "Starting virtual core %u on logical core %u\n",
		   vcore_id, core->pcpu_id);

	if (core->core_run_state==CORE_INVALID) {
	    // launch of a fresh VM
	    core->core_run_state = CORE_STOPPED;
	    // core zero will turn itself on
	    // this is a resume - use whatever its current run_state is

	V3_START_THREAD(core->core_thread);
/*
 * Reset one virtual core to start execution at `rip`, dispatching to the
 * SVM or VMX backend according to the arch of the physical CPU the core
 * currently sits on.  Returns the backend's result.
 *
 * NOTE(review): elided view — other case labels, #ifdef guards and the
 * error-path return are not visible here.
 */
int v3_reset_vm_core(struct guest_info * core, addr_t rip) {

    switch (v3_cpu_types[core->pcpu_id]) {
	case V3_SVM_REV3_CPU:
	    PrintDebug(core->vm_info, core, "Resetting SVM Guest CPU %d\n", core->vcpu_id);
	    return v3_reset_svm_vm_core(core, rip);
	case V3_VMX_EPT_UG_CPU:
	    PrintDebug(core->vm_info, core, "Resetting VMX Guest CPU %d\n", core->vcpu_id);
	    return v3_reset_vmx_vm_core(core, rip);
	    PrintError(core->vm_info, core, "CPU has no virtualization Extensions\n");
// resets the whole VM (non-HVM) or the ROS (HVM)
/* Convenience wrapper around v3_reset_vm_extended: for an HVM VM it
   resets only the ROS cores, otherwise it resets everything.
   NOTE(review): the duplicated RESET_ALL call below strongly suggests an
   elided #ifdef V3_CONFIG_HVM / #else split in this view. */
int v3_reset_vm(struct v3_vm_info *vm)

    if (vm->hvm_state.is_hvm) {
	return v3_reset_vm_extended(vm,V3_VM_RESET_ROS,0);
	return v3_reset_vm_extended(vm,V3_VM_RESET_ALL,0);

    return v3_reset_vm_extended(vm,V3_VM_RESET_ALL,0);
/*
 * Reset a contiguous range of a running VM's cores.
 *
 *  t     - V3_VM_RESET_ALL (whole non-HVM VM), _HRT / _ROS (the HRT or
 *          ROS partition of an HVM VM), or _CORE_RANGE (explicit
 *          [start,end] pair passed through `data` as two uint32_t's)
 *  data  - only used for V3_VM_RESET_CORE_RANGE
 *
 * The caller thread initializes a counting barrier sized to the range,
 * marks each core CORE_RESETTING, and kicks each core's physical CPU so
 * it exits the guest; the cores themselves complete the reset.  Does NOT
 * wait for the reset to finish (a core may have initiated it).
 *
 * NOTE(review): elided view — break statements, brace closures and the
 * error-path returns of the switch are not visible here.
 */
int v3_reset_vm_extended(struct v3_vm_info *vm, v3_vm_reset_type t, void *data)

    uint32_t start, end, i;

    if (vm->run_state != VM_RUNNING) {
	PrintError(vm,VCORE_NONE,"Attempt to reset VM in state %d (must be in running state)\n",vm->run_state);

	case V3_VM_RESET_ALL:
	    if (vm->hvm_state.is_hvm) {
		PrintError(vm,VCORE_NONE,"Attempt to do ALL reset of HVM (not allowed)\n");
	    start=0; end=vm->num_cores-1;

	case V3_VM_RESET_HRT:
	case V3_VM_RESET_ROS:
	    if (vm->hvm_state.is_hvm) {
		if (t==V3_VM_RESET_HRT) {
		    start = vm->hvm_state.first_hrt_core;
		    end = vm->num_cores-1;
		    /* ROS branch (elided else): cores [0, first_hrt_core). */
		    end = vm->hvm_state.first_hrt_core-1;
		PrintError(vm,VCORE_NONE,"This is not an HVM and so HVM-specific resets do not apply\n");

	case V3_VM_RESET_CORE_RANGE:
	    start = ((uint32_t*)data)[0];
	    end = ((uint32_t*)data)[1];

	    PrintError(vm,VCORE_NONE,"Unsupported reset type %d for this VM\n",t);

    PrintDebug(vm,VCORE_NONE,"Resetting cores %d through %d\n",start,end);

    /* Range is inclusive: [start, end]. */
    newcount = end-start+1;

    for (i=start;i<=end;i++) {
	if (!(vm->cores[i].core_run_state == CORE_RUNNING || vm->cores[i].core_run_state == CORE_STOPPED)) {
	    PrintError(vm,VCORE_NONE,"Cannot reset VM as core %u is in state %d (must be running or stopped)\n",i,vm->cores[i].core_run_state);

    // This had better be the only thread using the barrier at this point...
    v3_init_counting_barrier(&vm->reset_barrier,newcount);

    // OK, I am the reseter, tell the relevant cores what to do
    // each will atomically decrement the reset countdown and then
    // spin waiting for it to hit zero.

    for (i=start;i<=end;i++) {
	vm->cores[i].core_run_state = CORE_RESETTING;
	// force exit of core
	v3_interrupt_cpu(vm, vm->cores[i].pcpu_id, 0);

    // we don't wait for reset to finish
    // because reset could have been initiated by a core
/* move a virtual core to different physical core */
/*
 * Migrate virtual core vcore_id of vm onto physical CPU target_cpu.
 * Raises the VM-wide barrier, asks the cpu_mapper to admit the move,
 * flushes per-CPU backend state (VMX case visible below), moves the
 * host thread, then updates core->pcpu_id and lowers the barrier.
 *
 * NOTE(review): elided view — error-path returns, the SVM case of the
 * flush switch, and closing braces are not visible here.
 */
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
    struct guest_info * core = NULL;

	PrintError(VM_NONE, VCORE_NONE, "Asked to move core of nonexistent VM\n");

    if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
	PrintError(vm, VCORE_NONE, "Attempted to migrate invalid virtual core (%d)\n", vcore_id);

    core = &(vm->cores[vcore_id]);

    if (target_cpu == core->pcpu_id) {
	PrintError(vm, core, "Attempted to migrate to local core (%d)\n", target_cpu);
	// well that was pointless

    if (core->core_thread == NULL) {
	PrintError(vm, core, "Attempted to migrate a core without a valid thread context\n");

    /* Spin until the barrier is raised: all cores are quiesced below here. */
    while (v3_raise_barrier(vm, NULL) == -1);

    V3_Print(vm, core, "Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);

    // Double check that we weren't preemptively migrated
    if (target_cpu != core->pcpu_id) {

	V3_Print(vm, core, "Moving Core\n");

	if(v3_cpu_mapper_admit_core(vm, vcore_id, target_cpu) == -1){
	    PrintError(vm, core, "Core %d can not be admitted in cpu %d\n",vcore_id, target_cpu);

	switch (v3_cpu_types[core->pcpu_id]) {
	    case V3_VMX_EPT_UG_CPU:
		PrintDebug(vm, core, "Flushing VMX Guest CPU %d\n", core->vcpu_id);
		V3_Call_On_CPU(core->pcpu_id, (void (*)(void *))v3_flush_vmx_vm_core, (void *)core);

	if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
	    PrintError(vm, core, "Failed to move Vcore %d to CPU %d\n",
		       core->vcpu_id, target_cpu);
	    v3_lower_barrier(vm);

	/* There will be a benign race window here:
	   core->pcpu_id will be set to the target core before its fully "migrated"
	   However the core will NEVER run on the old core again, its just in flight to the new core */
	core->pcpu_id = target_cpu;

	V3_Print(vm, core, "core now at %d\n", core->pcpu_id);

    v3_lower_barrier(vm);
/* move a memory region to memory with affinity for a specific physical core */
/*
 * Re-home the base memory region containing guest physical address gpa
 * onto the NUMA node of target_cpu.  Under the VM-wide barrier: allocate
 * a replacement region on the new node, copy the contents, swap the
 * region's host_addr/numa_id, invalidate every core's paging structures,
 * and free the old pages.  On invalidation failure the swap is reverted.
 *
 * NOTE(review): elided view — declarations (old_node, new_node, num_pages,
 * new_hpa, old_hpa, i), returns, some V3_AllocPagesExtended arguments and
 * closing braces are not visible here.
 */
int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu) {

    struct v3_mem_region *reg;

	PrintError(VM_NONE, VCORE_NONE, "Asked to move memory of nonexistent VM\n");

    old_node = v3_numa_gpa_to_node(vm,(addr_t)gpa);

	PrintError(vm, VCORE_NONE, "Cannot determine current node of gpa %p\n",gpa);

    new_node = v3_numa_cpu_to_node(target_cpu);

	PrintError(vm, VCORE_NONE, "Cannot determine current node of cpu %d\n",target_cpu);

    if (new_node==old_node) {
	PrintDebug(vm, VCORE_NONE, "Affinity is already established - ignoring request\n");

    // We are now going to change the universe, so
    // we'll barrier everyone first

    while (v3_raise_barrier(vm, NULL) == -1);

    reg = v3_get_mem_region(vm, V3_MEM_CORE_ANY, (addr_t) gpa);

	PrintError(vm, VCORE_NONE, "Attempt to migrate non-existent memory\n");

    if (!(reg->flags.base) || !(reg->flags.alloced)) {
	PrintError(vm, VCORE_NONE, "Attempt to migrate invalid region: base=%d alloced=%d\n", reg->flags.base, reg->flags.alloced);

    // we now have the allocated base region corresponding to - and not a copy
    // we will rewrite this region after moving its contents

    // first, let's double check that we are in fact changing the numa_id...

    if (reg->numa_id==new_node) {
	PrintDebug(vm, VCORE_NONE, "Affinity for this base region is already established - ignoring...\n");

    // region uses exclusive addressing [guest_start,guest_end)
    num_pages = (reg->guest_end-reg->guest_start)/PAGE_SIZE;

    new_hpa = V3_AllocPagesExtended(num_pages,
				    vm->resource_control.pg_filter_func,
				    vm->resource_control.pg_filter_state);

	PrintError(vm, VCORE_NONE, "Cannot allocate memory for new base region...\n");

    // Note, assumes virtual contiguity in the host OS...
    memcpy(V3_VAddr((void*)new_hpa), V3_VAddr((void*)(reg->host_addr)), num_pages*PAGE_SIZE);

    /* Remember the old mapping so we can revert or free it below. */
    old_hpa = (void*)(reg->host_addr);
    old_node = (int)(reg->numa_id);

    reg->host_addr = (addr_t)new_hpa;
    reg->numa_id = v3_numa_hpa_to_node((addr_t)new_hpa);

    // flush all page tables / kill all humans

    for (i=0;i<vm->num_cores;i++) {
	if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
	    v3_invalidate_shadow_pts(&(vm->cores[i]));
	} else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
	    // nested invalidator uses inclusive addressing [start,end], not [start,end)
	    v3_invalidate_nested_addr_range(&(vm->cores[i]),reg->guest_start,reg->guest_end-1,NULL,NULL);
	    PrintError(vm,VCORE_NONE, "Cannot determine how to invalidate paging structures! Reverting to previous region.\n");
	    // We'll restore things...
	    reg->host_addr = (addr_t) old_hpa;
	    reg->numa_id = old_node;
	    V3_FreePages(new_hpa,num_pages);

    // Now the old region can go away...
    V3_FreePages(old_hpa,num_pages);

    PrintDebug(vm,VCORE_NONE,"Migration of memory complete - new region is %p to %p\n",
	       (void*)(reg->host_addr),(void*)(reg->host_addr+num_pages*PAGE_SIZE-1));

    v3_lower_barrier(vm);

    /* (Error-path exit, reached via elided goto/branch.) */
    v3_lower_barrier(vm);
/*
 * Stop a running (or simulating) VM: flip run_state to VM_STOPPED, then
 * wait until every core has observed the stop and reached CORE_STOPPED,
 * nudging any still-running core via the scheduler.
 *
 * NOTE(review): elided view — the outer wait loop around the
 * still_running scan, returns and closing braces are not visible here.
 */
int v3_stop_vm(struct v3_vm_info * vm) {

    struct guest_info * running_core;

	PrintError(VM_NONE, VCORE_NONE, "Asked to stop nonexistent VM\n");

    if ((vm->run_state != VM_RUNNING) &&
	(vm->run_state != VM_SIMULATING)) {
	PrintError(vm, VCORE_NONE,"Tried to stop VM in invalid runstate (%d)\n", vm->run_state);

    vm->run_state = VM_STOPPED;

    // Sanity check to catch any weird execution states
    if (v3_wait_for_barrier(vm, NULL) == 0) {
	v3_lower_barrier(vm);

    // XXX force exit all cores via a cross call/IPI XXX

	int still_running = 0;

	for (i = 0; i < vm->num_cores; i++) {
	    if (vm->cores[i].core_run_state != CORE_STOPPED) {
		running_core = &vm->cores[i];

	if (still_running == 0) {

	v3_scheduler_stop_core(running_core);

    V3_Print(vm, VCORE_NONE,"VM stopped. Returning\n");
/*
 * Pause a running VM: raise the VM-wide barrier (quiescing all cores)
 * and mark the VM VM_PAUSED.  The barrier stays raised until
 * v3_continue_vm lowers it.  (Returns and braces elided in this view.)
 */
int v3_pause_vm(struct v3_vm_info * vm) {

	PrintError(VM_NONE, VCORE_NONE, "Asked to pause nonexistent VM\n");

    if (vm->run_state != VM_RUNNING) {
	PrintError(vm, VCORE_NONE,"Tried to pause a VM that was not running\n");

    while (v3_raise_barrier(vm, NULL) == -1);

    vm->run_state = VM_PAUSED;
/*
 * Resume a VM paused by v3_pause_vm: mark it VM_RUNNING and lower the
 * barrier raised at pause time, releasing all core threads.
 * (Returns and braces elided in this view.)
 */
int v3_continue_vm(struct v3_vm_info * vm) {

	PrintError(VM_NONE, VCORE_NONE, "Asked to continue nonexistent VM\n");

    if (vm->run_state != VM_PAUSED) {
	PrintError(vm, VCORE_NONE,"Tried to continue a VM that was not paused\n");

    vm->run_state = VM_RUNNING;

    v3_lower_barrier(vm);
/*
 * Core-timeout callback used by v3_simulate_vm.  private_data is the
 * shared timeout bitmap: the core sets its own bit to signal "my
 * simulation window expired", then spins until the controlling thread
 * clears the bitmap (v3_bitmap_reset) to release it.
 */
static int sim_callback(struct guest_info * core, void * private_data) {
    struct v3_bitmap * timeout_map = private_data;

    v3_bitmap_set(timeout_map, core->vcpu_id);

    V3_Print(core->vm_info, core, "Simulation callback activated (guest_rip=%p)\n", (void *)core->rip);

    while (v3_bitmap_check(timeout_map, core->vcpu_id) == 1) {
	// We spin here if there is noone to yield to
/*
 * Run a paused VM for `msecs` milliseconds of guest time, then re-pause.
 * Arms a cycle-count timeout (msecs * cpu_khz cycles — integer math only)
 * on every core with sim_callback as the handler, releases the pause
 * barrier, waits until every core has set its bit in timeout_map, then
 * re-raises the barrier, restores VM_PAUSED and releases the spinning
 * cores by resetting the bitmap.
 *
 * NOTE(review): elided view — declarations (i, cycles, all_blocked),
 * returns, and closing braces are not visible here.
 */
int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs) {
    struct v3_bitmap timeout_map;
    uint64_t cpu_khz = V3_CPU_KHZ();

	PrintError(VM_NONE, VCORE_NONE, "Asked to simulate nonexistent VM\n");

    if (vm->run_state != VM_PAUSED) {
	PrintError(vm, VCORE_NONE,"VM must be paused before simulation begins\n");

    /* AT this point VM is paused */

    v3_bitmap_init(&timeout_map, vm->num_cores);

    // calculate cycles from msecs...
    // IMPORTANT: Floating point not allowed.
    cycles = (msecs * cpu_khz);

    V3_Print(vm, VCORE_NONE,"Simulating %u msecs (%llu cycles) [CPU_KHZ=%llu]\n", msecs, cycles, cpu_khz);

    for (i = 0; i < vm->num_cores; i++) {
	if (v3_add_core_timeout(&(vm->cores[i]), cycles, sim_callback, &timeout_map) == -1) {
	    PrintError(vm, VCORE_NONE,"Could not register simulation timeout for core %d\n", i);

    V3_Print(vm, VCORE_NONE,"timeouts set on all cores\n ");

    // Run the simulation
//    vm->run_state = VM_SIMULATING;
    vm->run_state = VM_RUNNING;
    v3_lower_barrier(vm);

    V3_Print(vm, VCORE_NONE,"Barrier lowered: We are now Simulating!!\n");

    // block until simulation is complete
    while (all_blocked == 0) {

	for (i = 0; i < vm->num_cores; i++) {
	    if (v3_bitmap_check(&timeout_map, i) == 0) {

	if (all_blocked == 1) {

	// Intentionally spin if there is no one to yield to

    V3_Print(vm, VCORE_NONE,"Simulation is complete\n");

    // Simulation is complete
    // Reset back to PAUSED state

    v3_raise_barrier_nowait(vm, NULL);
    vm->run_state = VM_PAUSED;

    /* Clearing the bitmap is what releases the cores spinning in
       sim_callback. */
    v3_bitmap_reset(&timeout_map);

    v3_wait_for_barrier(vm, NULL);
/*
 * Snapshot the externally-visible state of a VM into caller-provided
 * buffers: overall run state + type (base), per-vcore state up to
 * core->num_vcores entries (core), and per-base-region memory info up to
 * mem->num_regions entries (mem).  The caller's capacity fields are
 * clamped to what the VM actually has; the actual counts written are
 * stored back into core->num_vcores and mem->num_regions.
 *
 * NOTE(review): elided view — declarations (i, numcores, numregions,
 * cur_gpa), returns, braces and #endif lines are not visible here.
 */
int v3_get_state_vm(struct v3_vm_info *vm,
		    struct v3_vm_base_state *base,
		    struct v3_vm_core_state *core,
		    struct v3_vm_mem_state *mem)

    extern uint64_t v3_mem_block_size;

    if (!vm || !base || !core || !mem) {
	PrintError(VM_NONE, VCORE_NONE, "Invalid request to v3_get_state_vm\n");

    /* Clamp requested counts to what actually exists. */
    numcores = core->num_vcores > vm->num_cores ? vm->num_cores : core->num_vcores;
    numregions = mem->num_regions > vm->mem_map.num_base_regions ? vm->mem_map.num_base_regions : mem->num_regions;

    /* Translate internal run states to the public enum. */
    switch (vm->run_state) {
	case VM_INVALID: base->state = V3_VM_INVALID; break;
	case VM_RUNNING: base->state = V3_VM_RUNNING; break;
	case VM_STOPPED: base->state = V3_VM_STOPPED; break;
	case VM_PAUSED: base->state = V3_VM_PAUSED; break;
	case VM_ERROR: base->state = V3_VM_ERROR; break;
	case VM_SIMULATING: base->state = V3_VM_SIMULATING; break;
	case VM_RESETTING: base->state = V3_VM_RESETTING; break;
	default: base->state = V3_VM_UNKNOWN; break;

    base->vm_type = V3_VM_GENERAL;

#ifdef V3_CONFIG_HVM
    if (vm->hvm_state.is_hvm) {
	base->vm_type = V3_VM_HVM;

    /* Per-virtual-core state. */
    for (i=0;i<numcores;i++) {
	switch (vm->cores[i].core_run_state) {
	    case CORE_INVALID: core->vcore[i].state = V3_VCORE_INVALID; break;
	    case CORE_RUNNING: core->vcore[i].state = V3_VCORE_RUNNING; break;
	    case CORE_STOPPED: core->vcore[i].state = V3_VCORE_STOPPED; break;
	    case CORE_RESETTING: core->vcore[i].state = V3_VCORE_RESETTING; break;
	    default: core->vcore[i].state = V3_VCORE_UNKNOWN; break;
	switch (vm->cores[i].cpu_mode) {
	    case REAL: core->vcore[i].cpu_mode = V3_VCORE_CPU_REAL; break;
	    case PROTECTED: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED; break;
	    case PROTECTED_PAE: core->vcore[i].cpu_mode = V3_VCORE_CPU_PROTECTED_PAE; break;
	    case LONG: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG; break;
	    case LONG_32_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_32_COMPAT; break;
	    case LONG_16_COMPAT: core->vcore[i].cpu_mode = V3_VCORE_CPU_LONG_16_COMPAT; break;
	    default: core->vcore[i].cpu_mode = V3_VCORE_CPU_UNKNOWN; break;
	switch (vm->cores[i].shdw_pg_mode) {
	    case SHADOW_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_SHADOW; break;
	    case NESTED_PAGING: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_NESTED; break;
	    default: core->vcore[i].mem_state = V3_VCORE_MEM_STATE_UNKNOWN; break;
	switch (vm->cores[i].mem_mode) {
	    case PHYSICAL_MEM: core->vcore[i].mem_mode = V3_VCORE_MEM_MODE_PHYSICAL; break;
	    case VIRTUAL_MEM: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_VIRTUAL; break;
	    default: core->vcore[i].mem_mode=V3_VCORE_MEM_MODE_UNKNOWN; break;

	core->vcore[i].vcore_type = V3_VCORE_GENERAL;

#ifdef V3_CONFIG_HVM
	if (vm->hvm_state.is_hvm) {
	    if (v3_is_hvm_ros_core(&vm->cores[i])) {
		core->vcore[i].vcore_type = V3_VCORE_ROS;
		core->vcore[i].vcore_type = V3_VCORE_HRT;

	core->vcore[i].pcore=vm->cores[i].pcpu_id;
	core->vcore[i].last_rip=(void*)(vm->cores[i].rip);
	core->vcore[i].num_exits=vm->cores[i].num_exits;

    core->num_vcores=numcores;

    /* Per-base-region memory layout; regions are fixed-size blocks of
       v3_mem_block_size laid out contiguously in guest-physical space. */
    for (i=0;i<numregions;i++) {
	mem->region[i].guest_paddr = cur_gpa;
	mem->region[i].host_paddr =  (void*)(vm->mem_map.base_regions[i].host_addr);
	mem->region[i].size = v3_mem_block_size;
#ifdef V3_CONFIG_SWAPPING
	mem->region[i].swapped = vm->mem_map.base_regions[i].flags.swapped;
	mem->region[i].pinned = vm->mem_map.base_regions[i].flags.pinned;
	mem->region[i].swapped = 0;
	mem->region[i].pinned = 0;

	cur_gpa += mem->region[i].size;

    mem->num_regions=numregions;

    mem->mem_size=vm->mem_size;
    mem->ros_mem_size=vm->mem_size;

#ifdef V3_CONFIG_HVM
    if (vm->hvm_state.is_hvm) {
	mem->ros_mem_size=v3_get_hvm_ros_memsize(vm);
/*
 * Report the sizes a caller must allocate before calling
 * v3_get_state_vm: the number of virtual cores and the number of base
 * memory regions.  Returns nonzero (elided) on NULL arguments.
 */
int v3_get_state_sizes_vm(struct v3_vm_info        *vm,
			  unsigned long long       *num_vcores,
			  unsigned long long       *num_regions)
    if (!vm || !num_vcores || !num_regions) {
	PrintError(VM_NONE, VCORE_NONE, "Invalid request to v3_get_state_sizes\n");

    *num_vcores = vm->num_cores;
    *num_regions = vm->mem_map.num_base_regions;
1121 #ifdef V3_CONFIG_CHECKPOINT
1122 #include <palacios/vmm_checkpoint.h>
1124 int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1125 if (!vm || !store || !url) {
1126 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_save_vm\n");
1129 return v3_chkpt_save_vm(vm, store, url, opts);
1133 int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1134 if (!vm || !store || !url) {
1135 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_load_vm\n");
1138 return v3_chkpt_load_vm(vm, store, url, opts);
1141 #ifdef V3_CONFIG_LIVE_MIGRATION
1142 int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1143 if (!vm || !store || !url) {
1144 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_send_vm\n");
1147 return v3_chkpt_send_vm(vm, store, url, opts);
1151 int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, v3_chkpt_options_t opts) {
1152 if (!vm || !store || !url) {
1153 PrintError(VM_NONE,VCORE_NONE, "Incorrect arguemnts for v3_receive_vm\n");
1156 return v3_chkpt_receive_vm(vm, store, url, opts);
/*
 * Destroy a stopped (or errored) VM: free its devices, free and
 * deregister each core, then release the VM structure itself.  Must not
 * be called while the VM is running.
 *
 * NOTE(review): elided view — the NULL-vm check around the first error,
 * declarations, returns and closing braces are not visible here.
 */
int v3_free_vm(struct v3_vm_info * vm) {

    // deinitialize guest (free memory, etc...)

	PrintError(VM_NONE, VCORE_NONE, "Asked to free nonexistent VM\n");

    if ((vm->run_state != VM_STOPPED) &&
	(vm->run_state != VM_ERROR)) {
	PrintError(vm, VCORE_NONE,"Tried to Free VM in invalid runstate (%d)\n", vm->run_state);

    v3_free_vm_devices(vm);

    for (i = 0; i < vm->num_cores; i++) {
	v3_scheduler_free_core(&(vm->cores[i]));
	v3_free_core(&(vm->cores[i]));

    v3_scheduler_free_vm(vm);
    v3_free_vm_internal(vm);
/*
 * Determine the host CPU's execution mode.  Two build-variant definitions
 * exist (the #ifdef 32/64-bit guards between them are elided in this
 * view): the 32-bit variant below inspects CR4.PAE to distinguish
 * PROTECTED_PAE from plain protected mode.
 */
v3_cpu_mode_t v3_get_host_cpu_mode() {
    struct cr4_32 * cr4;

    /* Reinterpret the raw CR4 value (cr4_val read elsewhere, elided)
       through the bitfield struct. */
    cr4 = (struct cr4_32 *)&(cr4_val);

    if (cr4->pae == 1) {
	return PROTECTED_PAE;

/* 64-bit-host variant (body elided in this view). */
v3_cpu_mode_t v3_get_host_cpu_mode() {
/*
 * Conditional debug print: formats into a 2048-byte buffer (buf and the
 * va_list setup are elided in this view) and emits it only when the
 * global v3_dbg_enable flag is set to 1.
 */
void v3_print_cond(const char * fmt, ...) {
    if (v3_dbg_enable == 1) {
	vsnprintf(buf, 2048, fmt, ap);
	V3_Print(VM_NONE, VCORE_NONE,"%s", buf);
1242 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
1243 extern struct v3_os_hooks * os_hooks;
1245 if ((os_hooks) && (os_hooks)->interrupt_cpu) {
1246 (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
/*
 * Enter the guest on the calling core for one execution slice,
 * dispatching to the SVM or VMX backend according to the global
 * v3_mach_type; returns the backend's result.
 *
 * NOTE(review): elided view — #endif lines, other case labels and the
 * error-path return are not visible.  The error string below misspells
 * "Attempting" ("Attemping") — left untouched here since it is runtime
 * output.
 */
int v3_vm_enter(struct guest_info * info) {
    switch (v3_mach_type) {
#ifdef V3_CONFIG_SVM
	case V3_SVM_REV3_CPU:
	    return v3_svm_enter(info);
	case V3_VMX_EPT_CPU:
	case V3_VMX_EPT_UG_CPU:
	    return v3_vmx_enter(info);
	    PrintError(info->vm_info, info, "Attemping to enter a guest on an invalid CPU\n");
/* Return the host-private data pointer the embedding OS stashed in the VM.
   NOTE(review): no NULL check is visible for x in this elided view —
   confirm the caller guarantees a valid vm. */
void *v3_get_host_vm(struct v3_vm_info *x)

    return x->host_priv_data;
1283 int v3_get_vcore(struct guest_info *x)