/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *         Peter Dinda <pdinda@northwestern.edu> (Reset)
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>
#include <palacios/vmm_barrier.h>
#include <palacios/vmm_perftune.h>

#include <palacios/vmm_bios.h>
#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/svm_io.h>

#include <palacios/vmm_sprintf.h>

#ifdef V3_CONFIG_MEM_TRACK
#include <palacios/vmm_mem_track.h>
#endif

#ifdef V3_CONFIG_TM_FUNC
#include <extensions/trans_mem.h>
#endif

#ifndef V3_CONFIG_DEBUG_SVM
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
uint32_t v3_last_exit;

// Physical addresses of the per-CPU host save areas
// (one VMCB-sized region per physical CPU)
static addr_t host_vmcbs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};

extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
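
// v3_svm_launch is the low-level assembly entry path: it uses host_vmcb
// to save/restore host state around VMLOAD/VMRUN of the guest VMCB, and
// returns here after the next #VMEXIT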
static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = NULL;
    addr_t vmcb_pa = (addr_t)V3_AllocPages(1); // need not be shadow safe, not exposed to guest

    if ((void *)vmcb_pa == NULL) {
        PrintError(VM_NONE, VCORE_NONE, "Error allocating VMCB\n");
        return NULL;
    }

    vmcb_page = (vmcb_t *)V3_VAddr((void *)vmcb_pa);

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}


static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
{
    int status;

    // Call arch-independent handler
    if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0) {
        return status;
    }

    // Ensure that hardware visible EFER.SVME bit is set (SVM Enable)
    struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
    hw_efer->svme = 1;

    return 0;
}
/*
 * This is invoked both on an initial boot and on a reset
 *
 * The difference is that on a reset we will not rehook anything
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);

    if (core->core_run_state != CORE_INVALID && core->core_run_state != CORE_RESETTING) {
        PrintError(core->vm_info, core, "Attempt to Init_VMCB_BIOS in invalid state (%d)\n", core->core_run_state);
        return;
    }

    // need to invalidate any shadow page tables early
    if (core->shdw_pg_mode == SHADOW_PAGING && core->core_run_state == CORE_RESETTING) {
        if (v3_get_vm_cpu_mode(core) != REAL) {
            if (v3_invalidate_shadow_pts(core) == -1) {
                PrintError(core->vm_info, core, "Could not invalidate shadow page tables\n");
                return;
            }
        }
    }

    // Guarantee we are starting from a clean slate
    memset(vmcb, 0, 4096);
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;         // secure startup... why
    ctrl_area->svm_instrs.ICEBP = 1;          // in-circuit emulator breakpoint
    ctrl_area->svm_instrs.WBINVD = 1;         // write back and invalidate caches... why?
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;            // invalidate page in ASID... AMD ERRATA
    ctrl_area->instrs.CPUID = 1;

    ctrl_area->instrs.HLT = 1;

    /* Set at VMM launch as needed */
    ctrl_area->instrs.RDTSC = 0;
    ctrl_area->svm_instrs.RDTSCP = 0;
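    // (These two are reconfigured on every entry by
    // v3_svm_config_tsc_virtualization() below, depending on whether the
    // time virtualization flags request RDTSC trapping)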
#ifdef V3_CONFIG_TM_FUNC
    v3_tm_set_excp_intercepts(ctrl_area);
#endif

    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 0;   // allow SMIs to run in guest
    ctrl_area->instrs.INIT = 1;
    //    ctrl_area->instrs.PAUSE = 1;   // do not care as does not halt
    ctrl_area->instrs.shutdown_evts = 1;

    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;
    /* Setup Guest Machine state */

    memset(&core->vm_regs, 0, sizeof(core->vm_regs));
    memset(&core->ctrl_regs, 0, sizeof(core->ctrl_regs));
    memset(&core->dbg_regs, 0, sizeof(core->dbg_regs));
    memset(&core->segments, 0, sizeof(core->segments));
    memset(&core->msrs, 0, sizeof(core->msrs));
    memset(&core->fp_state, 0, sizeof(core->fp_state));

    core->intr_core_state.irq_pending = 0;
    core->intr_core_state.irq_started = 0;
    core->intr_core_state.swintr_posted = 0;

    core->excp_state.excp_pending = 0;

    // reset of gprs to expected values at init
    core->vm_regs.rsp = 0x00;
    core->vm_regs.rdx = 0x00000f00;  // family/stepping/etc

    core->ctrl_regs.rflags = 0x00000002;  // The reserved bit is always 1
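    // 0x60010010 = CD | NW | ET | WP: the architectural CR0 reset value
    // (0x60000010) plus WP, so that write-protect faults reach the memory
    // hooks even in real mode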
    core->ctrl_regs.cr0 = 0x60010010;  // Set the WP flag so the memory hooks work in real-mode
    core->shdw_pg_state.guest_cr0 = core->ctrl_regs.cr0;

    core->shdw_pg_state.guest_cr3 = core->ctrl_regs.cr3;

    core->shdw_pg_state.guest_cr4 = core->ctrl_regs.cr4;

    core->ctrl_regs.efer |= EFER_MSR_svm_enable;
    core->shdw_pg_state.guest_efer.value = core->ctrl_regs.efer;
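    // EFER.SVME must remain set in the hardware-visible EFER while we run
    // under SVM; v3_svm_handle_efer_write() re-asserts it on every guest
    // write to EFER, so the guest cannot clear it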
    core->segments.cs.selector = 0xf000;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = 0x0000f0000LL;

    // (raw attributes = 0xf3)
    core->segments.cs.type = 0xa;
    core->segments.cs.system = 0x1;
    core->segments.cs.dpl = 0x0;
    core->segments.cs.present = 1;
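    // With CS.base = 0xf0000 and the initial IP of 0xfff0 (established
    // where the core's RIP is set), the first instruction fetch is from
    // 0xffff0, the legacy BIOS reset vector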
    struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                      &(core->segments.es), &(core->segments.fs),
                                      &(core->segments.gs), NULL};
    int i;

    for (i = 0; segregs[i] != NULL; i++) {
        struct v3_segment * seg = segregs[i];

        seg->selector = 0x0000;
        // seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->limit = 0xffff;

        // (raw attributes = 0xf3)
        seg->type = 0x3;
        seg->system = 0x1;
        seg->dpl = 0x0;
        seg->present = 1;
    }
    core->segments.gdtr.selector = 0x0000;
    core->segments.gdtr.limit = 0x0000ffff;
    core->segments.gdtr.base = 0x0000000000000000LL;
    core->segments.gdtr.dpl = 0x0;

    core->segments.idtr.selector = 0x0000;
    core->segments.idtr.limit = 0x0000ffff;
    core->segments.idtr.base = 0x0000000000000000LL;

    core->segments.ldtr.selector = 0x0000;
    core->segments.ldtr.limit = 0x0000ffff;
    core->segments.ldtr.base = 0x0000000000000000LL;
    core->segments.ldtr.system = 0;
    core->segments.ldtr.type = 0x2;
    core->segments.ldtr.dpl = 0x0;

    core->segments.tr.selector = 0x0000;
    core->segments.tr.limit = 0x0000ffff;
    core->segments.tr.base = 0x0000000000000000LL;
    core->segments.tr.system = 0;
    core->segments.tr.type = 0x3;
    core->segments.tr.dpl = 0x0;

    core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
    core->dbg_regs.dr7 = 0x0000000000000400LL;
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;

    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;
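    // With IOIO_PROT / MSR_PROT set, only the I/O ports and MSRs whose
    // bits are set in these (physically addressed) permission bitmaps
    // cause #VMEXITs; everything else passes through to hardware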
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;
    // The above also assures that TPR changes (CR8) are only virtual

    // However, we need to see TPR writes since they will
    // affect the virtual apic
    // we reflect CR8 out to ctrl_regs->apic_tpr
    ctrl_area->cr_reads.cr8 = 1;
    ctrl_area->cr_writes.cr8 = 1;
    // We will do all TPR comparisons in the virtual apic
    // We also do not want the V_TPR to be able to mask the PIC
    ctrl_area->guest_ctrl.V_IGN_TPR = 1;

    if (core->core_run_state == CORE_INVALID) {
        v3_hook_msr(core->vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_svm_handle_efer_write,
                    core);
    }
    if (core->shdw_pg_mode == SHADOW_PAGING) {

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        if (core->core_run_state == CORE_INVALID) {
            if (v3_init_passthrough_pts(core) == -1) {
                PrintError(core->vm_info, core, "Could not initialize passthrough page tables\n");
                return;
            }
            // the shadow page tables are OK since we have not initialized them yet
        } else {
            // CORE_RESETTING
            // invalidation of shadow page tables happened earlier in this function
        }
        core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;

        core->ctrl_regs.cr0 |= 0x80000000;
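        // Hardware CR0 gains PG (0x80000000) so the MMU walks the shadow
        // page tables, while the guest's view of CR0 (guest_cr0 = ET only)
        // still shows paging disabled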
        v3_activate_passthrough_pt(core);

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        // intercept cr4 reads/writes so the shadow pager can use PAE independently of the guest
        ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;
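        // 0x0007040600070406 is the architectural power-on PAT value:
        // entries alternate WB (06), WT (04), UC- (07), UC (00)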
    } else if (core->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        // Set the Nested Page Table pointer
        if (core->core_run_state == CORE_INVALID) {
            if (v3_init_passthrough_pts(core) == -1) {
                PrintError(core->vm_info, core, "Could not initialize Nested page tables\n");
                return;
            }
        } else {
            // the existing nested page tables will work fine
        }
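        // N_CR3 holds the root of the nested page tables, which translate
        // guest-physical to host-physical addresses; direct_map_pt is the
        // passthrough map built above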
        ctrl_area->N_CR3 = core->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
    /* tell the guest that we don't support SVM */
    if (core->core_run_state == CORE_INVALID) {
        v3_hook_msr(core->vm_info, SVM_VM_CR_MSR,
                    &v3_handle_vm_cr_read,
                    &v3_handle_vm_cr_write,
                    core);
    }
    if (core->core_run_state == CORE_INVALID) {
#define INT_PENDING_AMD_MSR 0xc0010055

        v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);

        v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);

        v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);
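        // Hooking an MSR with NULL read/write handlers makes it full
        // passthrough: the guest accesses the real MSR, whose value is
        // context-switched via the VMCB save area on entry/exit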
        // Passthrough read operations are ok.
        v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, v3_msr_unhandled_write, NULL);
    }
}
int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class) {

    PrintDebug(core->vm_info, core, "Allocating VMCB\n");
    core->vmm_data = (void *)Allocate_VMCB();

    if (core->vmm_data == NULL) {
        PrintError(core->vm_info, core, "Could not allocate VMCB, Exiting...\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug(core->vm_info, core, "Initializing VMCB (addr=%p)\n", (void *)core->vmm_data);
        Init_VMCB_BIOS((vmcb_t*)(core->vmm_data), core);
    } else {
        PrintError(core->vm_info, core, "Invalid VM class\n");
        return -1;
    }

    core->core_run_state = CORE_STOPPED;

    return 0;
}


int v3_deinit_svm_vmcb(struct guest_info * core) {
    if (core && core->vmm_data) {
        V3_FreePages(V3_PAddr(core->vmm_data), 1);
    }
    return 0;
}
static int svm_handle_standard_reset(struct guest_info *core)
{
    if (core->core_run_state != CORE_RESETTING) {
        return 0;
    }

    PrintDebug(core->vm_info, core, "Handling standard reset (guest state before follows)\n");

#ifdef V3_CONFIG_DEBUG_SVM
    v3_print_guest_state(core);
#endif

    // wait until all resetting cores get here (ROS or whole VM)
    v3_counting_barrier(&core->vm_info->reset_barrier);
    // I could be a ROS core, or I could be in a non-HVM
    // either way, if I'm core 0, I'm the leader
    if (core->vcpu_id == 0) {
        uint64_t mem_size = core->vm_info->mem_size;

#ifdef V3_CONFIG_HVM
        // on a ROS reset, we should only
        // manipulate the part of the memory seen by
        // the ROS
        if (core->vm_info->hvm_state.is_hvm) {
            mem_size = v3_get_hvm_ros_memsize(core->vm_info);
        }
#endif

        core->vm_info->run_state = VM_RESETTING;

        // copy bioses again because some, like seabios, assume they start
        // from a fresh copy; this should also blow away the BDA and EBDA
        PrintDebug(core->vm_info, core, "Clear memory (%p bytes)\n", (void*)core->vm_info->mem_size);
        if (v3_set_gpa_memory(core, 0, mem_size, 0) != mem_size) {
            PrintError(core->vm_info, core, "Clear of memory failed\n");
        }
        PrintDebug(core->vm_info, core, "Copying bioses\n");
        if (v3_setup_bioses(core->vm_info, core->vm_info->cfg_data->cfg)) {
            PrintError(core->vm_info, core, "Setup of bioses failed\n");
        }
    }
    Init_VMCB_BIOS((vmcb_t*)(core->vmm_data), core);

    PrintDebug(core->vm_info, core, "InitVMCB done\n");

    core->cpu_mode = REAL;
    core->mem_mode = PHYSICAL_MEM;

    PrintDebug(core->vm_info, core, "Machine reset to REAL/PHYSICAL\n");

    memset(V3_VAddr((void*)(host_vmcbs[V3_Get_CPU()])), 0, 4096*4);  // good measure...

    // core zero will be restarted by the main execution loop
    core->core_run_state = CORE_STOPPED;
    if (core->vcpu_id == 0) {
        core->vm_info->run_state = VM_RUNNING;
    }

#ifdef V3_CONFIG_DEBUG_SVM
    PrintDebug(core->vm_info, core, "VMCB state at end of reset\n");
    PrintDebugVMCB((vmcb_t*)(core->vmm_data));
    PrintDebug(core->vm_info, core, "Guest state at end of reset\n");
    v3_print_guest_state(core);
#endif

    // wait until we are all ready to go
    v3_counting_barrier(&core->vm_info->reset_barrier);

    PrintDebug(core->vm_info, core, "Returning with request for recycle loop\n");

    return 1; // reboot is occurring
}
#ifdef V3_CONFIG_CHECKPOINT
int v3_svm_save_core(struct guest_info * core, void * ctx){

    vmcb_saved_state_t * guest_area = GET_VMCB_SAVE_STATE_AREA(core->vmm_data);

    // Special case saves of data we need immediate access to
    V3_CHKPT_SAVE(ctx, "CPL", core->cpl, failout);
    V3_CHKPT_SAVE(ctx, "STAR", guest_area->star, failout);
    V3_CHKPT_SAVE(ctx, "CSTAR", guest_area->cstar, failout);
    V3_CHKPT_SAVE(ctx, "LSTAR", guest_area->lstar, failout);
    V3_CHKPT_SAVE(ctx, "SFMASK", guest_area->sfmask, failout);
    V3_CHKPT_SAVE(ctx, "KERNELGSBASE", guest_area->KernelGsBase, failout);
    V3_CHKPT_SAVE(ctx, "SYSENTER_CS", guest_area->sysenter_cs, failout);
    V3_CHKPT_SAVE(ctx, "SYSENTER_ESP", guest_area->sysenter_esp, failout);
    V3_CHKPT_SAVE(ctx, "SYSENTER_EIP", guest_area->sysenter_eip, failout);
    // and then we save the whole enchilada
    if (v3_chkpt_save(ctx, "VMCB_DATA", PAGE_SIZE, core->vmm_data)) {
        PrintError(core->vm_info, core, "Could not save SVM vmcb\n");
        goto failout;
    }

    return 0;

 failout:
    PrintError(core->vm_info, core, "Failed to save SVM state for core\n");
    return -1;
}
int v3_svm_load_core(struct guest_info * core, void * ctx){

    vmcb_saved_state_t * guest_area = GET_VMCB_SAVE_STATE_AREA(core->vmm_data);

    // Reload what we special cased, which we will overwrite in a minute
    V3_CHKPT_LOAD(ctx, "CPL", core->cpl, failout);
    V3_CHKPT_LOAD(ctx, "STAR", guest_area->star, failout);
    V3_CHKPT_LOAD(ctx, "CSTAR", guest_area->cstar, failout);
    V3_CHKPT_LOAD(ctx, "LSTAR", guest_area->lstar, failout);
    V3_CHKPT_LOAD(ctx, "SFMASK", guest_area->sfmask, failout);
    V3_CHKPT_LOAD(ctx, "KERNELGSBASE", guest_area->KernelGsBase, failout);
    V3_CHKPT_LOAD(ctx, "SYSENTER_CS", guest_area->sysenter_cs, failout);
    V3_CHKPT_LOAD(ctx, "SYSENTER_ESP", guest_area->sysenter_esp, failout);
    V3_CHKPT_LOAD(ctx, "SYSENTER_EIP", guest_area->sysenter_eip, failout);
    // and then we load the whole enchilada
    if (v3_chkpt_load(ctx, "VMCB_DATA", PAGE_SIZE, core->vmm_data)) {
        PrintError(core->vm_info, core, "Could not load SVM vmcb\n");
        goto failout;
    }

    return 0;

 failout:
    PrintError(core->vm_info, core, "Failed to load SVM state for core\n");
    return -1;
}
#endif
static int update_irq_exit_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    // Fix for QEMU bug using EVENTINJ as an internal cache
    guest_ctrl->EVENTINJ.valid = 0;

    if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
#endif

        info->intr_core_state.irq_started = 1;
        info->intr_core_state.irq_pending = 0;

        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }
    if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
#endif

        // Interrupt was taken fully vectored
        info->intr_core_state.irq_started = 0;

    } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
#endif
    }

    return 0;
}
static int update_irq_entry_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
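    // If the last #VMEXIT interrupted an event that was in the middle of
    // being delivered, hardware parks the partially delivered event in
    // exit_int_info; it must be re-issued before anything new is injected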
    if (guest_ctrl->exit_int_info.valid) {
        // We need to complete the previous injection
        guest_ctrl->EVENTINJ = guest_ctrl->exit_int_info;

        PrintDebug(info->vm_info, info, "Continuing injection of event - eventinj=0x%llx\n", *(uint64_t*)&guest_ctrl->EVENTINJ);

        return 0;
    }

    if (info->intr_core_state.irq_pending == 0) {
        guest_ctrl->guest_ctrl.V_IRQ = 0;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = 0;
    }
    if (v3_excp_pending(info)) {

        uint_t excp = v3_get_excp_number(info);

        guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;
        guest_ctrl->EVENTINJ.vector = excp;

        if (info->excp_state.excp_error_code_valid) {
            guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
            guest_ctrl->EVENTINJ.ev = 1;
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            PrintDebug(info->vm_info, info, "Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
#endif
        } else {
            guest_ctrl->EVENTINJ.error_code = 0;
            guest_ctrl->EVENTINJ.ev = 0;
        }
        guest_ctrl->EVENTINJ.rsvd = 0;
        guest_ctrl->EVENTINJ.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n",
                   (int)info->num_exits,
                   guest_ctrl->EVENTINJ.vector,
                   (void *)(addr_t)info->ctrl_regs.cr2,
                   (void *)(addr_t)info->rip);
#endif

        v3_injecting_excp(info, excp);
    } else if (info->intr_core_state.irq_started == 1) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "IRQ pending from previous injection\n");
#endif

        guest_ctrl->guest_ctrl.V_IRQ = 1;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;

        // We ignore the virtual TPR on this injection
        // TPR/PPR tests have already been done in the APIC.
        guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
        guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4;  // was 0xf
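        // (An interrupt's priority class is the upper nibble of its
        // vector, the same classing the APIC uses for TPR comparisons)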
    } else {
        switch (v3_intr_pending(info)) {
            case V3_EXTERNAL_IRQ: {
                int irq = v3_get_intr(info);

                if (irq < 0) {
                    break;
                }

                guest_ctrl->guest_ctrl.V_IRQ = 1;
                guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;

                // We ignore the virtual TPR on this injection
                // TPR/PPR tests have already been done in the APIC.
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
                guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4;  // was 0xf

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug(info->vm_info, info, "Injecting Interrupt %d (EIP=%p)\n",
                           guest_ctrl->guest_ctrl.V_INTR_VECTOR,
                           (void *)(addr_t)info->rip);
#endif

                info->intr_core_state.irq_pending = 1;
                info->intr_core_state.irq_vector = irq;

                break;
            }
            case V3_NMI:
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug(info->vm_info, info, "Injecting NMI\n");
#endif
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
                guest_ctrl->EVENTINJ.ev = 0;
                guest_ctrl->EVENTINJ.error_code = 0;
                guest_ctrl->EVENTINJ.rsvd = 0;
                guest_ctrl->EVENTINJ.valid = 1;

                break;
            case V3_SOFTWARE_INTR:
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug(info->vm_info, info, "Injecting software interrupt -- type: %d, vector: %d\n",
                           SVM_INJECTION_SOFT_INTR, info->intr_core_state.swintr_vector);
#endif
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
                guest_ctrl->EVENTINJ.vector = info->intr_core_state.swintr_vector;
                guest_ctrl->EVENTINJ.ev = 0;
                guest_ctrl->EVENTINJ.error_code = 0;
                guest_ctrl->EVENTINJ.rsvd = 0;
                guest_ctrl->EVENTINJ.valid = 1;

                /* reset swintr state */
                info->intr_core_state.swintr_posted = 0;
                info->intr_core_state.swintr_vector = 0;

                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
                break;

            case V3_INVALID_INTR:
            default:
                break;
        }
    }

    return 0;
}
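
// When RDTSC is not intercepted, hardware adds TSC_OFFSET to the host TSC
// on every guest TSC read: a zero offset is raw passthrough, while a
// computed offset slaves the guest TSC to Palacios virtual time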
int v3_svm_config_tsc_virtualization(struct guest_info * info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
        ctrl_area->instrs.RDTSC = 1;
        ctrl_area->svm_instrs.RDTSCP = 1;
    } else {
        ctrl_area->instrs.RDTSC = 0;
        ctrl_area->svm_instrs.RDTSCP = 0;
    }

    if (info->time_state.flags & VM_TIME_TSC_PASSTHROUGH) {
        ctrl_area->TSC_OFFSET = 0;
    } else {
        ctrl_area->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
    }

    return 0;
}
/*
 * CAUTION and DANGER!!!
 *
 * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi calls inside this function
 * When executing a symbiotic call, the VMCB WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_svm_enter(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
    uint64_t guest_cycles = 0;

    // Conditionally yield the CPU if the timeslice has expired
    v3_schedule(info);

#ifdef V3_CONFIG_MEM_TRACK
    v3_mem_track_entry(info);
#endif

    // Update timer devices after being in the VM before doing
    // IRQ updates, so that any interrupts they raise get seen
    // in the IRQ update
    v3_advance_time(info, NULL);

    v3_update_timers(info);

    // disable global interrupts for vm state transition
    v3_clgi();
    // Synchronize the guest state to the VMCB
    guest_state->cr0 = info->ctrl_regs.cr0;
    guest_state->cr2 = info->ctrl_regs.cr2;
    guest_state->cr3 = info->ctrl_regs.cr3;
    guest_state->cr4 = info->ctrl_regs.cr4;
    guest_state->dr6 = info->dbg_regs.dr6;
    guest_state->dr7 = info->dbg_regs.dr7;

    // CR8 is now updated by read/writes and it contains the APIC TPR
    // the V_TPR should be just the class part of that.
    // This update is here just for completeness.  We currently
    // are ignoring V_TPR on all injections and doing the priority logic
    // in the APIC.
    // guest_ctrl->guest_ctrl.V_TPR = ((info->ctrl_regs.apic_tpr) >> 4) & 0xf;

    //guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
    guest_state->rflags = info->ctrl_regs.rflags;

    guest_state->efer = info->ctrl_regs.efer;

    /* Synchronize MSRs */
    guest_state->star = info->msrs.star;
    guest_state->lstar = info->msrs.lstar;
    guest_state->sfmask = info->msrs.sfmask;
    guest_state->KernelGsBase = info->msrs.kern_gs_base;

    guest_state->cpl = info->cpl;

    v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));

    guest_state->rax = info->vm_regs.rax;
    guest_state->rip = info->rip;
    guest_state->rsp = info->vm_regs.rsp;

    V3_FP_ENTRY_RESTORE(info);
#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

#ifdef V3_CONFIG_TM_FUNC
    v3_tm_check_intr_state(info, guest_ctrl, guest_state);
#endif

    PrintDebug(info->vm_info, info, "SVM Entry to CS=%p rip=%p...\n",
               (void *)(addr_t)info->segments.cs.base,
               (void *)(addr_t)info->rip);
#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 1) {
        if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
            V3_Print(info->vm_info, info, "!!! Injecting Interrupt during Sym call !!!\n");
        }
    }
#endif

    v3_svm_config_tsc_virtualization(info);

    //V3_Print(info->vm_info, info, "Calling v3_svm_launch\n");
    {
        uint64_t entry_tsc = 0;
        uint64_t exit_tsc = 0;

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_enter(info);
#endif
#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_enter(info);
#endif

        if (guest_ctrl->EVENTINJ.valid && guest_ctrl->interrupt_shadow) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            PrintDebug(info->vm_info, info, "Event injection during an interrupt shadow\n");
#endif
        }

        rdtscll(entry_tsc);
        v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
        rdtscll(exit_tsc);

#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_exit(info);
#endif
#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_exit(info);
#endif

        guest_cycles = exit_tsc - entry_tsc;
    }
    //V3_Print(info->vm_info, info, "SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);

    v3_last_exit = (uint32_t)(guest_ctrl->exit_code);

    v3_advance_time(info, &guest_cycles);

    V3_FP_EXIT_SAVE(info);

    // Save Guest state from VMCB
    info->rip = guest_state->rip;
    info->vm_regs.rsp = guest_state->rsp;
    info->vm_regs.rax = guest_state->rax;

    info->cpl = guest_state->cpl;

    info->ctrl_regs.cr0 = guest_state->cr0;
    info->ctrl_regs.cr2 = guest_state->cr2;
    info->ctrl_regs.cr3 = guest_state->cr3;
    info->ctrl_regs.cr4 = guest_state->cr4;
    info->dbg_regs.dr6 = guest_state->dr6;
    info->dbg_regs.dr7 = guest_state->dr7;

    // We do not track this anymore
    // V_TPR is ignored and we do the logic in the APIC
    //info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;

    info->ctrl_regs.rflags = guest_state->rflags;
    info->ctrl_regs.efer = guest_state->efer;

    /* Synchronize MSRs */
    info->msrs.star = guest_state->star;
    info->msrs.lstar = guest_state->lstar;
    info->msrs.sfmask = guest_state->sfmask;
    info->msrs.kern_gs_base = guest_state->KernelGsBase;

    v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
    info->cpu_mode = v3_get_vm_cpu_mode(info);
    info->mem_mode = v3_get_vm_mem_mode(info);

    // save exit info here
    exit_code = guest_ctrl->exit_code;
    exit_info1 = guest_ctrl->exit_info1;
    exit_info2 = guest_ctrl->exit_info2;
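    // (latched now because, per the CAUTION above, the VMCB contents may
    // change before the exit handler actually runs)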
#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    // reenable global interrupts after vm exit
    v3_stgi();

    // Conditionally yield the CPU if the timeslice has expired
    v3_schedule(info);
    // This update timers is for time-dependent handlers
    // if we're slaved to host time
    v3_advance_time(info, NULL);
    v3_update_timers(info);

    {
        int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);

        if (ret != 0) {
            PrintError(info->vm_info, info, "Error in SVM exit handler (ret=%d)\n", ret);
            PrintError(info->vm_info, info, "  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t) exit_code);
            return -1;
        }
    }

    if (info->timeouts.timeout_active) {
        /* Check to see if any timeouts have expired */
        v3_handle_timeouts(info, guest_cycles);
    }

#ifdef V3_CONFIG_MEM_TRACK
    v3_mem_track_exit(info);
#endif

    return 0;
}
int v3_start_svm_guest(struct guest_info * info) {

    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    PrintDebug(info->vm_info, info, "Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);

#ifdef V3_CONFIG_MULTIBOOT
    if (v3_setup_multiboot_core_for_boot(info)) {
        PrintError(info->vm_info, info, "Failed to setup Multiboot core...\n");
        return -1;
    }
#endif

#ifdef V3_CONFIG_HVM
    if (v3_setup_hvm_hrt_core_for_boot(info)) {
        PrintError(info->vm_info, info, "Failed to setup HRT core...\n");
        return -1;
    }
#endif
    while (1) {

        if (info->core_run_state == CORE_STOPPED) {

            if (info->vcpu_id == 0) {
                info->core_run_state = CORE_RUNNING;
            } else {
                PrintDebug(info->vm_info, info, "SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);

                V3_NO_WORK(info);

                // Compiler must not optimize away this read
                while (*((volatile int *)(&info->core_run_state)) == CORE_STOPPED) {

                    if (info->vm_info->run_state == VM_STOPPED) {
                        // The VM was stopped before this core was initialized.
                        return 0;
                    }

                    V3_STILL_NO_WORK(info);

                    //PrintDebug(info->vm_info, info, "SVM core %u: still waiting for INIT\n", info->vcpu_id);
                }

                V3_HAVE_WORK_AGAIN(info);

                PrintDebug(info->vm_info, info, "SVM core %u (on %u) initialized\n", info->vcpu_id, info->pcpu_id);

                // We'll be paranoid about race conditions here
                v3_wait_at_barrier(info);
            }
        }
        PrintDebug(info->vm_info, info, "SVM core %u (on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
                   info->vcpu_id, info->pcpu_id,
                   info->segments.cs.selector, (void *)(info->segments.cs.base),
                   info->segments.cs.limit, (void *)(info->rip));

        PrintDebug(info->vm_info, info, "SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n",
                   info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);

#ifdef V3_CONFIG_DEBUG_SVM
        PrintDebugVMCB((vmcb_t*)(info->vmm_data));
#endif

        v3_start_time(info);
        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

#ifdef V3_CONFIG_HVM
        if (v3_handle_hvm_reset(info) > 0) {
            continue;
        }
#endif

#ifdef V3_CONFIG_MULTIBOOT
        if (v3_handle_multiboot_reset(info) > 0) {
            continue;
        }
#endif

        if (svm_handle_standard_reset(info) > 0) {
            continue;
        }

#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_start(info);
#endif

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_start(info);
#endif
        if (v3_svm_enter(info) == -1) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print(info->vm_info, info, "SVM core %u: SVM ERROR!!\n", info->vcpu_id);

            v3_print_guest_state(info);

            V3_Print(info->vm_info, info, "SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code);

            V3_Print(info->vm_info, info, "SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
            V3_Print(info->vm_info, info, "SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            V3_Print(info->vm_info, info, "SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
            V3_Print(info->vm_info, info, "SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
            if (info->mem_mode == PHYSICAL_MEM) {
                if (v3_gpa_to_hva(info, linear_addr, &host_addr)) {
                    PrintError(info->vm_info, info, "Cannot translate address\n");
                    return -1;
                }
            } else if (info->mem_mode == VIRTUAL_MEM) {
                if (v3_gva_to_hva(info, linear_addr, &host_addr)) {
                    PrintError(info->vm_info, info, "Cannot translate address\n");
                    return -1;
                }
            }

            V3_Print(info->vm_info, info, "SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print(info->vm_info, info, "SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);

            break;
        }
        v3_wait_at_barrier(info);

        if (info->vm_info->run_state == VM_STOPPED) {
            PrintDebug(info->vm_info, info, "Stopping core as VM is stopped\n");
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if ((info->num_exits % 50000) == 0) {
            V3_Print(info->vm_info, info, "SVM Exit number %d\n", (uint32_t)info->num_exits);
            v3_print_guest_state(info);
        }
    }

#ifdef V3_CONFIG_PMU_TELEMETRY
    v3_pmu_telemetry_end(info);
#endif

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
    v3_pwrstat_telemetry_end(info);
#endif

    // Need to take down the other cores on error...

    return 0;
}
int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {

    // Write the RIP, CS selector, and CS descriptor
    // assume the rest is already good to go
    //
    // vector VV -> rip at 0
    //              CS  = VV00
    // This means we start executing at linear address VV000
    //
    // So the selector needs to be VV00
    // and the base needs to be VV000
    //
    core->rip = 0;
    core->segments.cs.selector = rip << 8;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = rip << 12;

    return 0;
}
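
// For example (hypothetical values): a start vector of 0x9f would give
// CS.selector = 0x9f00 and CS.base = 0x9f000, so with rip = 0 the core
// begins fetching at linear address 0x9f000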
/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug(VM_NONE, VCORE_NONE, "CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        V3_Print(VM_NONE, VCORE_NONE, "SVM Not Available\n");
        return 0;
    }

    v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug(VM_NONE, VCORE_NONE, "SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
    if (vm_cr_low & SVM_VM_CR_MSR_svmdis) {
        V3_Print(VM_NONE, VCORE_NONE, "SVM is available but is disabled.\n");

        v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

        if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
            V3_Print(VM_NONE, VCORE_NONE, "SVM BIOS Disabled, not unlockable\n");
        } else {
            V3_Print(VM_NONE, VCORE_NONE, "SVM is locked with a key\n");
        }

        return 0;

    } else {
        V3_Print(VM_NONE, VCORE_NONE, "SVM is available and enabled.\n");
        v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

        // warn if the NRIP-save feature is absent (feature-bit constant
        // name assumed here, following the svml/np naming convention)
        if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_nrips) == 0) {
            PrintError(VM_NONE, VCORE_NONE, "WARNING: NO SVM SUPPORT FOR NRIP - SW INTR INJECTION WILL LIKELY FAIL\n");
        }

        return 1;
    }
}
static int has_svm_nested_paging() {
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug(VM_NONE, VCORE_NONE, "CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        V3_Print(VM_NONE, VCORE_NONE, "SVM Nested Paging not supported\n");
        return 0;
    } else {
        V3_Print(VM_NONE, VCORE_NONE, "SVM Nested Paging supported\n");
        return 1;
    }
}
void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    V3_Print(VM_NONE, VCORE_NONE, "SVM Enabled\n");

    // Setup the host state save area
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);  // need not be shadow-safe, not exposed to guest

    if (!host_vmcbs[cpu_id]) {
        PrintError(VM_NONE, VCORE_NONE, "Failed to allocate VMCB\n");
        return;
    }
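    // SVM_VM_HSAVE_PA_MSR tells the hardware where to save host state
    // across VMRUN/#VMEXIT; it takes a physical address and is per-CPU,
    // which is why host_vmcbs is indexed by cpu_id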
    // msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug(VM_NONE, VCORE_NONE, "Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}
void v3_deinit_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // reset SVM_VM_HSAVE_PA_MSR
    // Does setting it to NULL disable??
    msr.r_reg = 0;
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    // Disable SVM
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low &= ~EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    V3_FreePages((void *)host_vmcbs[cpu_id], 4);

    V3_Print(VM_NONE, VCORE_NONE, "Host CPU %d host area freed, and SVM disabled\n", cpu_id);
}
/*
 * Test VMSAVE/VMLOAD Latency
 * (historical diagnostic, kept disabled; note it predates the rename of
 * host_vmcb to host_vmcbs)
 */
#if 0
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;

    __asm__ __volatile__ (
        "rdtsc ; "
        "movl %%eax, %%esi ; "
        "movl %%edx, %%edi ; "
        "movq %%rcx, %%rax ; "
        vmsave
        "rdtsc ; "
        : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
        : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
        );

    start = ((uint64_t)start_hi << 32) | start_lo;
    end = ((uint64_t)end_hi << 32) | end_lo;

    PrintDebug(core->vm_info, core, "VMSave Cycle Latency: %d\n", (uint32_t)(end - start));

    __asm__ __volatile__ (
        "rdtsc ; "
        "movl %%eax, %%esi ; "
        "movl %%edx, %%edi ; "
        "movq %%rcx, %%rax ; "
        vmload
        "rdtsc ; "
        : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
        : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
        );

    start = ((uint64_t)start_hi << 32) | start_lo;
    end = ((uint64_t)end_hi << 32) | end_lo;

    PrintDebug(core->vm_info, core, "VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
}
#endif
/* End Latency Test */
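
/* Note: 0x0F,0x01,0xDB and 0x0F,0x01,0xDA are the raw opcode bytes for
 * VMSAVE and VMLOAD respectively (VMCB physical address in rAX), emitted
 * as .byte sequences for assemblers that predate the SVM mnemonics. */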