/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/svm_io.h>

#include <palacios/vmm_sprintf.h>
#ifndef V3_CONFIG_DEBUG_SVM
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
uint32_t v3_last_exit;

// Per-CPU physical addresses of the host state save areas (one entry per logical CPU)
static addr_t host_vmcbs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};
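/*
 * Before each VMRUN, the SVM_VM_HSAVE_PA_MSR on the executing CPU must point
 * at a page where the hardware spills host state across the world switch;
 * v3_init_svm_cpu() below programs that MSR with this array's per-CPU entry.
 */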
extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = NULL;
    addr_t vmcb_pa = (addr_t)V3_AllocPages(1);

    if ((void *)vmcb_pa == NULL) {
        PrintError("Error allocating VMCB\n");
        return NULL;
    }

    vmcb_page = (vmcb_t *)V3_VAddr((void *)vmcb_pa);

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}
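/*
 * Populate a zeroed VMCB (and the core's register state) so the guest starts
 * in real mode at the traditional BIOS reset vector, with the intercepts the
 * VMM needs already armed.
 */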
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;
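    /*
     * Each bit set in the VMCB control area below turns the corresponding
     * instruction or event into an unconditional #VMEXIT, so the VMM regains
     * control whenever the guest executes it.
     */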
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;
    ctrl_area->instrs.CPUID = 1;

    ctrl_area->instrs.HLT = 1;
#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
    ctrl_area->instrs.RDTSC = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
#endif
    // guest_state->cr0 = 0x00000001;    // PE
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 0; // allow SMIs to run in guest
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;
    /* Setup Guest Machine state */

    core->vm_regs.rsp = 0x00;
    core->vm_regs.rdx = 0x00000f00;

    core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
    core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
    core->ctrl_regs.efer |= EFER_MSR_svm_enable;

    core->segments.cs.selector = 0xf000;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = 0x0000000f0000LL;

    // (raw attributes = 0xf3)
    core->segments.cs.type = 0x3;
    core->segments.cs.system = 0x1;
    core->segments.cs.dpl = 0x3;
    core->segments.cs.present = 1;
    struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                      &(core->segments.es), &(core->segments.fs),
                                      &(core->segments.gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct v3_segment * seg = segregs[i];

        seg->selector = 0x0000;
        //	seg->base = seg->selector << 4;
        seg->base = 0x00000000;

        // (raw attributes = 0xf3)
        seg->type = 0x3;
        seg->system = 0x1;
        seg->dpl = 0x3;
        seg->present = 1;
    }
    core->segments.gdtr.limit = 0x0000ffff;
    core->segments.gdtr.base = 0x0000000000000000LL;
    core->segments.idtr.limit = 0x0000ffff;
    core->segments.idtr.base = 0x0000000000000000LL;

    core->segments.ldtr.selector = 0x0000;
    core->segments.ldtr.limit = 0x0000ffff;
    core->segments.ldtr.base = 0x0000000000000000LL;
    core->segments.tr.selector = 0x0000;
    core->segments.tr.limit = 0x0000ffff;
    core->segments.tr.base = 0x0000000000000000LL;

    core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
    core->dbg_regs.dr7 = 0x0000000000000400LL;
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;

    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;
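    /*
     * With IOIO_PROT/MSR_PROT set, the hardware consults these physical-address
     * bitmaps on every IN/OUT and RDMSR/WRMSR: a set bit triggers a #VMEXIT,
     * a clear bit lets the access run unintercepted. The maps themselves are
     * built by the I/O and MSR hook frameworks (io_map/msr_map).
     */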
    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;
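    /*
     * V_INTR_MASKING keeps the host's physical interrupt state separate from
     * the guest's: EFLAGS.IF inside the guest only gates virtual interrupts,
     * while physical interrupts (intercepted via instrs.INTR) always exit.
     */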
    v3_hook_msr(core->vm_info, EFER_MSR,
                &v3_handle_efer_read,
                &v3_handle_efer_write,
                core);
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return;
        }

        core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        core->ctrl_regs.cr0 |= 0x80000000;
        core->ctrl_regs.cr3 = core->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;
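        /*
         * Shadow paging must observe every guest CR0/CR3/CR4 access, plus each
         * INVLPG and page fault below, so it can keep the shadow page tables
         * in sync with what the guest believes its page tables contain.
         */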
        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;
    } else if (core->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return;
        }

        ctrl_area->N_CR3 = core->direct_map_pt;
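        /*
         * With NP_ENABLE set, the guest manages its own CR3 and page tables
         * natively; the hardware translates guest-physical to host-physical
         * addresses through the second-level tables rooted at N_CR3, so no
         * CR or INVLPG intercepts are needed here.
         */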
        guest_state->g_pat = 0x7040600070406ULL;
    }

    /* tell the guest that we don't support SVM */
    v3_hook_msr(core->vm_info, SVM_VM_CR_MSR,
                &v3_handle_vm_cr_read,
                &v3_handle_vm_cr_write,
                core);
}
int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class) {

    PrintDebug("Allocating VMCB\n");
    core->vmm_data = (void *)Allocate_VMCB();

    if (core->vmm_data == NULL) {
        PrintError("Could not allocate VMCB, Exiting...\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCB (addr=%p)\n", (void *)core->vmm_data);
        Init_VMCB_BIOS((vmcb_t*)(core->vmm_data), core);
    } else {
        PrintError("Invalid VM class\n");
        return -1;
    }

    return 0;
}
int v3_deinit_svm_vmcb(struct guest_info * core) {
    V3_FreePages(V3_PAddr(core->vmm_data), 1);
    return 0;
}
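/*
 * Interrupt injection on SVM is a two-step handshake: update_irq_entry_state()
 * requests an injection before VMRUN, and update_irq_exit_state() inspects the
 * VMCB after the #VMEXIT to learn whether the guest actually took the vector.
 */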
static int update_irq_exit_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    // Fix for QEMU bug using EVENTINJ as an internal cache
    guest_ctrl->EVENTINJ.valid = 0;

    if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
#endif

        info->intr_core_state.irq_started = 1;
        info->intr_core_state.irq_pending = 0;

        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
#endif

        // Interrupt was taken fully vectored
        info->intr_core_state.irq_started = 0;

    } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
#endif
    }

    return 0;
}
static int update_irq_entry_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    if (info->intr_core_state.irq_pending == 0) {
        guest_ctrl->guest_ctrl.V_IRQ = 0;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = 0;
    }

    if (v3_excp_pending(info)) {
        uint_t excp = v3_get_excp_number(info);

        guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;

        if (info->excp_state.excp_error_code_valid) {
            guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
            guest_ctrl->EVENTINJ.ev = 1;
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
#endif
        }

        guest_ctrl->EVENTINJ.vector = excp;

        guest_ctrl->EVENTINJ.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n",
                   (int)info->num_exits,
                   guest_ctrl->EVENTINJ.vector,
                   (void *)(addr_t)info->ctrl_regs.cr2,
                   (void *)(addr_t)info->rip);
#endif

        v3_injecting_excp(info, excp);
    } else if (info->intr_core_state.irq_started == 1) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("IRQ pending from previous injection\n");
#endif
        guest_ctrl->guest_ctrl.V_IRQ = 1;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;
        guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
        guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
    } else {
        switch (v3_intr_pending(info)) {
            case V3_EXTERNAL_IRQ: {
                uint32_t irq = v3_get_intr(info);

                guest_ctrl->guest_ctrl.V_IRQ = 1;
                guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
                guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
                           guest_ctrl->guest_ctrl.V_INTR_VECTOR,
                           (void *)(addr_t)info->rip);
#endif

                info->intr_core_state.irq_pending = 1;
                info->intr_core_state.irq_vector = irq;

                break;
            }
            case V3_NMI:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
                break;
            case V3_SOFTWARE_INTR:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
                break;

            case V3_INVALID_INTR:
            default:
                break;
        }
    }

    return 0;
}
/*
 * CAUTION and DANGER!!!
 *
 * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi calls inside this function.
 * When executing a symbiotic call, the VMCB WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_svm_enter(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    // Perform any additional yielding needed for time adjustment
    v3_adjust_time(info);

    // disable global interrupts for vm state transition
    v3_clgi();

    // Update timer devices after being in the VM, with interrupts
    // disabled, but before doing IRQ updates, so that any interrupts they
    // raise get seen immediately.
    v3_update_timers(info);
    // Synchronize the guest state to the VMCB
    guest_state->cr0 = info->ctrl_regs.cr0;
    guest_state->cr2 = info->ctrl_regs.cr2;
    guest_state->cr3 = info->ctrl_regs.cr3;
    guest_state->cr4 = info->ctrl_regs.cr4;
    guest_state->dr6 = info->dbg_regs.dr6;
    guest_state->dr7 = info->dbg_regs.dr7;
    guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
    guest_state->rflags = info->ctrl_regs.rflags;
    guest_state->efer = info->ctrl_regs.efer;

    guest_state->cpl = info->cpl;

    v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));

    guest_state->rax = info->vm_regs.rax;
    guest_state->rip = info->rip;
    guest_state->rsp = info->vm_regs.rsp;
#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif
    PrintDebug("SVM Entry to CS=%p rip=%p...\n",
               (void *)(addr_t)info->segments.cs.base,
               (void *)(addr_t)info->rip);
#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 1) {
        if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
            V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
        }
    }
#endif
    v3_time_enter_vm(info);
    guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
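    /*
     * TSC_OFFSET is added by the hardware to every guest RDTSC/RDTSCP (and
     * TSC MSR read), letting the guest see virtualized time without a
     * #VMEXIT on each access.
     */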
    //V3_Print("Calling v3_svm_launch\n");

    v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
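    /*
     * v3_svm_launch is the assembly stub (svm_lowlevel.S) that performs the
     * world switch: it saves the host registers, loads the guest GPRs from
     * vm_regs, issues VMRUN with the guest VMCB's physical address, and after
     * #VMEXIT writes the guest GPRs back into vm_regs before returning here.
     */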
    //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);

    v3_last_exit = (uint32_t)(guest_ctrl->exit_code);

    // Immediate exit from VM time bookkeeping
    v3_time_exit_vm(info);
    // Save Guest state from VMCB
    info->rip = guest_state->rip;
    info->vm_regs.rsp = guest_state->rsp;
    info->vm_regs.rax = guest_state->rax;

    info->cpl = guest_state->cpl;

    info->ctrl_regs.cr0 = guest_state->cr0;
    info->ctrl_regs.cr2 = guest_state->cr2;
    info->ctrl_regs.cr3 = guest_state->cr3;
    info->ctrl_regs.cr4 = guest_state->cr4;
    info->dbg_regs.dr6 = guest_state->dr6;
    info->dbg_regs.dr7 = guest_state->dr7;
    info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
    info->ctrl_regs.rflags = guest_state->rflags;
    info->ctrl_regs.efer = guest_state->efer;

    v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
    info->cpu_mode = v3_get_vm_cpu_mode(info);
    info->mem_mode = v3_get_vm_mem_mode(info);
    // save exit info here
    exit_code = guest_ctrl->exit_code;
    exit_info1 = guest_ctrl->exit_info1;
    exit_info2 = guest_ctrl->exit_info2;
#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif
    // reenable global interrupts after vm exit
    v3_stgi();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    {
        int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);

        if (ret != 0) {
            PrintError("Error in SVM exit handler (ret=%d)\n", ret);
            PrintError("  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t) exit_code);
            return -1;
        }
    }

    return 0;
}
int v3_start_svm_guest(struct guest_info * info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    PrintDebug("Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);

    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
        info->vm_info->run_state = VM_RUNNING;
    } else {
        PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);

        while (info->core_run_state == CORE_STOPPED) {
            v3_yield(info);
            //PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
        }

        PrintDebug("SVM core %u (on %u) initialized\n", info->vcpu_id, info->pcpu_id);
    }

    PrintDebug("SVM core %u (on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->vcpu_id, info->pcpu_id,
               info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));
    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n",
               info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }
        if (v3_svm_enter(info) == -1) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr = 0;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print("SVM core %u: SVM ERROR!!\n", info->vcpu_id);

            v3_print_guest_state(info);

            V3_Print("SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code);

            V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
            V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
            V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                v3_gpa_to_hva(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }

            V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);

            break;
        }
        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if ((info->num_exits % 50000) == 0) {
            V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
            v3_print_guest_state(info);
        }
    }

    // Need to take down the other cores on error...

    return 0;
}
int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {

    // Write the RIP, CS, and descriptor
    // assume the rest is already good to go
    //
    // vector VV -> rip at 0
    //
    // This means we start executing at linear address VV000
    //
    // So the selector needs to be VV00
    // and the base needs to be VV000

    core->rip = 0;
    core->segments.cs.selector = rip << 8;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = rip << 12;

    return 0;
}
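/*
 * Example: for startup vector 0x9f, rip == 0x9f, so CS.selector becomes
 * 0x9f00 and CS.base becomes 0x9f000; with RIP = 0 the core begins fetching
 * at linear address 0x9f000, exactly as the comment above describes.
 */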
/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        V3_Print("SVM Not Available\n");
        return 0;
    }

    v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    // test the SVMDIS bit directly; it is not bit 0, so comparing the
    // masked value against 1 would never match
    if (vm_cr_low & SVM_VM_CR_MSR_svmdis) {
        V3_Print("SVM is available but is disabled.\n");

        v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

        if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
            V3_Print("SVM BIOS Disabled, not unlockable\n");
        } else {
            V3_Print("SVM is locked with a key\n");
        }

        return 0;
    }

    V3_Print("SVM is available and enabled.\n");

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
    PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
    PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
    PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
    PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

    return 1;
}
static int has_svm_nested_paging() {
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        V3_Print("SVM Nested Paging not supported\n");
        return 0;
    } else {
        V3_Print("SVM Nested Paging supported\n");
        return 1;
    }
}
void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    V3_Print("SVM Enabled\n");

    // Setup the host state save area
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);

    //  msr.e_reg.high = 0;
    //  msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
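    /*
     * From this point on, VMRUN executed on this CPU spills host state into
     * the area named by SVM_VM_HSAVE_PA_MSR. Architecturally a single 4KB
     * page suffices; four are allocated here.
     */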
    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}
void v3_deinit_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // reset SVM_VM_HSAVE_PA_MSR
    // Does setting it to NULL disable??
    msr.r_reg = 0;
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    // Disable SVM on this CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low &= ~EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    V3_FreePages((void *)host_vmcbs[cpu_id], 4);

    V3_Print("Host CPU %d host area freed, and SVM disabled\n", cpu_id);
}
/* The remainder of this file is old, disabled code */
#if 0

/*
 * Test VMSAVE/VMLOAD Latency
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq %%rcx, %%rax ; "
                          vmsave
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
                          : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq %%rcx, %%rax ; "
                          vmload
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
                          : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
}
/* End Latency Test */
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;

    guest_state->rsp = vm_info.vm_regs.rsp;
    guest_state->rip = vm_info.rip;

    /* I pretty much just gutted this from TVMM */
    /* Note: That means it's probably wrong */

    // set the segment registers to mirror ours
    guest_state->cs.selector = 1 << 3;
    guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
    guest_state->cs.attrib.fields.S = 1;
    guest_state->cs.attrib.fields.P = 1;
    guest_state->cs.attrib.fields.db = 1;
    guest_state->cs.attrib.fields.G = 1;
    guest_state->cs.limit = 0xfffff;
    guest_state->cs.base = 0;
    struct vmcb_selector * segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 2 << 3;
        seg->attrib.fields.type = 0x2; // Data Segment+read/write
        seg->attrib.fields.S = 1;
        seg->attrib.fields.P = 1;
        seg->attrib.fields.db = 1;
        seg->attrib.fields.G = 1;
        seg->limit = 0xfffff;
        seg->base = 0;
    }
    /* JRL THIS HAS TO GO */
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    //    guest_state->tr.limit = GetTR_Limit();
    //    guest_state->tr.base = GetTR_Base();  // - 0x2000;
    guest_state->efer |= EFER_MSR_svm_enable;
    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    guest_state->cr0 = 0x00000001; // PE
    ctrl_area->guest_ASID = 1;

    //    guest_state->cpl = 0;
    ctrl_area->cr_writes.cr4 = 1;

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    ctrl_area->exceptions.pf = 1;
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
    ctrl_area->exceptions.nmi = 1;
    ctrl_area->instrs.IOIO_PROT = 1;
    ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

    {
        reg_ex_t tmp_reg;
        tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
        // a fill value of 0xffffffff truncates to 0xff per byte, marking every port intercepted
        memset((void *)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
    }
    ctrl_area->instrs.INTR = 1;
    {
        char gdt_buf[6];
        char idt_buf[6];
        uint_t gdt_base, idt_base;
        ushort_t gdt_limit, idt_limit;

        memset(gdt_buf, 0, 6);
        memset(idt_buf, 0, 6);

        /* assumption: the original code captured the host GDTR/IDTR into
         * these buffers here, e.g. via SGDT/SIDT */
        __asm__ __volatile__ ("sgdt (%0)" : : "r"(gdt_buf) : "memory");
        __asm__ __volatile__ ("sidt (%0)" : : "r"(idt_buf) : "memory");

        gdt_base = *(ulong_t *)((uchar_t *)gdt_buf + 2) & 0xffffffff;
        gdt_limit = *(ushort_t *)(gdt_buf) & 0xffff;
        PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

        idt_base = *(ulong_t *)(idt_buf + 2) & 0xffffffff;
        idt_limit = *(ushort_t *)(idt_buf) & 0xffff;
        PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);

        // gdt_base -= 0x2000;
        // idt_base -= 0x2000;

        guest_state->gdtr.base = gdt_base;
        guest_state->gdtr.limit = gdt_limit;
        guest_state->idtr.base = idt_base;
        guest_state->idtr.limit = idt_limit;
    }
    // also determine if CPU supports nested paging

    if (vm_info.page_tables) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

        // ctrl_area->N_CR3 = Get_CR3();
        // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

        guest_state->g_pat = 0x7040600070406ULL;

        PrintDebug("Set Nested CR3: lo: 0x%x hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), *(uint_t *)((unsigned char *)&(ctrl_area->N_CR3) + 4));
        PrintDebug("Set Guest CR3: lo: 0x%x hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), *(uint_t *)((unsigned char *)&(guest_state->cr3) + 4));

        // guest_state->cr0 |= 0x80000000;
    }
}

#endif