/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/svm_io.h>

#include <palacios/vmm_sprintf.h>
uint32_t v3_last_exit;

// Per-CPU physical addresses of the host state save areas ("host VMCBs")
static addr_t host_vmcbs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
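/*
 * v3_stgi()/v3_clgi() are thin wrappers around the STGI/CLGI instructions,
 * which set and clear the global interrupt flag. v3_svm_launch() is the
 * low-level world-switch stub: it takes the guest VMCB (by physical address),
 * the guest GPR save area, and the per-CPU host VMCB, which the stub
 * presumably uses to save and restore host state around the VMRUN (the exact
 * save/restore split lives in the assembly file, not shown here).
 */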
static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);

    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;
    ctrl_area->instrs.CPUID = 1;

    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
    ctrl_area->exceptions.nmi = 1;

    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;

    /* Setup Guest Machine state */

    core->vm_regs.rsp = 0x00;
    core->vm_regs.rdx = 0x00000f00;

    core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
    core->ctrl_regs.cr0 = 0x60010010;    // CD | NW | ET, plus WP so the memory hooks work in real mode
    core->ctrl_regs.efer |= EFER_MSR_svm_enable;
    core->segments.cs.selector = 0xf000;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = 0x0000000f0000LL;

    // (raw attributes = 0xf3)
    core->segments.cs.type = 0x3;
    core->segments.cs.system = 0x1;
    core->segments.cs.dpl = 0x3;
    core->segments.cs.present = 1;
    int i;
    struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                      &(core->segments.es), &(core->segments.fs),
                                      &(core->segments.gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct v3_segment * seg = segregs[i];

        seg->selector = 0x0000;
        // seg->base = seg->selector << 4;
        seg->base = 0x00000000;

        // (raw attributes = 0xf3)
    }
    core->segments.gdtr.limit = 0x0000ffff;
    core->segments.gdtr.base = 0x0000000000000000LL;
    core->segments.idtr.limit = 0x0000ffff;
    core->segments.idtr.base = 0x0000000000000000LL;

    core->segments.ldtr.selector = 0x0000;
    core->segments.ldtr.limit = 0x0000ffff;
    core->segments.ldtr.base = 0x0000000000000000LL;
    core->segments.tr.selector = 0x0000;
    core->segments.tr.limit = 0x0000ffff;
    core->segments.tr.base = 0x0000000000000000LL;

    core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
    core->dbg_regs.dr7 = 0x0000000000000400LL;
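    // DR6 and DR7 above are set to their architectural reset values
    // (0xFFFF0FF0 and 0x400 respectively).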
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;

    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;
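    // With IOIO_PROT/MSR_PROT set, hardware consults the permission maps
    // configured above on every IN/OUT and RDMSR/WRMSR and forces a #VMEXIT
    // for any access whose bit is set. Architecturally the I/O permission map
    // is 12KB and the MSR permission map is 8KB; both are referenced by
    // physical address.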
    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
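        // ASID 0 is reserved for the host, so the guest runs with a nonzero ASID.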
        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
        }

        core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        core->ctrl_regs.cr0 |= 0x80000000;
        core->ctrl_regs.cr3 = core->direct_map_pt;
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;
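        // The CR0/CR3 read/write (and CR4 write) intercepts above let the
        // shadow paging code track the guest's view of its paging state.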
        v3_hook_msr(core->vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    core);
        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;
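        // 0x0007040600070406 is the power-on default PAT value
        // (PA0=WB, PA1=WT, PA2=UC-, PA3=UC, repeated for the upper four entries).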
    } else if (core->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize Nested page tables\n");
        }

        ctrl_area->N_CR3 = core->direct_map_pt;
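        // N_CR3 holds the physical base of the nested page tables used for the
        // guest-physical to host-physical translation.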
        guest_state->g_pat = 0x7040600070406ULL;
    }
}
int v3_init_svm_vmcb(struct guest_info * info, v3_vm_class_t vm_class) {

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void *)Allocate_VMCB();

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
        Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
    } else {
        PrintError("Invalid VM class\n");
        return -1;
    }

    return 0;
}
static int update_irq_exit_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    // Fix for QEMU bug using EVENTINJ as an internal cache
    guest_ctrl->EVENTINJ.valid = 0;

    if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {

#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
#endif

        info->intr_core_state.irq_started = 1;
        info->intr_core_state.irq_pending = 0;

        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
#endif

        // Interrupt was taken fully vectored
        info->intr_core_state.irq_started = 0;

    } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
#endif
    }

    return 0;
}
static int update_irq_entry_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    if (info->intr_core_state.irq_pending == 0) {
        guest_ctrl->guest_ctrl.V_IRQ = 0;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = 0;
    }

    if (v3_excp_pending(info)) {
        uint_t excp = v3_get_excp_number(info);

        guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;

        if (info->excp_state.excp_error_code_valid) {
            guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
            guest_ctrl->EVENTINJ.ev = 1;
#ifdef CONFIG_DEBUG_INTERRUPTS
            PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
#endif
        }

        guest_ctrl->EVENTINJ.vector = excp;

        guest_ctrl->EVENTINJ.valid = 1;

#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n",
                   (int)info->num_exits,
                   guest_ctrl->EVENTINJ.vector,
                   (void *)(addr_t)info->ctrl_regs.cr2,
                   (void *)(addr_t)info->rip);
#endif

        v3_injecting_excp(info, excp);
    } else if (info->intr_core_state.irq_started == 1) {
#ifdef CONFIG_DEBUG_INTERRUPTS
        PrintDebug("IRQ pending from previous injection\n");
#endif
        guest_ctrl->guest_ctrl.V_IRQ = 1;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;
        guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
        guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;

    } else {
        switch (v3_intr_pending(info)) {
            case V3_EXTERNAL_IRQ: {
                uint32_t irq = v3_get_intr(info);

                guest_ctrl->guest_ctrl.V_IRQ = 1;
                guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
                guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
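                // V_IGN_TPR makes the virtual interrupt deliverable regardless
                // of the guest's V_TPR; V_INTR_PRIO is set to the maximum (0xf).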
#ifdef CONFIG_DEBUG_INTERRUPTS
                PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
                           guest_ctrl->guest_ctrl.V_INTR_VECTOR,
                           (void *)(addr_t)info->rip);
#endif

                info->intr_core_state.irq_pending = 1;
                info->intr_core_state.irq_vector = irq;

                break;
            }
            case V3_NMI:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
                break;
            case V3_SOFTWARE_INTR:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
                break;

            case V3_INVALID_INTR:
            default:
                break;
        }
    }

    return 0;
}
/* 
 * CAUTION and DANGER!!! 
 * 
 * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi calls inside this function. 
 * When executing a symbiotic call, the VMCB WILL be overwritten, so any dependencies 
 * on its contents will cause things to break. The contents at the time of the exit WILL 
 * change before the exit handler is executed.
 */
int v3_svm_enter(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
    uint64_t tmp_tsc;

    // Conditionally yield the CPU if the timeslice has expired

    // disable global interrupts for vm state transition
    v3_clgi();

    // Synchronize the guest state to the VMCB
    guest_state->cr0 = info->ctrl_regs.cr0;
    guest_state->cr2 = info->ctrl_regs.cr2;
    guest_state->cr3 = info->ctrl_regs.cr3;
    guest_state->cr4 = info->ctrl_regs.cr4;
    guest_state->dr6 = info->dbg_regs.dr6;
    guest_state->dr7 = info->dbg_regs.dr7;
    guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
    guest_state->rflags = info->ctrl_regs.rflags;
    guest_state->efer = info->ctrl_regs.efer;

    guest_state->cpl = info->cpl;

    v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));

    guest_state->rax = info->vm_regs.rax;
    guest_state->rip = info->rip;
    guest_state->rsp = info->vm_regs.rsp;

#ifdef CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

    PrintDebug("SVM Entry to CS=%p rip=%p...\n",
               (void *)(addr_t)info->segments.cs.base,
               (void *)(addr_t)info->rip);

#ifdef CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 1) {
        if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
            V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
        }
    }
#endif

    rdtscll(tmp_tsc);
    v3_update_time(info, (tmp_tsc - info->time_state.cached_host_tsc));
    rdtscll(info->time_state.cached_host_tsc);
    // guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    //V3_Print("Calling v3_svm_launch\n");

    v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
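    // VMRUN takes the VMCB by physical address (hence V3_PAddr() above); the
    // per-CPU host VMCB gives the launch stub somewhere to stash host state
    // across the run (assumed behavior of the assembly stub, not shown here).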
    //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);

    v3_last_exit = (uint32_t)(guest_ctrl->exit_code);

    // v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);

    //PrintDebug("SVM Returned\n");

    // Save Guest state from VMCB
    info->rip = guest_state->rip;
    info->vm_regs.rsp = guest_state->rsp;
    info->vm_regs.rax = guest_state->rax;

    info->cpl = guest_state->cpl;

    info->ctrl_regs.cr0 = guest_state->cr0;
    info->ctrl_regs.cr2 = guest_state->cr2;
    info->ctrl_regs.cr3 = guest_state->cr3;
    info->ctrl_regs.cr4 = guest_state->cr4;
    info->dbg_regs.dr6 = guest_state->dr6;
    info->dbg_regs.dr7 = guest_state->dr7;
    info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
    info->ctrl_regs.rflags = guest_state->rflags;
    info->ctrl_regs.efer = guest_state->efer;

    v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
    info->cpu_mode = v3_get_vm_cpu_mode(info);
    info->mem_mode = v3_get_vm_mem_mode(info);

    // save exit info here
    exit_code = guest_ctrl->exit_code;
    exit_info1 = guest_ctrl->exit_info1;
    exit_info2 = guest_ctrl->exit_info2;

#ifdef CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    // reenable global interrupts after vm exit
    v3_stgi();

    // Conditionally yield the CPU if the timeslice has expired

    if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) {
        PrintError("Error in SVM exit handler\n");
        return -1;
    }

    return 0;
}
int v3_start_svm_guest(struct guest_info * info) {
    // vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    // vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    PrintDebug("Starting SVM core %u\n", info->cpu_id);

    if (info->cpu_mode == INIT) {
        PrintDebug("SVM core %u: I am an AP in INIT mode, waiting for that to change\n", info->cpu_id);
        while (info->cpu_mode == INIT) {
            //PrintDebug("SVM core %u: still waiting for INIT\n", info->cpu_id);
        }
        PrintDebug("SVM core %u: I am out of INIT\n", info->cpu_id);
        if (info->cpu_mode == SIPI) {
            PrintDebug("SVM core %u: I am waiting on a SIPI to set my starting address\n", info->cpu_id);
            while (info->cpu_mode == SIPI) {
                //PrintDebug("SVM core %u: still waiting for SIPI\n", info->cpu_id);
            }
        }
        PrintDebug("SVM core %u: I have my SIPI\n", info->cpu_id);
    }

    if (info->cpu_mode != REAL) {
        PrintError("SVM core %u: I am not in REAL mode at launch! Huh?!\n", info->cpu_id);
        return -1;
    }

    PrintDebug("SVM core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));

    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p)\n", info->cpu_id, (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->vm_info->run_state = VM_RUNNING;
    rdtscll(info->yield_start_cycle);
    if (v3_svm_enter(info) == -1) {
        vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
        addr_t host_addr = 0;
        addr_t linear_addr = 0;
        info->vm_info->run_state = VM_ERROR;

        V3_Print("SVM core %u: SVM ERROR!!\n", info->cpu_id);

        v3_print_guest_state(info);

        V3_Print("SVM core %u: SVM Exit Code: %p\n", info->cpu_id, (void *)(addr_t)guest_ctrl->exit_code);

        V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
        V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

        V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
        V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

        linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

        if (info->mem_mode == PHYSICAL_MEM) {
            v3_gpa_to_hva(info, linear_addr, &host_addr);
        } else if (info->mem_mode == VIRTUAL_MEM) {
            v3_gva_to_hva(info, linear_addr, &host_addr);
        }

        V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->cpu_id, (void *)host_addr);

        V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->cpu_id, (void *)host_addr);
        v3_dump_mem((uint8_t *)host_addr, 15);

        v3_print_stack(info);
    }

    if ((info->num_exits % 5000) == 0) {
        V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
    }

    // Need to take down the other cores on error...
/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
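    // Detection, per the manual: CPUID 8000_0001h ECX[2] reports SVM support, the
    // SVMDIS bit in the VM_CR MSR reports whether the BIOS has disabled it, and the
    // SVML feature bit in CPUID 8000_000Ah EDX indicates whether a disabled SVM
    // could be re-enabled with a key.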
    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        V3_Print("SVM Not Available\n");
        return 0;
    }

    v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if (vm_cr_low & SVM_VM_CR_MSR_svmdis) {
        V3_Print("SVM is available but is disabled.\n");

        v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

        if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
            V3_Print("SVM BIOS Disabled, not unlockable\n");
        } else {
            V3_Print("SVM is locked with a key\n");
        }

        return 0;
    } else {
        V3_Print("SVM is available and enabled.\n");

        v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
        PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

        return 1;
    }
}
static int has_svm_nested_paging() {
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        V3_Print("SVM Nested Paging not supported\n");
        return 0;
    } else {
        V3_Print("SVM Nested Paging supported\n");
        return 1;
    }
}
void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
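    // EFER.SVME (bit 12) must be set before VMRUN and the other SVM
    // instructions will execute without faulting.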
    V3_Print("SVM Enabled\n");

    // Setup the host state save area
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);

    // msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
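    // The VM_HSAVE_PA MSR tells the processor where to save host state across
    // VMRUN; it must hold a page-aligned physical address.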
    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}
/* 
 * Test VMSAVE/VMLOAD Latency 
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
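// These are the raw opcode encodings for VMSAVE (0F 01 DB) and VMLOAD (0F 01 DA);
// both instructions take the physical address of a VMCB in rAX.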
uint32_t start_lo, start_hi;
uint32_t end_lo, end_hi;

__asm__ __volatile__ (
    "movl %%eax, %%esi ; "
    "movl %%edx, %%edi ; "
    "movq %%rcx, %%rax ; "
    : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
    : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)

PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));

__asm__ __volatile__ (
    "movl %%eax, %%esi ; "
    "movl %%edx, %%edi ; "
    "movq %%rcx, %%rax ; "
    : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
    : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)

PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));

/* End Latency Test */
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);

    guest_state->rsp = vm_info.vm_regs.rsp;
    guest_state->rip = vm_info.rip;

    /* I pretty much just gutted this from TVMM */
    /* Note: That means it's probably wrong */
    // set the segment registers to mirror ours
    guest_state->cs.selector = 1<<3;
    guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
    guest_state->cs.attrib.fields.S = 1;
    guest_state->cs.attrib.fields.P = 1;
    guest_state->cs.attrib.fields.db = 1;
    guest_state->cs.attrib.fields.G = 1;
    guest_state->cs.limit = 0xfffff;
    guest_state->cs.base = 0;

    struct vmcb_selector * segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 2<<3;
        seg->attrib.fields.type = 0x2; // Data Segment+read/write
        seg->attrib.fields.S = 1;
        seg->attrib.fields.P = 1;
        seg->attrib.fields.db = 1;
        seg->attrib.fields.G = 1;
        seg->limit = 0xfffff;

    /* JRL THIS HAS TO GO */

    // guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();  // - 0x2000;
    guest_state->efer |= EFER_MSR_svm_enable;
    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    guest_state->cr0 = 0x00000001; // PE
    ctrl_area->guest_ASID = 1;

    // guest_state->cpl = 0;

    ctrl_area->cr_writes.cr4 = 1;

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    ctrl_area->exceptions.pf = 1;
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
    ctrl_area->exceptions.nmi = 1;

    ctrl_area->instrs.IOIO_PROT = 1;
    ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void *)(tmp_reg.e_reg.low), 0xff, PAGE_SIZE * 2);

    ctrl_area->instrs.INTR = 1;
    uchar_t gdt_buf[6];
    uchar_t idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    gdt_base = *(ulong_t *)((uchar_t *)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t *)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    idt_base = *(ulong_t *)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t *)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;
    // also determine if CPU supports nested paging

    if (vm_info.page_tables) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));
        // Set the Nested Page Table pointer
        ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

        // ctrl_area->N_CR3 = Get_CR3();
        // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

        guest_state->g_pat = 0x7040600070406ULL;

        PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), *(uint_t *)(((uchar_t *)&(ctrl_area->N_CR3)) + 4));
        PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), *(uint_t *)(((uchar_t *)&(guest_state->cr3)) + 4));
    }
    // guest_state->cr0 |= 0x80000000;