/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>
#include <palacios/vmm_barrier.h>

#include <palacios/vmm_perftune.h>

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/svm_io.h>

#include <palacios/vmm_sprintf.h>

#ifdef V3_CONFIG_MEM_TRACK
#include <palacios/vmm_mem_track.h>
#endif

#ifdef V3_CONFIG_TM_FUNC
#include <extensions/trans_mem.h>
#endif

#ifndef V3_CONFIG_DEBUG_SVM
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


uint32_t v3_last_exit;

// Per-physical-CPU physical addresses of the host state save areas
// (allocated and installed in the VM_HSAVE_PA MSR by v3_init_svm_cpu)
static addr_t host_vmcbs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};


extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
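
/*
 * Summary comment (behavior inferred from the call sites in this file, not a
 * specification): v3_svm_launch is the assembly world-switch stub. It saves
 * the host GPRs, loads the guest GPRs from vm_regs, runs VMRUN on the guest
 * VMCB, and restores host state via host_vmcb after #VMEXIT.
 */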


static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = NULL;
    addr_t vmcb_pa = (addr_t)V3_AllocPages(1); // need not be shadow safe, not exposed to guest

    if ((void *)vmcb_pa == NULL) {
        PrintError(VM_NONE, VCORE_NONE, "Error allocating VMCB\n");
        return NULL;
    }

    vmcb_page = (vmcb_t *)V3_VAddr((void *)vmcb_pa);

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}


static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
{
    int status;

    // Call arch-independent handler
    if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0) {
        return status;
    }

    // SVM-specific code
    // Ensure that hardware visible EFER.SVME bit is set (SVM Enable)
    struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
    hw_efer->svme = 1;

    return 0;
}
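
/*
 * VMRUN requires EFER.SVME to be set in the VMCB's EFER image, so the
 * hardware-visible copy is forced on above even when the guest writes a
 * value with SVME clear; the guest's view of EFER stays virtualized
 * through the MSR hook.
 */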


static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);

    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
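
    // The SVM instructions above are unconditionally intercepted: they are
    // privileged (or meaningless) inside a guest, and letting them execute
    // natively would let the guest manipulate the physical SVM state.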
    ctrl_area->instrs.INVLPGA = 1;
    ctrl_area->instrs.CPUID = 1;

    ctrl_area->instrs.HLT = 1;

    /* Set at VMM launch as needed */
    ctrl_area->instrs.RDTSC = 0;
    ctrl_area->svm_instrs.RDTSCP = 0;

    // guest_state->cr0 = 0x00000001;    // PE 

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;

#ifdef V3_CONFIG_TM_FUNC
    v3_tm_set_excp_intercepts(ctrl_area);
#endif

    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 0; // allow SMIs to run in guest
    ctrl_area->instrs.INIT = 1;
    //    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    /* Setup Guest Machine state */

    core->vm_regs.rsp = 0x00;
    core->rip = 0xfff0;

    core->vm_regs.rdx = 0x00000f00;


    core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
    core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
    core->ctrl_regs.efer |= EFER_MSR_svm_enable;


    core->segments.cs.selector = 0xf000;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = 0x0000000f0000LL;

    // (raw attributes = 0xf3)
    core->segments.cs.type = 0x3;
    core->segments.cs.system = 0x1;
    core->segments.cs.dpl = 0x3;
    core->segments.cs.present = 1;


    uint_t i;
    struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                      &(core->segments.es), &(core->segments.fs),
                                      &(core->segments.gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct v3_segment * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;

        // (raw attributes = 0xf3)
        seg->type = 0x3;
        seg->system = 0x1;
        seg->dpl = 0x3;
        seg->present = 1;
    }

    core->segments.gdtr.limit = 0x0000ffff;
    core->segments.gdtr.base = 0x0000000000000000LL;
    core->segments.idtr.limit = 0x0000ffff;
    core->segments.idtr.base = 0x0000000000000000LL;

    core->segments.ldtr.selector = 0x0000;
    core->segments.ldtr.limit = 0x0000ffff;
    core->segments.ldtr.base = 0x0000000000000000LL;
    core->segments.tr.selector = 0x0000;
    core->segments.tr.limit = 0x0000ffff;
    core->segments.tr.base = 0x0000000000000000LL;


    core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
    core->dbg_regs.dr7 = 0x0000000000000400LL;


    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;

    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;


    PrintDebug(core->vm_info, core, "Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;
    // The above also assures the TPR changes (CR8) are only virtual


    // However, we need to see TPR writes since they will
    // affect the virtual apic
    // we reflect out cr8 to ctrl_regs->apic_tpr
    ctrl_area->cr_reads.cr8 = 1;
    ctrl_area->cr_writes.cr8 = 1;
    // We will do all TPR comparisons in the virtual apic
    // We also do not want the V_TPR to be able to mask the PIC
    ctrl_area->guest_ctrl.V_IGN_TPR = 1;
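
    // Net effect: guest CR8 accesses trap to the VMM, which keeps the
    // authoritative TPR in the virtual APIC (ctrl_regs->apic_tpr); the
    // hardware V_TPR value plays no role in prioritization here.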

    v3_hook_msr(core->vm_info, EFER_MSR,
                &v3_handle_efer_read,
                &v3_svm_handle_efer_write,
                core);

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug(core->vm_info, core, "Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(core) == -1) {
            PrintError(core->vm_info, core, "Could not initialize passthrough page tables\n");
            return;
        }

        core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug(core->vm_info, core, "Created\n");

        core->ctrl_regs.cr0 |= 0x80000000;

        v3_activate_passthrough_pt(core);

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        // intercept cr4 reads so the shadow pager can use PAE independently of the guest
        ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;


        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;
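
        // 0x0007040600070406 is the architectural power-on default PAT:
        // PA0=WB(06), PA1=WT(04), PA2=UC-(07), PA3=UC(00), repeated for PA4-PA7.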

    } else if (core->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug(core->vm_info, core, "NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(core) == -1) {
            PrintError(core->vm_info, core, "Could not initialize Nested page tables\n");
            return;
        }

        ctrl_area->N_CR3 = core->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }

    /* tell the guest that we don't support SVM */
    v3_hook_msr(core->vm_info, SVM_VM_CR_MSR,
                &v3_handle_vm_cr_read,
                &v3_handle_vm_cr_write,
                core);

    {
#define INT_PENDING_AMD_MSR 0xc0010055

        v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);

        v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);

        v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);

        // Passthrough read operations are ok.
        v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, v3_msr_unhandled_write, NULL);
    }
}
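
/*
 * Hooking an MSR with NULL read/write handlers marks it safe for direct
 * guest access (its MSR permission map bits no longer force an exit); a
 * non-NULL handler (e.g. v3_msr_unhandled_write above) keeps that direction
 * intercepted. Sketch of the two idioms (SOME_MSR / my_wr_handler are
 * illustrative names, not defined in this file):
 *
 *   v3_hook_msr(vm, SOME_MSR, NULL, NULL, NULL);          // full passthrough
 *   v3_hook_msr(vm, SOME_MSR, NULL, my_wr_handler, NULL); // trap writes only
 */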


int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class) {

    PrintDebug(core->vm_info, core, "Allocating VMCB\n");
    core->vmm_data = (void *)Allocate_VMCB();

    if (core->vmm_data == NULL) {
        PrintError(core->vm_info, core, "Could not allocate VMCB, Exiting...\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug(core->vm_info, core, "Initializing VMCB (addr=%p)\n", (void *)core->vmm_data);
        Init_VMCB_BIOS((vmcb_t*)(core->vmm_data), core);
    } else {
        PrintError(core->vm_info, core, "Invalid VM class\n");
        return -1;
    }

    core->core_run_state = CORE_STOPPED;

    return 0;
}


int v3_deinit_svm_vmcb(struct guest_info * core) {
    V3_FreePages(V3_PAddr(core->vmm_data), 1);
    return 0;
}


#ifdef V3_CONFIG_CHECKPOINT
int v3_svm_save_core(struct guest_info * core, void * ctx){
    vmcb_saved_state_t * guest_area = GET_VMCB_SAVE_STATE_AREA(core->vmm_data);

    // Special case saves of data we need immediate access to
    V3_CHKPT_SAVE(ctx, "CPL", core->cpl, failout);
    V3_CHKPT_SAVE(ctx,"STAR", guest_area->star, failout);
    V3_CHKPT_SAVE(ctx,"CSTAR", guest_area->cstar, failout);
    V3_CHKPT_SAVE(ctx,"LSTAR", guest_area->lstar, failout);
    V3_CHKPT_SAVE(ctx,"SFMASK", guest_area->sfmask, failout);
    V3_CHKPT_SAVE(ctx,"KERNELGSBASE", guest_area->KernelGsBase, failout);
    V3_CHKPT_SAVE(ctx,"SYSENTER_CS", guest_area->sysenter_cs, failout);
    V3_CHKPT_SAVE(ctx,"SYSENTER_ESP", guest_area->sysenter_esp, failout);
    V3_CHKPT_SAVE(ctx,"SYSENTER_EIP", guest_area->sysenter_eip, failout);

    // and then we save the whole enchilada
    if (v3_chkpt_save(ctx, "VMCB_DATA", PAGE_SIZE, core->vmm_data)) {
        PrintError(core->vm_info, core, "Could not save SVM vmcb\n");
        goto failout;
    }

    return 0;

 failout:
    PrintError(core->vm_info, core, "Failed to save SVM state for core\n");
    return -1;
}

int v3_svm_load_core(struct guest_info * core, void * ctx){
    vmcb_saved_state_t * guest_area = GET_VMCB_SAVE_STATE_AREA(core->vmm_data);

    // Reload what we special cased, which we will overwrite in a minute
    V3_CHKPT_LOAD(ctx, "CPL", core->cpl, failout);
    V3_CHKPT_LOAD(ctx,"STAR", guest_area->star, failout);
    V3_CHKPT_LOAD(ctx,"CSTAR", guest_area->cstar, failout);
    V3_CHKPT_LOAD(ctx,"LSTAR", guest_area->lstar, failout);
    V3_CHKPT_LOAD(ctx,"SFMASK", guest_area->sfmask, failout);
    V3_CHKPT_LOAD(ctx,"KERNELGSBASE", guest_area->KernelGsBase, failout);
    V3_CHKPT_LOAD(ctx,"SYSENTER_CS", guest_area->sysenter_cs, failout);
    V3_CHKPT_LOAD(ctx,"SYSENTER_ESP", guest_area->sysenter_esp, failout);
    V3_CHKPT_LOAD(ctx,"SYSENTER_EIP", guest_area->sysenter_eip, failout);

    // and then we load the whole enchilada
    if (v3_chkpt_load(ctx, "VMCB_DATA", PAGE_SIZE, core->vmm_data)) {
        PrintError(core->vm_info, core, "Could not load SVM vmcb\n");
        goto failout;
    }

    return 0;

 failout:
    PrintError(core->vm_info, core, "Failed to load SVM state for core\n");
    return -1;
}
#endif


static int update_irq_exit_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    // Fix for QEMU bug using EVENTINJ as an internal cache
    guest_ctrl->EVENTINJ.valid = 0;

    if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
#endif

        info->intr_core_state.irq_started = 1;
        info->intr_core_state.irq_pending = 0;

        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
#endif

        // Interrupt was taken fully vectored
        info->intr_core_state.irq_started = 0;

    } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
#endif
        // The interrupt was not fully delivered; irq_started stays set so it
        // is retried on the next entry
    }

    return 0;
}

static int update_irq_entry_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    if (info->intr_core_state.irq_pending == 0) {
        guest_ctrl->guest_ctrl.V_IRQ = 0;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = 0;
    }

    if (v3_excp_pending(info)) {
        uint_t excp = v3_get_excp_number(info);

        guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;

        if (info->excp_state.excp_error_code_valid) {
            guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
            guest_ctrl->EVENTINJ.ev = 1;
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            PrintDebug(info->vm_info, info, "Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
#endif
        }

        guest_ctrl->EVENTINJ.vector = excp;

        guest_ctrl->EVENTINJ.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n",
                   (int)info->num_exits,
                   guest_ctrl->EVENTINJ.vector,
                   (void *)(addr_t)info->ctrl_regs.cr2,
                   (void *)(addr_t)info->rip);
#endif

        v3_injecting_excp(info, excp);
    } else if (info->intr_core_state.irq_started == 1) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug(info->vm_info, info, "IRQ pending from previous injection\n");
#endif
        guest_ctrl->guest_ctrl.V_IRQ = 1;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;

        // We ignore the virtual TPR on this injection
        // TPR/PPR tests have already been done in the APIC.
        guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
        guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4; // 0xf;
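
        // V_INTR_PRIO is the interrupt's priority class, i.e. the top nibble
        // of the vector (e.g. vector 0x3c -> class 0x3); with V_IGN_TPR set
        // it is never compared against V_TPR anyway.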

    } else if (v3_intr_pending(info) != V3_INVALID_INTR) {
        switch (v3_intr_pending(info)) {
            case V3_EXTERNAL_IRQ: {
                int irq = v3_get_intr(info);

                if (irq < 0) {
                    break;
                }

                guest_ctrl->guest_ctrl.V_IRQ = 1;
                guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;

                // We ignore the virtual TPR on this injection
                // TPR/PPR tests have already been done in the APIC.
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
                guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4; // 0xf;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug(info->vm_info, info, "Injecting Interrupt %d (EIP=%p)\n",
                           guest_ctrl->guest_ctrl.V_INTR_VECTOR,
                           (void *)(addr_t)info->rip);
#endif

                info->intr_core_state.irq_pending = 1;
                info->intr_core_state.irq_vector = irq;

                break;
            }
            case V3_NMI:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
                break;
            case V3_SOFTWARE_INTR:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug(info->vm_info, info, "Injecting software interrupt -- type: %d, vector: %d\n",
                           SVM_INJECTION_SOFT_INTR, info->intr_core_state.swintr_vector);
#endif
                guest_ctrl->EVENTINJ.vector = info->intr_core_state.swintr_vector;
                guest_ctrl->EVENTINJ.valid = 1;

                /* reset swintr state */
                info->intr_core_state.swintr_posted = 0;
                info->intr_core_state.swintr_vector = 0;

                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
                break;
            case V3_INVALID_INTR:
            default:
                break;
        }
    }

    return 0;
}


int
v3_svm_config_tsc_virtualization(struct guest_info * info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));


    if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
        ctrl_area->instrs.RDTSC = 1;
        ctrl_area->svm_instrs.RDTSCP = 1;
    } else {
        ctrl_area->instrs.RDTSC = 0;
        ctrl_area->svm_instrs.RDTSCP = 0;

        if (info->time_state.flags & VM_TIME_TSC_PASSTHROUGH) {
            ctrl_area->TSC_OFFSET = 0;
        } else {
            ctrl_area->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
        }
    }

    return 0;
}

/*
 * CAUTION and DANGER!!!
 *
 * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi calls inside this function
 * When executing a symbiotic call, the VMCB WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_svm_enter(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
    uint64_t guest_cycles = 0;

    // Conditionally yield the CPU if the timeslice has expired
    v3_schedule(info);

#ifdef V3_CONFIG_MEM_TRACK
    v3_mem_track_entry(info);
#endif

    // Update timer devices after being in the VM before doing
    // IRQ updates, so that any interrupts they raise get seen
    // immediately.
    v3_advance_time(info, NULL);
    v3_update_timers(info);

    // disable global interrupts for vm state transition
    v3_clgi();

    // Synchronize the guest state to the VMCB
    guest_state->cr0 = info->ctrl_regs.cr0;
    guest_state->cr2 = info->ctrl_regs.cr2;
    guest_state->cr3 = info->ctrl_regs.cr3;
    guest_state->cr4 = info->ctrl_regs.cr4;
    guest_state->dr6 = info->dbg_regs.dr6;
    guest_state->dr7 = info->dbg_regs.dr7;

    // CR8 is now updated by read/writes and it contains the APIC TPR
    // the V_TPR should be just the class part of that.
    // This update is here just for completeness. We currently
    // are ignoring V_TPR on all injections and doing the priority logic
    // in the APIC.
    // guest_ctrl->guest_ctrl.V_TPR = ((info->ctrl_regs.apic_tpr) >> 4) & 0xf;

    //guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;

    guest_state->rflags = info->ctrl_regs.rflags;
    guest_state->efer = info->ctrl_regs.efer;

    /* Synchronize MSRs */
    guest_state->star = info->msrs.star;
    guest_state->lstar = info->msrs.lstar;
    guest_state->sfmask = info->msrs.sfmask;
    guest_state->KernelGsBase = info->msrs.kern_gs_base;

    guest_state->cpl = info->cpl;

    v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));

    guest_state->rax = info->vm_regs.rax;
    guest_state->rip = info->rip;
    guest_state->rsp = info->vm_regs.rsp;
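
    // Only rax/rip/rsp live in the VMCB save area; the remaining GPRs are
    // passed to (and reloaded by) the v3_svm_launch assembly stub via vm_regs.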

    V3_FP_ENTRY_RESTORE(info);

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

#ifdef V3_CONFIG_TM_FUNC
    v3_tm_check_intr_state(info, guest_ctrl, guest_state);
#endif


    PrintDebug(info->vm_info, info, "SVM Entry to CS=%p  rip=%p...\n",
               (void *)(addr_t)info->segments.cs.base,
               (void *)(addr_t)info->rip);


#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 1) {
        if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
            V3_Print(info->vm_info, info, "!!! Injecting Interrupt during Sym call !!!\n");
        }
    }
#endif

    v3_svm_config_tsc_virtualization(info);

    //V3_Print(info->vm_info, info, "Calling v3_svm_launch\n");
    {
        uint64_t entry_tsc = 0;
        uint64_t exit_tsc = 0;

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_enter(info);
#endif

#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_enter(info);
#endif

        rdtscll(entry_tsc);

        v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);

        rdtscll(exit_tsc);

#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_exit(info);
#endif

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_exit(info);
#endif

        guest_cycles = exit_tsc - entry_tsc;
    }


    //V3_Print(info->vm_info, info, "SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);

    v3_last_exit = (uint32_t)(guest_ctrl->exit_code);

    v3_advance_time(info, &guest_cycles);

    V3_FP_EXIT_SAVE(info);

    // Save Guest state from VMCB
    info->rip = guest_state->rip;
    info->vm_regs.rsp = guest_state->rsp;
    info->vm_regs.rax = guest_state->rax;

    info->cpl = guest_state->cpl;

    info->ctrl_regs.cr0 = guest_state->cr0;
    info->ctrl_regs.cr2 = guest_state->cr2;
    info->ctrl_regs.cr3 = guest_state->cr3;
    info->ctrl_regs.cr4 = guest_state->cr4;
    info->dbg_regs.dr6 = guest_state->dr6;
    info->dbg_regs.dr7 = guest_state->dr7;

    // We do not track this anymore
    // V_TPR is ignored and we do the logic in the APIC
    //info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;

    info->ctrl_regs.rflags = guest_state->rflags;
    info->ctrl_regs.efer = guest_state->efer;

    /* Synchronize MSRs */
    info->msrs.star = guest_state->star;
    info->msrs.lstar = guest_state->lstar;
    info->msrs.sfmask = guest_state->sfmask;
    info->msrs.kern_gs_base = guest_state->KernelGsBase;

    v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
    info->cpu_mode = v3_get_vm_cpu_mode(info);
    info->mem_mode = v3_get_vm_mem_mode(info);

    // save exit info here
    exit_code = guest_ctrl->exit_code;
    exit_info1 = guest_ctrl->exit_info1;
    exit_info2 = guest_ctrl->exit_info2;
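
    // These are latched into locals now because (per the CAUTION comment
    // above) the VMCB contents may be clobbered once global interrupts are
    // re-enabled below.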

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    // reenable global interrupts after vm exit
    v3_stgi();

    // Conditionally yield the CPU if the timeslice has expired
    v3_schedule(info);

    // This update timers is for time-dependent handlers
    // if we're slaved to host time
    v3_advance_time(info, NULL);
    v3_update_timers(info);

    {
        int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);

        if (ret != 0) {
            PrintError(info->vm_info, info, "Error in SVM exit handler (ret=%d)\n", ret);
            PrintError(info->vm_info, info, "  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t) exit_code);
            return -1;
        }
    }

    if (info->timeouts.timeout_active) {
        /* Check to see if any timeouts have expired */
        v3_handle_timeouts(info, guest_cycles);
    }

#ifdef V3_CONFIG_MEM_TRACK
    v3_mem_track_exit(info);
#endif

    return 0;
}


int v3_start_svm_guest(struct guest_info * info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    PrintDebug(info->vm_info, info, "Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);


#ifdef V3_CONFIG_MULTIBOOT
    if (v3_setup_multiboot_core_for_boot(info)) {
        PrintError(info->vm_info, info, "Failed to setup Multiboot core...\n");
        return -1;
    }
#endif

#ifdef V3_CONFIG_HVM
    if (v3_setup_hvm_hrt_core_for_boot(info)) {
        PrintError(info->vm_info, info, "Failed to setup HRT core...\n");
        return -1;
    }
#endif

    while (1) {

        if (info->core_run_state == CORE_STOPPED) {

            if (info->vcpu_id == 0) {
                info->core_run_state = CORE_RUNNING;
            } else {
                PrintDebug(info->vm_info, info, "SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);

                V3_NO_WORK(info);

                while (info->core_run_state == CORE_STOPPED) {

                    if (info->vm_info->run_state == VM_STOPPED) {
                        // The VM was stopped before this core was initialized.
                        return 0;
                    }

                    V3_STILL_NO_WORK(info);

                    //PrintDebug(info->vm_info, info, "SVM core %u: still waiting for INIT\n", info->vcpu_id);
                }

                V3_HAVE_WORK_AGAIN(info);

                PrintDebug(info->vm_info, info, "SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id);

                // We'll be paranoid about race conditions here
                v3_wait_at_barrier(info);
            }

            PrintDebug(info->vm_info, info, "SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
                       info->vcpu_id, info->pcpu_id,
                       info->segments.cs.selector, (void *)(info->segments.cs.base),
                       info->segments.cs.limit, (void *)(info->rip));


            PrintDebug(info->vm_info, info, "SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n",
                       info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);
            //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

            v3_start_time(info);
        }

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

#ifdef V3_CONFIG_PMU_TELEMETRY
        v3_pmu_telemetry_start(info);
#endif

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
        v3_pwrstat_telemetry_start(info);
#endif

        if (v3_svm_enter(info) == -1) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print(info->vm_info, info, "SVM core %u: SVM ERROR!!\n", info->vcpu_id);

            v3_print_guest_state(info);

            V3_Print(info->vm_info, info, "SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code);

            V3_Print(info->vm_info, info, "SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
            V3_Print(info->vm_info, info, "SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            V3_Print(info->vm_info, info, "SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
            V3_Print(info->vm_info, info, "SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                v3_gpa_to_hva(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }

            V3_Print(info->vm_info, info, "SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print(info->vm_info, info, "SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);

            break;
        }

        v3_wait_at_barrier(info);


        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }


        if ((info->num_exits % 50000) == 0) {
            V3_Print(info->vm_info, info, "SVM Exit number %d\n", (uint32_t)info->num_exits);
            v3_print_guest_state(info);
        }
    }

#ifdef V3_CONFIG_PMU_TELEMETRY
    v3_pmu_telemetry_end(info);
#endif

#ifdef V3_CONFIG_PWRSTAT_TELEMETRY
    v3_pwrstat_telemetry_end(info);
#endif

    // Need to take down the other cores on error...

    return 0;
}




int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {

    // Write the RIP, CS, and descriptor
    // assume the rest is already good to go
    //
    // vector VV -> rip at 0
    //
    // This means we start executing at linear address VV000
    //
    // So the selector needs to be VV00
    // and the base needs to be VV000
    //
    core->rip = 0;
    core->segments.cs.selector = rip << 8;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = rip << 12;

    return 0;
}
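
/*
 * Worked example: a startup IPI with vector 0x9A yields selector 0x9A00 and
 * CS base 0x9A000, so with rip=0 the core begins fetching at linear address
 * 0x9A000, matching real-mode INIT-SIPI semantics.
 */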


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug(VM_NONE, VCORE_NONE, "CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        V3_Print(VM_NONE, VCORE_NONE, "SVM Not Available\n");
        return 0;
    }

    v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug(VM_NONE, VCORE_NONE, "SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
        V3_Print(VM_NONE, VCORE_NONE, "SVM is available but is disabled.\n");

        v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

        if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
            V3_Print(VM_NONE, VCORE_NONE, "SVM BIOS Disabled, not unlockable\n");
        } else {
            V3_Print(VM_NONE, VCORE_NONE, "SVM is locked with a key\n");
        }
        return 0;

    } else {
        V3_Print(VM_NONE, VCORE_NONE, "SVM is available and enabled.\n");

        v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
        PrintDebug(VM_NONE, VCORE_NONE, "CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

        return 1;
    }
}

static int has_svm_nested_paging() {
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug(VM_NONE, VCORE_NONE, "CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        V3_Print(VM_NONE, VCORE_NONE, "SVM Nested Paging not supported\n");
        return 0;
    } else {
        V3_Print(VM_NONE, VCORE_NONE, "SVM Nested Paging supported\n");
        return 1;
    }
}



void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    V3_Print(VM_NONE, VCORE_NONE, "SVM Enabled\n");

    // Setup the host state save area
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4); // need not be shadow-safe, not exposed to guest

    if (!host_vmcbs[cpu_id]) {
        PrintError(VM_NONE, VCORE_NONE, "Failed to allocate VMCB\n");
        return;
    }

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug(VM_NONE, VCORE_NONE, "Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
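
    // VM_HSAVE_PA points the hardware at the per-CPU region where VMRUN
    // saves host state and #VMEXIT restores it; it must be programmed
    // before the first VMRUN on this CPU.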


    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}



void v3_deinit_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // reset SVM_VM_HSAVE_PA_MSR
    // Does setting it to NULL disable??
    msr.r_reg = 0;
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    // Disable SVM
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low &= ~EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    V3_FreePages((void *)host_vmcbs[cpu_id], 4);

    V3_Print(VM_NONE, VCORE_NONE, "Host CPU %d host area freed, and SVM disabled\n", cpu_id);
}


#if 0
/* 
 * Test VMSAVE/VMLOAD Latency 
 * (Disabled test code: host_vmcb, cpu_id, and core below refer to an older
 * layout and are not defined in this file.)
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
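
/*
 * VMSAVE is encoded as 0F 01 DB and VMLOAD as 0F 01 DA; raw bytes are used
 * because older assemblers do not know the SVM mnemonics. Both instructions
 * take the physical address of a VMCB in rAX, which is why the asm below
 * moves rcx (the VMCB address input operand) into rax first.
 */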
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq %%rcx, %%rax ; "
                          vmsave
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
                          : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug(core->vm_info, core, "VMSave Cycle Latency: %d\n", (uint32_t)(end - start));

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq %%rcx, %%rax ; "
                          vmload
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
                          : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug(core->vm_info, core, "VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
}
/* End Latency Test */
#endif
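

/*
 * Legacy protected-mode VMCB setup below (adapted from TVMM). It references
 * identifiers (core, GetGDTR/GetIDTR) that are not defined in this file and
 * appears to be retained only for reference.
 */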
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;

    guest_state->rsp = vm_info.vm_regs.rsp;
    guest_state->rip = vm_info.rip;

    /* I pretty much just gutted this from TVMM */
    /* Note: That means its probably wrong */

    // set the segment registers to mirror ours
    guest_state->cs.selector = 1 << 3;
    guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
    guest_state->cs.attrib.fields.S = 1;
    guest_state->cs.attrib.fields.P = 1;
    guest_state->cs.attrib.fields.db = 1;
    guest_state->cs.attrib.fields.G = 1;
    guest_state->cs.limit = 0xfffff;
    guest_state->cs.base = 0;

    struct vmcb_selector * segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 2 << 3;
        seg->attrib.fields.type = 0x2; // Data Segment+read/write
        seg->attrib.fields.S = 1;
        seg->attrib.fields.P = 1;
        seg->attrib.fields.db = 1;
        seg->attrib.fields.G = 1;
        seg->limit = 0xfffff;
        seg->base = 0;
    }

    /* JRL THIS HAS TO GO */
    // guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;

    guest_state->efer |= EFER_MSR_svm_enable;
    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    guest_state->cr0 = 0x00000001; // PE
    ctrl_area->guest_ASID = 1;

    //  guest_state->cpl = 0;

    ctrl_area->cr_writes.cr4 = 1;

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    ctrl_area->exceptions.pf = 1;
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
    ctrl_area->exceptions.nmi = 1;


    ctrl_area->instrs.IOIO_PROT = 1;
    ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3); // need not be shadow-safe, not exposed to guest

    if (!ctrl_area->IOPM_BASE_PA) {
        PrintError(core->vm_info, core, "Cannot allocate IO bitmap\n");
        return;
    }

    {
        reg_ex_t tmp_reg;
        tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
        memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
    }

    ctrl_area->instrs.INTR = 1;

    {
        char gdt_buf[6];
        char idt_buf[6];

        memset(gdt_buf, 0, 6);
        memset(idt_buf, 0, 6);

        uint_t gdt_base, idt_base;
        ushort_t gdt_limit, idt_limit;

        GetGDTR(gdt_buf);
        gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
        gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
        PrintDebug(core->vm_info, core, "GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

        GetIDTR(idt_buf);
        idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
        idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
        PrintDebug(core->vm_info, core, "IDT: base: %x, limit: %x\n", idt_base, idt_limit);

        // gdt_base -= 0x2000;
        // idt_base -= 0x2000;

        guest_state->gdtr.base = gdt_base;
        guest_state->gdtr.limit = gdt_limit;
        guest_state->idtr.base = idt_base;
        guest_state->idtr.limit = idt_limit;
    }

    // also determine if CPU supports nested paging

    if (vm_info.page_tables) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug(core->vm_info, core, "NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

        // ctrl_area->N_CR3 = Get_CR3();
        // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

        guest_state->g_pat = 0x7040600070406ULL;

        PrintDebug(core->vm_info, core, "Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
        PrintDebug(core->vm_info, core, "Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
        // guest_state->cr0 |= 0x80000000;