#include <palacios/svm.h>
#include <palacios/vmm.h>
#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>
#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct guest_gprs * gprs);

extern uint_t Get_CR3();

extern void DisableInts();

/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
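/*
 * Detection sequence used below: the CPUID feature-ID leaf's ECX reports
 * whether SVM exists at all, the VM_CR MSR's SVMDIS bit reports whether the
 * BIOS has disabled it, and the SVM-Lock (SVML) feature bit tells us whether
 * a BIOS-disabled SVM could still be unlocked with a key.
 */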
int is_svm_capable() {
    uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
    uint_t vm_cr_low = 0, vm_cr_high = 0;

    if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    }

    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    // The NP flag is reported in EDX of the SVM revision/feature leaf
    if ((cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS) & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("Nested Paging not supported\n");
    }
    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
        return 1;
    }

    ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

    if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
    } else {
        PrintDebug("SVM is locked with a key\n");
    }

    return 0;
}

void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {

    // Enable SVM on the CPU
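    // SVM must be enabled by setting the SVME bit in the EFER MSR before
    // VMRUN (or any other SVM instruction) can be executed; this is a
    // read-modify-write of EFER so the other flags are preserved.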
    Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    Set_MSR(EFER_MSR, msr.e_reg.high, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");

    // Setup the host state save area
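    // VMRUN saves host state to (and #VMEXIT restores it from) the physical
    // address programmed into the VM_HSAVE_PA MSR, so this area must be set
    // up on each CPU before any guest is launched.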
    host_state = os_hooks->allocate_pages(4);

    msr.e_reg.high = 0;
    msr.e_reg.low = (uint_t)host_state;

    PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
    Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
}

int init_svm_guest(struct guest_info *info) {

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    //PrintDebug("Generating Guest nested page tables\n");
    // info->page_tables = NULL;
    //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
    //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
    // PrintDebugPageTables(info->page_tables);

    PrintDebug("Initializing VMCB (addr=%x)\n", (uint_t)info->vmm_data);
    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);

    info->vm_regs.rdi = 0;
    info->vm_regs.rsi = 0;
    info->vm_regs.rbp = 0;
    info->vm_regs.rsp = 0;
    info->vm_regs.rbx = 0;
    info->vm_regs.rdx = 0;
    info->vm_regs.rcx = 0;
    info->vm_regs.rax = 0;

    return 0;
}

// can we start a kernel thread here...
int start_svm_guest(struct guest_info *info) {

    PrintDebug("Launching SVM VM (vmcb=%x)\n", (uint_t)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    //PrintDebug("SVM Launch Args (vmcb=%x), (info=%x), (vm_regs=%x)\n", info->vmm_data, &(info->vm_regs));
    //PrintDebug("Launching to RIP: %x\n", info->rip);
    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
    //launch_svm((vmcb_t*)(info->vmm_data));
    // PrintDebug("SVM Returned\n");

    if (handle_svm_exit(info) != 0) {
        PrintDebug("SVM ERROR!!\n");
        return -1;
    }

    return 0;
}

vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);

    memset(vmcb_page, 0, PAGE_SIZE);

    return vmcb_page;
}

void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);

    guest_state->rsp = vm_info.vm_regs.rsp;
    guest_state->rip = vm_info.rip;

    //ctrl_area->instrs.instrs.CR0 = 1;
    ctrl_area->cr_reads.cr0 = 1;
    ctrl_area->cr_writes.cr0 = 1;

    guest_state->efer |= EFER_MSR_svm_enable;
    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    // guest_state->cr0 = 0x00000001; // PE
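
    // ASID 0 is reserved for the host's own TLB entries, so the guest must
    // run under a nonzero ASID; a single guest can simply use ASID 1.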
    ctrl_area->guest_ASID = 1;

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    ctrl_area->exceptions.pf = 1;
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
    ctrl_area->exceptions.nmi = 1;

    guest_state->cs.selector = 0x0000;
    guest_state->cs.limit = ~0u;
    guest_state->cs.base = guest_state->cs.selector << 4;
    guest_state->cs.attrib.raw = 0xf3;
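
    // Real-mode segmentation: a segment's base is its selector shifted left
    // by 4, which is how CS was set up above and how the data/stack segments
    // are initialized in the loop below.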

    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
    for (i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        seg->base = seg->selector << 4;
        seg->attrib.raw = 0xf3;
    }

    if (vm_info.io_map.num_ports > 0) {
        vmm_io_hook_t * iter;
        addr_t io_port_bitmap;

        io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
        memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

        ctrl_area->IOPM_BASE_PA = io_port_bitmap;

        //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
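
        // The IO permission map holds one bit per I/O port; a set bit causes
        // IN/OUT on that port to be intercepted. port/8 selects the byte and
        // port%8 the bit within it. A sketch of the same update, factored
        // into a (hypothetical, unused) helper:
        //
        //   static void iopm_hook_port(uchar_t * iopm, ushort_t port) {
        //       iopm[port / 8] |= 1 << (port % 8);
        //   }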

        FOREACH_IO_HOOK(vm_info.io_map, iter) {
            ushort_t port = iter->port;
            uchar_t * bitmap = (uchar_t *)io_port_bitmap;

            bitmap += (port / 8);
            PrintDebug("Setting Bit in block %x\n", (uint_t)bitmap);
            *bitmap |= 1 << (port % 8);
        }

        //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

        ctrl_area->instrs.IOIO_PROT = 1;
    }

    ctrl_area->instrs.INTR = 1;

    if (vm_info.page_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;

        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        ctrl_area->instrs.INVLPG = 1;
        ctrl_area->instrs.INVLPGA = 1;
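
        // 0x7040600070406 is the power-on default PAT encoding (WB, WT,
        // UC-, UC repeated), so the guest starts with ordinary memory types.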
        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;
    } else if (vm_info.page_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        //ctrl_area->TLB_CONTROL = 1;

        // Enable Nested Paging
        //ctrl_area->NP_ENABLE = 1;

        //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        // ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
        // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

        // ctrl_area->N_CR3 = Get_CR3();
        // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

        // guest_state->g_pat = 0x7040600070406ULL;
    }
}

void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);

    guest_state->rsp = vm_info.vm_regs.rsp;
    // guest_state->rip = vm_info.rip;
    guest_state->rip = 0xfff0;

    //ctrl_area->instrs.instrs.CR0 = 1;
    ctrl_area->cr_reads.cr0 = 1;
    ctrl_area->cr_writes.cr0 = 1;

    guest_state->efer |= EFER_MSR_svm_enable;
    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001; // PE
    ctrl_area->guest_ASID = 1;

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    ctrl_area->exceptions.pf = 1;
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
    ctrl_area->exceptions.nmi = 1;

    vm_info.vm_regs.rdx = 0x00000f00;
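
    // 0x60000010 is the architectural reset value of CR0: CD, NW, and ET set,
    // PE clear, so the guest starts out in real mode.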
    guest_state->cr0 = 0x60000010;
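
    // After reset, execution begins at CS:IP = F000:FFF0. With the CS base
    // set to 0xF0000 below, the first instruction fetch lands at 0xFFFF0,
    // the BIOS entry point at the top of the first megabyte.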
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;

    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
    for (i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        // seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;
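
    // Architectural reset values for the debug registers: DR6 = FFFF0FF0h,
    // DR7 = 00000400h.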
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;

    if (vm_info.io_map.num_ports > 0) {
        vmm_io_hook_t * iter;
        addr_t io_port_bitmap;

        io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
        memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

        ctrl_area->IOPM_BASE_PA = io_port_bitmap;

        //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

        FOREACH_IO_HOOK(vm_info.io_map, iter) {
            ushort_t port = iter->port;
            uchar_t * bitmap = (uchar_t *)io_port_bitmap;

            bitmap += (port / 8);
            PrintDebug("Setting Bit for port 0x%x\n", port);
            *bitmap |= 1 << (port % 8);
        }

        //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

        ctrl_area->instrs.IOIO_PROT = 1;
    }

    PrintDebug("Exiting on interrupts\n");
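    // V_INTR_MASKING virtualizes EFLAGS.IF (and CR8) for the guest so the
    // host's physical interrupt masking is unaffected, and the INTR intercept
    // forces a #VMEXIT whenever a physical interrupt arrives while the guest
    // is running.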
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;

    if (vm_info.page_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;

        //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        ctrl_area->instrs.INVLPG = 1;
        ctrl_area->instrs.INVLPGA = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;
    } else if (vm_info.page_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        //ctrl_area->TLB_CONTROL = 1;

        // Enable Nested Paging
        //ctrl_area->NP_ENABLE = 1;

        //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        // ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
        // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

        // ctrl_area->N_CR3 = Get_CR3();
        // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

        // guest_state->g_pat = 0x7040600070406ULL;
    }
}

void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);

    guest_state->rsp = vm_info.vm_regs.rsp;
    guest_state->rip = vm_info.rip;

    /* I pretty much just gutted this from TVMM */
    /* Note: That means it's probably wrong */

    // set the segment registers to mirror ours
    guest_state->cs.selector = 1 << 3;
    guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
    guest_state->cs.attrib.fields.S = 1;
    guest_state->cs.attrib.fields.P = 1;
    guest_state->cs.attrib.fields.db = 1;
    guest_state->cs.attrib.fields.G = 1;
    guest_state->cs.limit = 0xfffff;
    guest_state->cs.base = 0;

    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
    for (i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 2 << 3;
        seg->attrib.fields.type = 0x2; // Data Segment+read/write
        seg->attrib.fields.S = 1;
        seg->attrib.fields.P = 1;
        seg->attrib.fields.db = 1;
        seg->attrib.fields.G = 1;
        seg->limit = 0xfffff;
    }

    /* JRL THIS HAS TO GO */
    // guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    // guest_state->tr.base = GetTR_Base();// - 0x2000;

    guest_state->efer |= EFER_MSR_svm_enable;
    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    guest_state->cr0 = 0x00000001; // PE
    ctrl_area->guest_ASID = 1;

    // guest_state->cpl = 0;

    ctrl_area->cr_writes.cr4 = 1;

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    ctrl_area->exceptions.pf = 1;
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
    ctrl_area->exceptions.nmi = 1;

    ctrl_area->instrs.IOIO_PROT = 1;
    ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    // 0xff sets every bit, so all ports covered by these IOPM pages are intercepted
    memset((void*)(tmp_reg.e_reg.low), 0xff, PAGE_SIZE * 2);

    ctrl_area->instrs.INTR = 1;

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    idt_base = *(ulong_t*)((uchar_t*)idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);

    // gdt_base -= 0x2000;
    // idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

    // also determine if CPU supports nested paging
    if (vm_info.page_tables) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%x\n", (uint_t)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

        // ctrl_area->N_CR3 = Get_CR3();
        // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

        guest_state->g_pat = 0x7040600070406ULL;

        PrintDebug("Set Nested CR3: lo: 0x%x hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), *(((uint_t *)&(ctrl_area->N_CR3)) + 1));
        PrintDebug("Set Guest CR3: lo: 0x%x hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), *(((uint_t *)&(guest_state->cr3)) + 1));

        // guest_state->cr0 |= 0x80000000;

void Init_VMCB_Real(vmcb_t * vmcb, struct guest_info vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);

    guest_state->rsp = vm_info.vm_regs.rsp;
    guest_state->rip = vm_info.rip;

    guest_state->efer |= EFER_MSR_svm_enable;
    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->guest_ASID = 1;
    guest_state->cr0 = 0x60000010;

    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    ctrl_area->exceptions.pf = 1;
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
    ctrl_area->exceptions.nmi = 1;

    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0xffff0000;
    guest_state->cs.attrib.raw = 0x9a;

    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
    for (i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        seg->base = 0xffff0000;
        seg->attrib.raw = 0x9b;
    }

    // EAX, EBX, ECX, ESI, EDI, EBP, ESP == 0x0

    guest_state->gdtr.base = 0;
    guest_state->gdtr.limit = 0xffff;
    guest_state->gdtr.attrib.raw = 0x0;

    guest_state->idtr.base = 0;
    guest_state->idtr.limit = 0xffff;
    guest_state->idtr.attrib.raw = 0x0;

    guest_state->ldtr.base = 0;
    guest_state->ldtr.limit = 0xffff;
    guest_state->ldtr.attrib.raw = 0x82;

    guest_state->tr.base = 0;
    guest_state->tr.limit = 0xffff;
    guest_state->tr.attrib.raw = 0x83;

    if (vm_info.io_map.num_ports > 0) {
        vmm_io_hook_t * iter;
        addr_t io_port_bitmap;

        io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
        memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

        ctrl_area->IOPM_BASE_PA = io_port_bitmap;

        //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

        FOREACH_IO_HOOK(vm_info.io_map, iter) {
            ushort_t port = iter->port;
            uchar_t * bitmap = (uchar_t *)io_port_bitmap;

            bitmap += (port / 8);
            PrintDebug("Setting Bit in block %x\n", (uint_t)bitmap);
            *bitmap |= 1 << (port % 8);
        }

        ctrl_area->instrs.IOIO_PROT = 1;
    }

    ctrl_area->instrs.INTR = 1;

    // also determine if CPU supports nested paging
    if (vm_info.page_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;

        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;

        ctrl_area->instrs.INVLPG = 1;
        ctrl_area->instrs.INVLPGA = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        vm_info.shdw_pg_state.guest_cr0.e_reg.low = guest_state->cr0;
        guest_state->cr0 |= 0x80000000;
    } else if (vm_info.page_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        //ctrl_area->TLB_CONTROL = 1;

        // Enable Nested Paging
        //ctrl_area->NP_ENABLE = 1;

        //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        // ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
        // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

        // ctrl_area->N_CR3 = Get_CR3();
        // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

        // guest_state->g_pat = 0x7040600070406ULL;