2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2015, The V3VEE Project <http://www.v3vee.org>
11 * All rights reserved.
13 * Author: Peter Dinda <pdinda@northwestern.edu>
15 * This is free software. You are permitted to use,
16 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19 #include <palacios/vmm_mem.h>
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_util.h>
22 #include <palacios/vmm_emulator.h>
23 #include <palacios/vm_guest.h>
24 #include <palacios/vmm_debug.h>
25 #include <palacios/vmm_hypercall.h>
27 #include <palacios/vmm_xml.h>
29 #include <palacios/vm_guest_mem.h>
36 MEM = Total size of memory in the GPA (in MB)
37 ROS_MEM = Total size of memory for the ROS (in MB) (<RAM)
39 GPAs [0,ROS_MEM) are what the ROS sees
40 GPAs [ROS_MEM, MEM) are HRT only
41 GPAS [0,MEM) are accessible by the HRT
43 CORES = Total number of cores in VM
44    ROS_CORES = Total number of cores for the ROS
46 Cores [0,ROS_CORES) are what the ROS sees
47 Cores [ROS_CORES,CORES) are HRT only
48 Cores [0,CORES) are accessible by the HRT
53 <file id="hrtelf" filename="hrtelf.o" />
56 <mem ... >RAM</mem> (MB) Note these are
57 <cores count="CORES" ...> backward compatible
60 <ros cores="ROS_CORES" mem="ROS_MEM" /> (MB)
61 <hrt file_id="hrtelf" /hrt>
66 #ifndef V3_CONFIG_DEBUG_HVM
68 #define PrintDebug(fmt, args...)
73 PrintDebug(VM_NONE,VCORE_NONE, "hvm: init\n");
79 PrintDebug(VM_NONE,VCORE_NONE, "hvm: deinit\n");
// Handler registered for HVM_HCALL: logs the hypercall id and the guest's
// rax/rbx/rcx argument registers at the time of the call.
// NOTE(review): fragment — the handler's return statement is not visible in
// this chunk; actual dispatch/return behavior must be confirmed in the full file.
84 static int hvm_hcall_handler(struct guest_info * core , hcall_id_t hcall_id, void * priv_data)
86     V3_Print(core->vm_info,core, "hvm: received hypercall %x rax=%llx rbx=%llx rcx=%llx\n",
87 	     hcall_id, core->vm_regs.rax, core->vm_regs.rbx, core->vm_regs.rcx);
// Integer ceiling division: (x+y-1)/y without the overflow risk of adding
// y-1 to x.  Both arguments are evaluated twice — do not pass expressions
// with side effects.
91 #define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
// Parse the <hvm> subtree of the VM configuration and initialize vm->hvm_state.
// Defaults are set first (is_hvm=0, ROS owns all cores and all memory) so that
// any early exit leaves the VM configured as a pure ROS VM.
// NOTE(review): this chunk is a sampled fragment — the error-path returns and
// closing braces between the visible lines are not shown here.
93 int v3_init_hvm_vm(struct v3_vm_info *vm, struct v3_xml *config)
95     v3_cfg_tree_t *hvm_config;
96     v3_cfg_tree_t *ros_config;
97     v3_cfg_tree_t *hrt_config;
103     PrintDebug(vm, VCORE_NONE, "hvm: vm init\n");
// Default: not an HVM; the ROS sees every core and every byte of guest memory.
108     memset(&vm->hvm_state,0,sizeof(struct v3_vm_hvm));
109     vm->hvm_state.is_hvm=0;
110     vm->hvm_state.first_hrt_core=vm->num_cores;
111     vm->hvm_state.first_hrt_gpa=vm->mem_size;
// No <hvm> block at all is not an error — it simply means a plain ROS VM.
113     if (!config || !(hvm_config=v3_cfg_subtree(config,"hvm"))) {
114 	PrintDebug(vm,VCORE_NONE,"hvm: no HVM configuration found (all HW is ROS)\n");
// An <hvm> block must carry enable="y" (case-insensitive) to take effect.
118     if (!(enable=v3_cfg_val(hvm_config,"enable")) || strcasecmp(enable,"y")) {
119 	PrintDebug(vm,VCORE_NONE,"hvm: HVM configuration disabled (all HW is ROS)\n");
// From here on, an enabled <hvm> block that is missing pieces is an error.
123     if (!(ros_config=v3_cfg_subtree(hvm_config,"ros"))) {
124 	PrintError(vm,VCORE_NONE,"hvm: HVM configuration without ROS block...\n");
128     if (!(ros_cores=v3_cfg_val(ros_config,"cores"))) {
129 	PrintError(vm,VCORE_NONE,"hvm: ROS block without cores...\n");
// Cores [0,first_hrt_core) belong to the ROS, the rest to the HRT.
// NOTE(review): atoi() silently yields 0 on malformed input and there is no
// range check against vm->num_cores visible here — confirm in the full file.
133     vm->hvm_state.first_hrt_core = ((uint32_t)atoi(ros_cores));
135     if (!(ros_mem=v3_cfg_val(ros_config,"mem"))) {
136 	PrintError(vm,VCORE_NONE,"hvm: ROS block without mem...\n");
// "mem" is specified in MB; convert to a byte GPA boundary.
140     vm->hvm_state.first_hrt_gpa = ((uint64_t)atoi(ros_mem))*1024*1024;
142     if (!(hrt_config=v3_cfg_subtree(hvm_config,"hrt"))) {
143 	PrintError(vm,VCORE_NONE,"hvm: HVM configuration without HRT block...\n");
147     if (!(hrt_file_id=v3_cfg_val(hrt_config,"file_id"))) {
148 	PrintError(vm,VCORE_NONE,"hvm: HRT block without file_id...\n");
// Resolve the file_id to the in-memory HRT image registered with the VM.
152     vm->hvm_state.hrt_file = v3_cfg_get_file(vm,hrt_file_id);
154     if (!vm->hvm_state.hrt_file) {
155 	PrintError(vm,VCORE_NONE,"hvm: HRT block contains bad file_id (%s)\n",hrt_file_id);
// Register the hypercall the HRT/ROS will use to talk to us.
159     if (v3_register_hypercall(vm, HVM_HCALL,
160 			      hvm_hcall_handler, 0)) {
161 	PrintError(vm,VCORE_NONE, "hvm: cannot register hypercall....\n");
165     // XXX sanity check config here
167     vm->hvm_state.is_hvm=1;
// Summarize the resulting core/memory split for the log.
170     if (vm->hvm_state.is_hvm) {
171 	V3_Print(vm,VCORE_NONE,"hvm: [ROS: cores 0..%u, mem 0..%p] [HRT: cores %u..%u, mem %p..%p, file_id=%s (tag %s)]\n",
172 		 vm->hvm_state.first_hrt_core-1,
173 		 (void*) vm->hvm_state.first_hrt_gpa-1,
174 		 vm->hvm_state.first_hrt_core,
176 		 (void*) vm->hvm_state.first_hrt_gpa,
177 		 (void*)vm->mem_size-1,
179 		 vm->hvm_state.hrt_file->tag);
181 	V3_Print(vm,VCORE_NONE,"hvm: This is a pure ROS VM\n");
// Tear down per-VM HVM state: unregister the HVM hypercall.
188 int v3_deinit_hvm_vm(struct v3_vm_info *vm)
190     PrintDebug(vm, VCORE_NONE, "hvm: HVM VM deinit\n");
192     v3_remove_hypercall(vm,HVM_HCALL);
// Per-core HVM init: zero the core's hvm_state, then mark the core as an HRT
// core if this is an HVM VM and the core id is at or above the ROS/HRT split.
197 int v3_init_hvm_core(struct guest_info *core)
199     memset(&core->hvm_state,0,sizeof(core->hvm_state));
200     if (core->vm_info->hvm_state.is_hvm) {
201 	if (core->vcpu_id >= core->vm_info->hvm_state.first_hrt_core) {
202 	    core->hvm_state.is_hrt=1;
// Per-core HVM teardown; currently nothing to release beyond logging.
208 int v3_deinit_hvm_core(struct guest_info *core)
210     PrintDebug(core->vm_info, VCORE_NONE, "hvm: HVM core deinit\n");
// Bytes of guest physical memory visible to the ROS.  For an HVM this is the
// [0, first_hrt_gpa) region; the non-HVM return path is not visible in this
// fragment (presumably the full memory size — confirm in the full file).
216 uint64_t v3_get_hvm_ros_memsize(struct v3_vm_info *vm)
218     if (vm->hvm_state.is_hvm) {
219 	return vm->hvm_state.first_hrt_gpa;
// Bytes of guest physical memory exclusive to the HRT: [first_hrt_gpa, mem_size).
// The non-HVM return path (presumably 0) is not visible in this fragment.
224 uint64_t v3_get_hvm_hrt_memsize(struct v3_vm_info *vm)
226     if (vm->hvm_state.is_hvm) {
227 	return vm->mem_size - vm->hvm_state.first_hrt_gpa;
// Number of cores owned by the ROS: the split point for an HVM, otherwise
// every core in the VM.
233 uint32_t v3_get_hvm_ros_cores(struct v3_vm_info *vm)
235     if (vm->hvm_state.is_hvm) {
236 	return vm->hvm_state.first_hrt_core;
238 	return vm->num_cores;
// Number of cores exclusive to the HRT: [first_hrt_core, num_cores).
// The non-HVM return path (presumably 0) is not visible in this fragment.
242 uint32_t v3_get_hvm_hrt_cores(struct v3_vm_info *vm)
244     if (vm->hvm_state.is_hvm) {
245 	return vm->num_cores - vm->hvm_state.first_hrt_core;
// True if the GPA falls in the ROS-visible region [0, first_hrt_gpa).
// NOTE(review): "gpa>=0" is a tautology if addr_t is unsigned (as addr_t
// typically is); the lower-bound check is effectively a no-op.
252 int v3_is_hvm_ros_mem_gpa(struct v3_vm_info *vm, addr_t gpa)
254     if (vm->hvm_state.is_hvm) {
255 	return gpa>=0 && gpa<vm->hvm_state.first_hrt_gpa;
// True if the GPA falls in the HRT-only region [first_hrt_gpa, mem_size).
// The non-HVM return path is not visible in this fragment.
261 int v3_is_hvm_hrt_mem_gpa(struct v3_vm_info *vm, addr_t gpa)
263     if (vm->hvm_state.is_hvm) {
264 	return gpa>=vm->hvm_state.first_hrt_gpa && gpa<vm->mem_size;
// True if this core was marked as an HRT core at init time.
270 int v3_is_hvm_hrt_core(struct guest_info *core)
272     return core->hvm_state.is_hrt;
// True if this core belongs to the ROS (i.e., is not an HRT core).
275 int v3_is_hvm_ros_core(struct guest_info *core)
277     return !core->hvm_state.is_hrt;
// IPI delivery policy between ROS and HRT cores.  Device-originated
// interrupts (src==NULL per the ioapic/msi comment) go only to ROS cores;
// core-to-core IPIs are allowed from HRT cores to anyone, and between ROS
// cores, but never from a ROS core to an HRT core.
// NOTE(review): fragment — the branch structure between these lines
// (the src==NULL test) is not visible here.
280 int v3_hvm_should_deliver_ipi(struct guest_info *src, struct guest_info *dest)
283 	// ioapic or msi to apic
284 	return !dest->hvm_state.is_hrt;
287 	return src->hvm_state.is_hrt || (!src->hvm_state.is_hrt && !dest->hvm_state.is_hrt) ;
// Report the range of APICs visible to a given observer, writing the result
// through *start_apic/*num_apics.  Devices (core==NULL, per the comments)
// see only ROS apics on an HVM; an HRT core sees all apics; a ROS core sees
// only the ROS apics.
// NOTE(review): fragment — the *start_apic assignments and the core==NULL
// test are among the lines not visible in this chunk.
291 void v3_hvm_find_apics_seen_by_core(struct guest_info *core, struct v3_vm_info *vm,
292 				    uint32_t *start_apic, uint32_t *num_apics)
295 	// Seen from ioapic, msi, etc:
296 	if (vm->hvm_state.is_hvm) {
297 	    // HVM VM shows only the ROS cores/apics to ioapic, msi, etc
299 	    *num_apics = vm->hvm_state.first_hrt_core;
301 	    // Non-HVM shows all cores/APICs to apic, msi, etc.
303 	    *num_apics = vm->num_cores;
307 	if (core->hvm_state.is_hrt) {
308 	    // HRT core/apic sees all apics
309 	    // (this policy may change...)
311 	    *num_apics = vm->num_cores;
313 	    // non-HRT core/apic sees only non-HRT cores/apics
315 	    *num_apics = vm->hvm_state.first_hrt_core;
// GPA placement of the stub interrupt handler: the last page of guest memory.
// (The matching *limit assignment is not visible in this fragment.)
321 static void get_null_int_handler_loc(struct v3_vm_info *vm, void **base, uint64_t *limit)
323     *base = (void*) PAGE_ADDR(vm->mem_size - PAGE_SIZE);
// Linker-provided symbols bracketing the SVM and VMX variants of the
// null interrupt handler stub (assembly, defined elsewhere).
327 extern v3_cpu_arch_t v3_mach_type;
329 extern void *v3_hvm_svm_null_int_handler_start;
330 extern void *v3_hvm_svm_null_int_handler_end;
331 extern void *v3_hvm_vmx_null_int_handler_start;
332 extern void *v3_hvm_vmx_null_int_handler_end;
// Copy the architecture-appropriate (SVM vs VMX) null interrupt handler stub
// into the guest at the location chosen by get_null_int_handler_loc().
// Handler length is computed from the start/end linker symbols.
334 static void write_null_int_handler(struct v3_vm_info *vm)
341     get_null_int_handler_loc(vm,&base,&limit);
343     switch (v3_mach_type) {
346 	case V3_SVM_REV3_CPU:
347 	    data = (void*) &v3_hvm_svm_null_int_handler_start;
348 	    len = (void*) &v3_hvm_svm_null_int_handler_end - data;
354 	case V3_VMX_EPT_UG_CPU:
355 	    data = (void*) &v3_hvm_vmx_null_int_handler_start;
356 	    len = (void*) &v3_hvm_vmx_null_int_handler_end - data;
360 	    PrintError(vm,VCORE_NONE,"hvm: cannot determine CPU type to select null interrupt handler...\n");
// Write through core 0 — GPA mappings are VM-wide, so any core works.
366     v3_write_gpa_memory(&vm->cores[0],(addr_t)(base),len,(uint8_t*)data);
369     PrintDebug(vm,VCORE_NONE,"hvm: wrote null interrupt handler at %p (%llu bytes)\n",base,len);
// GPA placement of the stub IDT: second page from the top of guest memory.
373 static void get_idt_loc(struct v3_vm_info *vm, void **base, uint64_t *limit)
375     *base = (void*) PAGE_ADDR(vm->mem_size - 2 * PAGE_SIZE);
379 // default IDT entries (int and trap gates)
381 // Format is 16 bytes long:
383 // 16 selector => (target code selector) => 0x8 // entry 1 of GDT
384 // 3 ist => (stack) = 0 => current stack
386 // 4 type => 0xe=>INT, 0xf=>TRAP
391 // 32 offsethigh => 0 (total is a 64 bit offset)
394 // 00 00 | 08 00 | 00 | 8[typenybble] | offsetmid | offsethigh | reserved
396 // Note little endian
// 16-byte 64-bit gate descriptor templates (low qword, high qword), little
// endian, per the layout described above: selector=0x8, present, DPL 0,
// type 0xf (trap) or 0xe (interrupt); offset fields are patched in later.
398 static uint64_t idt64_trap_gate_entry_mask[2] = { 0x00008f0000080000, 0x0 } ;
399 static uint64_t idt64_int_gate_entry_mask[2] = { 0x00008e0000080000, 0x0 };
// Build the stub IDT in guest memory.  Patches the null-handler address into
// the trap and interrupt gate templates (offset split across bytes 0-1, 6-7,
// and 8-11 of the 16-byte descriptor), then writes 256 entries — trap gates
// for the exception vectors, interrupt gates for vectors 32..255.
401 static void write_idt(struct v3_vm_info *vm)
406     uint64_t handler_len;
408     uint64_t trap_gate[2];
409     uint64_t int_gate[2];
411     get_idt_loc(vm,&base,&limit);
413     get_null_int_handler_loc(vm,&handler,&handler_len);
415     memcpy(trap_gate,idt64_trap_gate_entry_mask,16);
416     memcpy(int_gate,idt64_int_gate_entry_mask,16);
419     // update the entries for the handler location
423     hand = (uint8_t*) &handler;
425     mask = (uint8_t *)trap_gate;
426     memcpy(&(mask[0]),&(hand[0]),2); // offset low (bits 0..15)
427     memcpy(&(mask[6]),&(hand[2]),2); // offset med (bits 16..31)
428     memcpy(&(mask[8]),&(hand[4]),4); // offset high (bits 32..63)
430     mask = (uint8_t *)int_gate;
431     memcpy(&(mask[0]),&(hand[0]),2); // offset low (bits 0..15)
432     memcpy(&(mask[6]),&(hand[2]),2); // offset med (bits 16..31)
433     memcpy(&(mask[8]),&(hand[4]),4); // offset high (bits 32..63)
435     PrintDebug(vm,VCORE_NONE,"hvm: Adding default null trap and int gates\n");
// Vectors 0..31 (exceptions) get trap gates; the loop header is not visible
// in this fragment but the write below is indexed by i.
439 	v3_write_gpa_memory(&vm->cores[0],(addr_t)(base+i*16),16,(uint8_t*)trap_gate);
// Vectors 32..255 (external interrupts) get interrupt gates.
442     for (i=32;i<256;i++) {
443 	v3_write_gpa_memory(&vm->cores[0],(addr_t)(base+i*16),16,(uint8_t*)int_gate);
446     PrintDebug(vm,VCORE_NONE,"hvm: wrote IDT at %p\n",base);
// GPA placement of the stub GDT: third page from the top of guest memory.
451 static void get_gdt_loc(struct v3_vm_info *vm, void **base, uint64_t *limit)
453     *base = (void*)PAGE_ADDR(vm->mem_size - 3 * PAGE_SIZE);
// Minimal long-mode GDT: null descriptor, 64-bit code (L bit set),
// and data.  Base/limit fields are irrelevant in long mode.
457 static uint64_t gdt64[3] = {
458     0x0000000000000000, /* null */
459     0x00a09a0000000000, /* code (note lme bit) */
460     0x00a0920000000000, /* data (most entries don't matter) */
// Copy the static stub GDT into guest memory at its reserved page.
463 static void write_gdt(struct v3_vm_info *vm)
468     get_gdt_loc(vm,&base,&limit);
469     v3_write_gpa_memory(&vm->cores[0],(addr_t)base,limit,(uint8_t*) gdt64);
471     PrintDebug(vm,VCORE_NONE,"hvm: wrote GDT at %p\n",base);
// GPA placement of the stub TSS: fourth page from the top of guest memory.
476 static void get_tss_loc(struct v3_vm_info *vm, void **base, uint64_t *limit)
478     *base = (void*)PAGE_ADDR(vm->mem_size - 4 * PAGE_SIZE);
// Fill pattern for the stub TSS page (all zeros).
482 static uint64_t tss_data=0x0;
// Zero-fill the stub TSS page in guest memory, 8 bytes at a time.
484 static void write_tss(struct v3_vm_info *vm)
490     get_tss_loc(vm,&base,&limit);
491     for (i=0;i<limit/8;i++) {
492 	v3_write_gpa_memory(&vm->cores[0],(addr_t)(base+8*i),8,(uint8_t*) &tss_data);
495     PrintDebug(vm,VCORE_NONE,"hvm: wrote TSS at %p\n",base);
499 PTS MAP FIRST 512 GB identity mapped:
// GPA placement of the stub page tables: two pages (PML4 + one PDPE page)
// ending five pages from the top of guest memory.
506 static void get_pt_loc(struct v3_vm_info *vm, void **base, uint64_t *limit)
508     *base = (void*)PAGE_ADDR(vm->mem_size-(5+1)*PAGE_SIZE);
509     *limit =  2*PAGE_SIZE;
// Build an identity map of the first 512 GB using one PML4 page and one
// PDPE page of 512 x 1GB large-page entries.  PML4 entry 0 points at the
// PDPE page; the remaining PML4 entries are written as the (zeroed) template.
// NOTE(review): fragment — the pdpe flag setup (present/large-page bits)
// between the memset and the loop is not visible in this chunk.
512 static void write_pt(struct v3_vm_info *vm)
516     struct pml4e64 pml4e;
520     get_pt_loc(vm,&base, &size);
521     if (size!=2*PAGE_SIZE) {
522 	PrintError(vm,VCORE_NONE,"Cannot support pt request, defaulting\n");
525     memset(&pdpe,0,sizeof(pdpe));
// One 1GB entry per PDPE slot, identity mapped.
530     for (i=0;i<512;i++) {
531 	pdpe.pd_base_addr = i*0x40000;  // 0x40000 4KB pages = 1 GB per entry
532 	v3_write_gpa_memory(&vm->cores[0],(addr_t)(base+PAGE_SIZE+i*sizeof(pdpe)),sizeof(pdpe),(uint8_t*)&pdpe);
535     memset(&pml4e,0,sizeof(pml4e));
// PML4 entry 0 -> the PDPE page we just wrote (it lives one page above base).
538     pml4e.pdp_base_addr = PAGE_BASE_ADDR((addr_t)(base+PAGE_SIZE));
540     v3_write_gpa_memory(&vm->cores[0],(addr_t)base,sizeof(pml4e),(uint8_t*)&pml4e);
542     for (i=1;i<512;i++) {
544 	v3_write_gpa_memory(&vm->cores[0],(addr_t)(base+i*sizeof(pml4e)),sizeof(pml4e),(uint8_t*)&pml4e);
547     PrintDebug(vm,VCORE_NONE,"hvm: Wrote page tables (1 PML4, 1 PDPE) at %p\n",base);
// GPA placement of the boundary page: one page below the page tables,
// seven pages from the top of guest memory.
550 static void get_bp_loc(struct v3_vm_info *vm, void **base, uint64_t *limit)
552     *base = (void*) PAGE_ADDR(vm->mem_size-(6+1)*PAGE_SIZE);
// Fill the boundary page with a fixed pattern (a stack-overrun canary sitting
// between the HRT stack and the page tables), 8 bytes at a time.
// NOTE(review): the declaration/initialization of `data` is not visible in
// this fragment; per the layout comment below it is presumably all 0xff.
556 static void write_bp(struct v3_vm_info *vm)
563     get_bp_loc(vm,&base,&limit);
565     for (i=0;i<limit/8;i++) {
566 	v3_write_gpa_memory(&vm->cores[0],(addr_t)(base+i*8),8,(uint8_t*)&data);
569     PrintDebug(vm,VCORE_NONE,"hvm: wrote boundary page at %p\n", base);
// Minimum HRT stack reservation: 16 KB (four 4KB pages).
573 #define MIN_STACK (4096*4)
// Compute where the HRT image may live: from the page-aligned first HRT GPA
// up to the boundary page, leaving at least a minimal stack below the
// boundary page.  Errors out if the HRT region would collide with its stack.
576 static void get_hrt_loc(struct v3_vm_info *vm, void **base, uint64_t *limit)
581     get_bp_loc(vm,&bp_base,&bp_limit);
583     // assume at least a minimal stack
587     *base = (void*)PAGE_ADDR(vm->hvm_state.first_hrt_gpa);
589     if (bp_base < *base+PAGE_SIZE) {
590 	PrintError(vm,VCORE_NONE,"hvm: HRT stack colides with HRT\n");
593     *limit = bp_base - *base;
// Copy the HRT image file into guest memory at the HRT region, after
// checking that the image fits in the available space.
596 static void write_hrt(struct v3_vm_info *vm)
601     get_hrt_loc(vm,&base,&limit);
603     if (vm->hvm_state.hrt_file->size > limit) {
604 	PrintError(vm,VCORE_NONE,"hvm: Cannot map HRT because it is too big (%llu bytes, but only have %llu space\n", vm->hvm_state.hrt_file->size, (uint64_t)limit);
608     v3_write_gpa_memory(&vm->cores[0],(addr_t)base,vm->hvm_state.hrt_file->size,vm->hvm_state.hrt_file->data);
610     PrintDebug(vm,VCORE_NONE,"hvm: wrote HRT %s at %p\n", vm->hvm_state.hrt_file->tag,base);
624 We do not touch the ROS portion of the address space.
625 The HRT portion looks like:
627 INT_HANDLER (1 page - page aligned)
628 IDT (1 page - page aligned)
629 GDT (1 page - page aligned)
630     TSS (1 page - page aligned)
631     PAGETABLES (identity map of first N GB)
632 ROOT PT first, followed by 2nd level, etc.
633 Currently PML4 followed by 1 PDPE for 512 GB of mapping
634 BOUNDARY PAGE (all 0xff - avoid smashing page tables in case we keep going...)
635 (stack - we will push machine description)
637 HRT (as many pages as needed, page-aligned, starting at first HRT address)
// Lay out the HRT portion of the address space for boot (stub handler, IDT,
// GDT, TSS, page tables, boundary page, HRT image — per the layout comment
// above).  No-op for a non-HVM VM.
// NOTE(review): fragment — only the null-handler write is visible here; the
// calls to write_idt/write_gdt/write_tss/write_pt/write_bp/write_hrt fall in
// the elided lines.
644 int v3_setup_hvm_vm_for_boot(struct v3_vm_info *vm)
646     if (!vm->hvm_state.is_hvm) {
647 	PrintDebug(vm,VCORE_NONE,"hvm: skipping HVM setup for boot as this is not an HVM\n");
651     PrintDebug(vm,VCORE_NONE,"hvm: setup of HVM memory begins\n");
653     write_null_int_handler(vm);
665     PrintDebug(vm,VCORE_NONE,"hvm: setup of HVM memory done\n");
673 IDTR points to stub IDT
674 GDTR points to stub GDT
675 TS points to stub TSS
676 CR3 points to root page table
679 RSP is TOS (looks like a call)
681 0 (fake return address) <= RSP
683 RIP is entry point to HRT
684 RDI points to machine info on stack
686 Other regs are zeroed
688 shadow/nested paging state reset for long mode
// Prepare an HRT core to enter the HRT directly in 64-bit long mode with
// paging on: zero all register state, point RSP/RBP/RDI at the boundary
// page, RIP into the HRT image, set CR0/CR3/CR4/EFER for long mode, and
// install the stub IDT/GDT/TSS and flat CS/DS-style segments.
// No-op for ROS cores.
// NOTE(review): this block runs past the end of the visible chunk; the
// shadow-paging reset and the return are not shown here.
691 int v3_setup_hvm_hrt_core_for_boot(struct guest_info *core)
696     if (!core->hvm_state.is_hrt) {
697 	PrintDebug(core->vm_info,core,"hvm: skipping HRT setup for core %u as it is not an HRT core\n", core->vcpu_id);
701     PrintDebug(core->vm_info, core, "hvm: setting up HRT core (%u) for boot\n", core->vcpu_id);
// Start from a clean slate: no stale register/segment/FP state.
704     memset(&core->vm_regs,0,sizeof(core->vm_regs));
705     memset(&core->ctrl_regs,0,sizeof(core->ctrl_regs));
706     memset(&core->dbg_regs,0,sizeof(core->dbg_regs));
707     memset(&core->segments,0,sizeof(core->segments));
708     memset(&core->msrs,0,sizeof(core->msrs));
709     memset(&core->fp_state,0,sizeof(core->fp_state));
711     // We are in long mode with virtual memory and we want
712     // to start immediately
713     core->cpl = 0; // we are going right into the kernel
714     core->cpu_mode = LONG;
715     core->mem_mode = VIRTUAL_MEM;
716     core->core_run_state = CORE_RUNNING ;
718     // We are going to enter right into the HRT
719     // HRT stack and argument passing
720     get_bp_loc(core->vm_info, &base,&limit);
721     // TODO: push description here
722     core->vm_regs.rsp = (v3_reg_t) base;  // so if we ret, we will blow up
723     core->vm_regs.rbp = (v3_reg_t) base;
724     // TODO: RDI should really get pointer to description
725     core->vm_regs.rdi = (v3_reg_t) base;
727     get_hrt_loc(core->vm_info, &base,&limit);
728     core->rip = (uint64_t) base + 0x40;  // hack for test.o
730     // Setup CRs for long mode and our stub page table
// CR0: PG (bit 31) and PE (bit 0).
732     core->ctrl_regs.cr0 = 0x80000001;
733     // CR2: don't care (output from #PF)
734     // CR3: set to our PML4E, without setting PCD or PWT
735     get_pt_loc(core->vm_info, &base,&limit);
736     core->ctrl_regs.cr3 = PAGE_ADDR((addr_t)base);
737     // CR4: PGE, PAE, PSE (last byte: 1 0 1 1 0 0 0 0)
738     core->ctrl_regs.cr4 = 0xb0;
740     // RFLAGS zeroed is fine: come in with interrupts off
741     // EFER needs SVME LMA LME (last 16 bits: 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0)
742     core->ctrl_regs.efer = 0x1500;
748 	selector is 13 bits of index, 1 bit table indicator
751 	index is scaled by 8, even in long mode, where some entries
752 	are 16 bytes long....
753 	   -> code, data descriptors have 8 byte format
754 	      because base, limit, etc, are ignored (no segmentation)
755 	   -> interrupt/trap gates have 16 byte format
756 	      because offset needs to be 64 bits
759     // Install our stub IDT
760     get_idt_loc(core->vm_info, &base,&limit);
761     core->segments.idtr.selector = 0;  // entry 0 (NULL) of the GDT
762     core->segments.idtr.base = (addr_t) base;
763     core->segments.idtr.limit = limit-1;
764     core->segments.idtr.type = 0xe;
765     core->segments.idtr.system = 1;
766     core->segments.idtr.dpl = 0;
767     core->segments.idtr.present = 1;
768     core->segments.idtr.long_mode = 1;
770     // Install our stub GDT
771     get_gdt_loc(core->vm_info, &base,&limit);
772     core->segments.gdtr.selector = 0;
773     core->segments.gdtr.base = (addr_t) base;
774     core->segments.gdtr.limit = limit-1;
775     core->segments.gdtr.type = 0x6;
776     core->segments.gdtr.system = 1;
777     core->segments.gdtr.dpl = 0;
778     core->segments.gdtr.present = 1;
779     core->segments.gdtr.long_mode = 1;
// Task register -> the zeroed stub TSS.
782     get_tss_loc(core->vm_info, &base,&limit);
783     core->segments.tr.selector = 0;
784     core->segments.tr.base = (addr_t) base;
785     core->segments.tr.limit = limit-1;
786     core->segments.tr.type = 0x6;
787     core->segments.tr.system = 1;
788     core->segments.tr.dpl = 0;
789     core->segments.tr.present = 1;
790     core->segments.tr.long_mode = 1;
// CS: flat 64-bit code segment, GDT entry 1.
796     core->segments.cs.selector = 0x8 ; // entry 1 of GDT (RPL=0)
797     core->segments.cs.base = (addr_t) base;
798     core->segments.cs.limit = limit;
799     core->segments.cs.type = 0xe;
800     core->segments.cs.system = 0;
801     core->segments.cs.dpl = 0;
802     core->segments.cs.present = 1;
803     core->segments.cs.long_mode = 1;
805     // DS, SS, etc are identical
806     core->segments.ds.selector = 0x10; // entry 2 of GDT (RPL=0)
807     core->segments.ds.base = (addr_t) base;
808     core->segments.ds.limit = limit;
809     core->segments.ds.type = 0x6;
810     core->segments.ds.system = 0;
811     core->segments.ds.dpl = 0;
812     core->segments.ds.present = 1;
813     core->segments.ds.long_mode = 1;
// SS/ES/FS/GS mirror DS.
815     memcpy(&core->segments.ss,&core->segments.ds,sizeof(core->segments.ds));
816     memcpy(&core->segments.es,&core->segments.ds,sizeof(core->segments.ds));
817     memcpy(&core->segments.fs,&core->segments.ds,sizeof(core->segments.ds));
818     memcpy(&core->segments.gs,&core->segments.ds,sizeof(core->segments.ds));
820     // reset paging here for shadow...
822     if (core->shdw_pg_mode != NESTED_PAGING) {
823 	PrintError(core->vm_info, core, "hvm: shadow paging guest... this will end badly\n");