Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way, as in the example below.
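For instance, a release branch can be tracked the same way. The branch name Release-1.2 below is only a placeholder; list the remote branches first and substitute the one you actually want:

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2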


Commit: large change to break apart the guest operation mode parameters
File:   palacios/src/palacios/svm.c

#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();


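// Allocate_VMCB: grab a single page from the host OS and zero it for use as a VMCB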
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


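// Init_VMCB_BIOS: set up the VMCB so the guest starts at the BIOS reset vector
// (CS:IP = f000:fff0), with CR0 accesses, selected exceptions, hooked IO ports,
// and interrupts intercepted, and with shadow or nested paging configured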
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  // guest_state->rip = vm_info.rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  vm_info.vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info.shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }


}


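// init_svm_guest: allocate and initialize the guest's VMCB and clear its GPRs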
static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();


  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);


  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


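// start_svm_guest: enter the guest in a loop via safe_svm_launch, accounting for
// guest time on each pass, and handle every #VMEXIT; on an unhandled exit, dump
// the faulting RIP and surrounding instruction bytes and stop the guest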
// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {
    ullong_t tmp_tsc;


    CLGI();

    //  PrintDebug("SVM Entry...\n");

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");


    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);

    STGI();


    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);


      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


      PrintDebug("RIP Linear: %x\n", linear_addr);


      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }


      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {
  uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;
}


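// Init_SVM: enable SVM in EFER, point the CPU at a host state save area, and
// register the SVM-specific init/start operations with the VMM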
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;


  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;


  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }


}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif