Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
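For example, to work on a release branch instead, list the remote branches and track the one you want (the branch name below is only illustrative):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2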


palacios/src/palacios/svm.c
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_emulate.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();


// Allocate and zero a single page to hold the VMCB
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


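/*
 * Init_VMCB_BIOS: fill in a VMCB so the guest begins execution at the
 * architectural reset vector (CS base 0xf0000, RIP offset 0xfff0) with CR0
 * at its reset value.  Intercepts are armed for CR0 accesses, the listed
 * exceptions, HLT, VMRUN, physical interrupts, and any hooked I/O ports;
 * under shadow paging an initial passthrough page table is installed and
 * CR3 accesses are intercepted as well.
 *
 * Note: vm_info is passed by value, so assignments to it in this function
 * do not propagate back to the caller's guest_info.
 */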
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  // guest_state->rip = vm_info.rip;
  guest_state->rip = 0xfff0;   // reset vector offset within the CS segment

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  vm_info.vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;   // CD, NW, and ET set: the architectural reset value of CR0

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    // One bit per I/O port; SVM requires a 3 page (12KB) I/O permissions map
    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;   // enable paging (PG)
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}


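/*
 * init_svm_guest: allocate a VMCB for this guest, populate it with the
 * BIOS boot state set up by Init_VMCB_BIOS(), and zero the guest's
 * general purpose registers.
 */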
static int init_svm_guest(struct guest_info * info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void *)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t *)(info->vmm_data), *info);

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


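/*
 * start_svm_guest: the VM entry/exit loop.  Each iteration disables global
 * interrupts (CLGI), recomputes the guest's TSC_OFFSET from the cached host
 * TSC, enters the guest with safe_svm_launch(), then re-enables global
 * interrupts (STGI) on #VMEXIT and dispatches the exit to handle_svm_exit().
 * A nonzero return from the exit handler is treated as fatal: the loop dumps
 * the faulting guest RIP and instruction bytes, then gives up.
 */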
// can we start a kernel thread here...
static int start_svm_guest(struct guest_info * info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {
    ullong_t tmp_tsc;

    CLGI();

    PrintDebug("SVM Entry...\n");

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t *)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);

    STGI();

    if (handle_svm_exit(info) != 0) {
      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);

      guest_pa_to_host_pa(info, linear_addr, &host_addr);

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char *)host_addr, 15);

      break;
    }
  }
  return 0;
}



/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {
  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  // Nested paging support is reported in EDX of the SVM feature leaf,
  // not in the standard feature bits read above
  if ((cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS) & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("Nested Paging not supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;
}


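/*
 * Init_SVM: enable SVM on this CPU by setting EFER.SVME, point the
 * VM_HSAVE_PA MSR at a freshly allocated host state save area, and
 * register the SVM-specific guest init/start operations with the VMM.
 */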
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;

  return;
}
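
/*
 * Illustrative usage sketch (not part of this file): a host OS embedding
 * Palacios would typically probe for SVM and then register and use these
 * operations roughly as follows.  The guest_info setup is elided and the
 * exact sequence is hypothetical.
 *
 *   struct vmm_ctrl_ops vmm_ops;
 *   struct guest_info guest;
 *
 *   if (is_svm_capable()) {
 *     Init_SVM(&vmm_ops);
 *     vmm_ops.init_guest(&guest);
 *     vmm_ops.start_guest(&guest);  // loops on VM entry/exit; returns only on error
 *   }
 */
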
/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1 << 3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2 << 3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void *)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t *)((uchar_t *)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t *)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t *)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t *)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif