Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way; see the example below.
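As a sketch, a release branch can be tracked just like devel. The branch name below is illustrative, so list the remote branches first to see what actually exists:

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2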


Below is palacios/src/palacios/svm.c from palacios.git, as of the "boot process to serial initialization" commit.
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();

// Allocate and zero a single page to hold the VMCB
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}

// Set up the VMCB so the guest starts at the BIOS reset vector (CS:IP = f000:fff0)
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  // guest_state->rip = vm_info.rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  vm_info.vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;  // CD | NW | ET; protection and paging off (real mode)

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  // Build a 3-page IO permission bitmap and set the intercept bit for each hooked port
  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;

  if (vm_info.shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

    // Intercept CR3 accesses and INVLPG so the shadow page tables stay consistent
    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;  // Turn on paging so the guest runs on the shadow page tables

  } else if (vm_info.shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}


// Allocate the VMCB, initialize it for BIOS boot, and clear the guest GPRs
static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}

// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  // Entry/exit loop: global interrupts are disabled across each VMRUN
  while (1) {
    ullong_t tmp_tsc;

    CLGI();

    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);

    STGI();

    // A nonzero return means the exit could not be handled; dump state and stop
    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {
  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;
}


void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }

  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }

  /* ** */

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  //  guest_state->cpl = 0;

  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }

  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif