Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Added fix to is_svm_capable() to correctly detect nested paging
[palacios.git] / palacios / src / palacios / svm.c
1 #include <palacios/svm.h>
2 #include <palacios/vmm.h>
3
4 #include <palacios/vmcb.h>
5 #include <palacios/vmm_mem.h>
6 #include <palacios/vmm_paging.h>
7 #include <palacios/svm_handler.h>
8
9 #include <palacios/vmm_debug.h>
10 #include <palacios/vm_guest_mem.h>
11
12 #include <palacios/vmm_decoder.h>
13
14
15 extern struct vmm_os_hooks * os_hooks;
16
17 extern uint_t cpuid_ecx(uint_t op);
18 extern uint_t cpuid_edx(uint_t op);
19 extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte); 
20 extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
21 extern uint_t launch_svm(vmcb_t * vmcb_addr);
22 extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);
23
24 extern void STGI();
25 extern void CLGI();
26
27 extern uint_t Get_CR3();
28
29
30 extern void DisableInts();
31
32
33
34
35
36
37
38 static vmcb_t * Allocate_VMCB() {
39   vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);
40
41
42   memset(vmcb_page, 0, 4096);
43
44   return vmcb_page;
45 }
46
47
48
49
50
/* Populate a freshly allocated VMCB so the guest begins execution in real
 * mode at the x86 reset vector (CS base 0xf0000 + RIP 0xfff0 = 0xffff0),
 * with intercepts configured for CR0/CR3 accesses, common exceptions,
 * hooked I/O ports, HLT, VMRUN, and external interrupts.
 *
 * NOTE(review): vm_info is passed BY VALUE. The writes below to
 * vm_info.vm_regs.rdx and vm_info.shdw_pg_state.shadow_cr3 only modify this
 * local copy and are never seen by the caller -- confirm whether callers
 * expect those updates. */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  // guest_state->rip = vm_info.rip;
  guest_state->rip = 0xfff0;   // offset of the reset vector within CS

  guest_state->cpl = 0;        // guest boots at the highest privilege level

  // Intercept all guest reads and writes of CR0
  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;  // exit if the guest tries to VMRUN
  ctrl_area->instrs.HLT = 1;        // exit when the guest halts
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  // Intercept the fault-class exceptions we want to observe/virtualize
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  // NOTE(review): dead store -- vm_info is a by-value copy, so the caller
  // never observes this rdx value.
  vm_info.vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;    // CD | NW | ET: x86 reset value of CR0

  // Real-mode CS pointing at the BIOS reset segment (f000:fff0 -> 0xffff0)
  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit=0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  
  // All other data segments start as flat zero-based segments
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }
  
  // Empty descriptor tables; the guest BIOS will set up its own
  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  // Architectural reset values for the debug registers
  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  // If any I/O ports are hooked, build a 3-page I/O permission bitmap with
  // one bit set per hooked port, and enable the IOIO intercept.
  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;
    
    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  // Exit to the VMM on external interrupts; virtualize interrupt masking
  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info.shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    // NOTE(review): this shadow_cr3 update is also on the by-value copy;
    // only the guest_state->cr3 assignment below takes effect here.
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

    // Intercept CR3 accesses and TLB-invalidation instructions so the
    // shadow paging code can track guest page-table changes
    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;   // flush the TLB on every VM entry
    


    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;   // PG: paging on for the shadow tables

  } else if (vm_info.shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits


    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
205
206
207
208
209
210
211
212
213
214 static int init_svm_guest(struct guest_info *info) {
215  
216   PrintDebug("Allocating VMCB\n");
217   info->vmm_data = (void*)Allocate_VMCB();
218
219
220   //PrintDebug("Generating Guest nested page tables\n");
221   //  info->page_tables = NULL;
222   //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
223   //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
224   //  PrintDebugPageTables(info->page_tables);
225
226
227   PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
228   Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);
229   
230
231   //  info->rip = 0;
232
233   info->vm_regs.rdi = 0;
234   info->vm_regs.rsi = 0;
235   info->vm_regs.rbp = 0;
236   info->vm_regs.rsp = 0;
237   info->vm_regs.rbx = 0;
238   info->vm_regs.rdx = 0;
239   info->vm_regs.rcx = 0;
240   info->vm_regs.rax = 0;
241   
242   return 0;
243 }
244
245
246 // can we start a kernel thread here...
247 static int start_svm_guest(struct guest_info *info) {
248   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
249   vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
250
251   PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
252   //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
253
254   while (1) {
255     ullong_t tmp_tsc;
256
257
258     CLGI();
259
260     PrintDebug("SVM Entry to rip=%x...\n", info->rip);
261
262     rdtscll(info->time_state.cached_host_tsc);
263     guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
264
265     safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
266
267     rdtscll(tmp_tsc);
268     //PrintDebug("SVM Returned\n");
269
270
271     v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
272
273     STGI();
274
275      
276     if (handle_svm_exit(info) != 0) {
277
278       addr_t host_addr;
279       addr_t linear_addr = 0;
280
281       PrintDebug("SVM ERROR!!\n"); 
282       
283       PrintDebug("RIP: %x\n", guest_state->rip);
284
285
286       linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));
287
288
289       PrintDebug("RIP Linear: %x\n", linear_addr);
290
291       
292       if (info->mem_mode == PHYSICAL_MEM) {
293         guest_pa_to_host_pa(info, linear_addr, &host_addr);
294       } else if (info->mem_mode == VIRTUAL_MEM) {
295         guest_va_to_host_pa(info, linear_addr, &host_addr);
296       }
297
298
299       PrintDebug("Host Address of rip = 0x%x\n", host_addr);
300
301       PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
302       PrintTraceMemDump((char*)host_addr, 15);
303
304       break;
305     }
306   }
307   return 0;
308 }
309
310
311
312
313 /* Checks machine SVM capability */
314 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
315 int is_svm_capable() {
316
317 #if 1
318   // Dinda
319
320   uint_t ret;
321   uint_t vm_cr_low = 0, vm_cr_high = 0;
322
323
324   ret =  cpuid_ecx(CPUID_FEATURE_IDS);
325   
326   PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n",ret);
327
328   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
329     PrintDebug("SVM Not Available\n");
330     return 0;
331   }  else {
332     Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
333     
334     PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);
335     
336     if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
337       PrintDebug("SVM is available but is disabled.\n");
338
339       ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
340       
341       PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
342       
343       if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
344         PrintDebug("SVM BIOS Disabled, not unlockable\n");
345       } else {
346         PrintDebug("SVM is locked with a key\n");
347       }
348       return 0;
349
350     } else {
351       PrintDebug("SVM is available and  enabled.\n");
352
353       ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
354       
355       PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
356
357       if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
358         PrintDebug("SVM Nested Paging not supported\n");
359       } else {
360         PrintDebug("SVM Nested Paging supported\n");
361       }
362       
363       return 1;
364       
365     }
366   }
367
368 #else
369
370   uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
371   uint_t vm_cr_low = 0, vm_cr_high = 0;
372
373
374   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
375     PrintDebug("SVM Not Available\n");
376     return 0;
377   } 
378
379   Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
380
381   PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);
382
383
384   // this part is clearly wrong, since the np bit is in 
385   // edx, not ecx
386   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
387     PrintDebug("Nested Paging not supported\n");
388   } else {
389     PrintDebug("Nested Paging supported\n");
390   }
391
392   if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
393     PrintDebug("SVM is disabled.\n");
394     return 1;
395   }
396
397   ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
398
399   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
400     PrintDebug("SVM BIOS Disabled, not unlockable\n");
401   } else {
402     PrintDebug("SVM is locked with a key\n");
403   }
404
405   return 0;
406
407 #endif
408
409 }
410
411
412
413 void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
414   reg_ex_t msr;
415   void * host_state;
416
417
418   // Enable SVM on the CPU
419   Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
420   msr.e_reg.low |= EFER_MSR_svm_enable;
421   Set_MSR(EFER_MSR, 0, msr.e_reg.low);
422   
423   PrintDebug("SVM Enabled\n");
424
425
426   // Setup the host state save area
427   host_state = os_hooks->allocate_pages(4);
428   
429   msr.e_reg.high = 0;
430   msr.e_reg.low = (uint_t)host_state;
431
432
433   PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
434   Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
435
436
437
438   // Setup the SVM specific vmm operations
439   vmm_ops->init_guest = &init_svm_guest;
440   vmm_ops->start_guest = &start_svm_guest;
441
442
443   return;
444 }
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497 /*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
498   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
499   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
500   uint_t i;
501
502
503   guest_state->rsp = vm_info.vm_regs.rsp;
504   guest_state->rip = vm_info.rip;
505
506
507   //ctrl_area->instrs.instrs.CR0 = 1;
508   ctrl_area->cr_reads.cr0 = 1;
509   ctrl_area->cr_writes.cr0 = 1;
510
511   guest_state->efer |= EFER_MSR_svm_enable;
512   guest_state->rflags = 0x00000002; // The reserved bit is always 1
513   ctrl_area->svm_instrs.VMRUN = 1;
514   // guest_state->cr0 = 0x00000001;    // PE 
515   ctrl_area->guest_ASID = 1;
516
517
518   ctrl_area->exceptions.de = 1;
519   ctrl_area->exceptions.df = 1;
520   ctrl_area->exceptions.pf = 1;
521   ctrl_area->exceptions.ts = 1;
522   ctrl_area->exceptions.ss = 1;
523   ctrl_area->exceptions.ac = 1;
524   ctrl_area->exceptions.mc = 1;
525   ctrl_area->exceptions.gp = 1;
526   ctrl_area->exceptions.ud = 1;
527   ctrl_area->exceptions.np = 1;
528   ctrl_area->exceptions.of = 1;
529   ctrl_area->exceptions.nmi = 1;
530
531   guest_state->cs.selector = 0x0000;
532   guest_state->cs.limit=~0u;
533   guest_state->cs.base = guest_state->cs.selector<<4;
534   guest_state->cs.attrib.raw = 0xf3;
535
536   
537   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
538   for ( i = 0; segregs[i] != NULL; i++) {
539     struct vmcb_selector * seg = segregs[i];
540     
541     seg->selector = 0x0000;
542     seg->base = seg->selector << 4;
543     seg->attrib.raw = 0xf3;
544     seg->limit = ~0u;
545   }
546   
547   if (vm_info.io_map.num_ports > 0) {
548     vmm_io_hook_t * iter;
549     addr_t io_port_bitmap;
550     
551     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
552     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
553     
554     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
555
556     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
557
558     FOREACH_IO_HOOK(vm_info.io_map, iter) {
559       ushort_t port = iter->port;
560       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
561
562       bitmap += (port / 8);
563       PrintDebug("Setting Bit in block %x\n", bitmap);
564       *bitmap |= 1 << (port % 8);
565     }
566
567
568     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
569
570     ctrl_area->instrs.IOIO_PROT = 1;
571   }
572
573   ctrl_area->instrs.INTR = 1;
574
575
576
577   if (vm_info.page_mode == SHADOW_PAGING) {
578     PrintDebug("Creating initial shadow page table\n");
579     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
580     PrintDebug("Created\n");
581
582     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
583
584     ctrl_area->cr_reads.cr3 = 1;
585     ctrl_area->cr_writes.cr3 = 1;
586
587
588     ctrl_area->instrs.INVLPG = 1;
589     ctrl_area->instrs.INVLPGA = 1;
590
591     guest_state->g_pat = 0x7040600070406ULL;
592
593     guest_state->cr0 |= 0x80000000;
594   } else if (vm_info.page_mode == NESTED_PAGING) {
595     // Flush the TLB on entries/exits
596     //ctrl_area->TLB_CONTROL = 1;
597
598     // Enable Nested Paging
599     //ctrl_area->NP_ENABLE = 1;
600
601     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
602
603         // Set the Nested Page Table pointer
604     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
605     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
606
607     //   ctrl_area->N_CR3 = Get_CR3();
608     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
609
610     //    guest_state->g_pat = 0x7040600070406ULL;
611   }
612
613
614
615 }
616 */
617
618
619
620
621
622
623
#if 0
/* Compiled out (#if 0): experimental protected-mode VMCB setup, kept for
 * reference only. Unlike Init_VMCB_BIOS, this starts the guest in
 * protected mode (CR0.PE set) with flat segments mirroring the host's
 * GDT/IDT. Admittedly adapted from TVMM and likely incorrect. */
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  // Flat data segments, all using selector 0x10
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    // NOTE(review): memset's fill value is truncated to one byte, so this
    // fills with 0xff (every port intercepted), not 0xffffffff.
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    // Mirror the host's GDTR/IDTR into the guest state
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
789
790