Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
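For example, to track a release branch (the branch name below is only illustrative; run git branch -r after cloning to see which branches actually exist):

  git checkout --track -b Release-1.0 origin/Release-1.0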


File: palacios/src/palacios/svm.c
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();


extern void DisableInts();
extern void EnableInts();

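/*
 * The VMCB is a single 4KB page: the control area (intercept and exit
 * settings) followed by the guest state save area. Allocate_VMCB() just
 * hands back one zeroed page for it.
 */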
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}

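/*
 * Init_VMCB_BIOS: put the guest into the state a real x86 is in at reset --
 * execution starts at f000:fff0 with CR0 = 0x60000010 -- and program the
 * intercepts (CR0 accesses, a set of exceptions, HLT, VMRUN, I/O, and
 * physical interrupts) that will force #VMEXITs back into the VMM.
 */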
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

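  /*
   * I/O interception: the IOPM is a 12KB (3 page) bitmap with one bit per
   * port number; a set bit makes accesses to that port exit to the VMM.
   * Only the ports that have registered hooks get their bits set.
   */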
  if (vm_info->io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;

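  /*
   * Paging setup: with shadow paging the VMM intercepts CR3 accesses and
   * INVLPG/INVLPGA and starts the guest on a passthrough (identity-mapped)
   * page table built here; with nested paging the hardware walks that same
   * passthrough table through N_CR3 instead.
   */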
  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;


    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }


}

static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();


  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);


  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}

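/*
 * Run loop: CLGI blocks physical interrupts around the world switch,
 * TSC_OFFSET is refreshed so the guest's TSC appears continuous across
 * exits, safe_svm_launch() performs the VMRUN/#VMEXIT round trip, STGI
 * re-enables interrupts, and handle_svm_exit() dispatches the exit. A
 * non-zero return from the exit handler dumps the guest state and stops
 * the loop.
 */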
// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {
    ullong_t tmp_tsc;


    EnableInts();
    CLGI();

    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    PrintDebug("Launching\n");
    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");


    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);

    STGI();


    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);


      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(&(info->segments));
      PrintV3CtrlRegs(&(info->ctrl_regs));


      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }


      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}

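/*
 * SVM detection: the SVM feature flag comes from CPUID, while the VM_CR
 * MSR's SVMDIS bit says whether the BIOS has disabled it. If it is
 * disabled, the SVML feature flag distinguishes "locked with a key"
 * (potentially unlockable) from permanently disabled.
 */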
/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  ret = cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else

  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);


  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is not disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

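/*
 * Nested paging is reported by its own CPUID feature flag, independent of
 * base SVM support, so it gets a separate query.
 */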
int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}

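/*
 * Init_SVM: enable SVM on this CPU by setting the SVME bit in EFER, point
 * the VM_HSAVE_PA MSR at a newly allocated host state save area, and hook
 * the SVM guest init/start routines into the generic vmm_ops table.
 */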
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;


  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}

/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
*/

#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif