Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


added emulator
[palacios.git] / palacios / src / palacios / svm.c
1 #include <palacios/svm.h>
2 #include <palacios/vmm.h>
3
4 #include <palacios/vmcb.h>
5 #include <palacios/vmm_mem.h>
6 #include <palacios/vmm_paging.h>
7 #include <palacios/svm_handler.h>
8
9 #include <palacios/vmm_debug.h>
10 #include <palacios/vm_guest_mem.h>
11
12 #include <palacios/vmm_decoder.h>
13
14
15
16
17 extern uint_t cpuid_ecx(uint_t op);
18 extern uint_t cpuid_edx(uint_t op);
19 extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte); 
20 extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
21 extern uint_t launch_svm(vmcb_t * vmcb_addr);
22 extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);
23
24 extern void STGI();
25 extern void CLGI();
26
27 extern uint_t Get_CR3();
28
29
30 extern void DisableInts();
31 extern void EnableInts();
32
33
34
35
36
37
38
39 static vmcb_t * Allocate_VMCB() {
40   vmcb_t * vmcb_page = (vmcb_t *)V3_AllocPages(1);
41
42
43   memset(vmcb_page, 0, 4096);
44
45   return vmcb_page;
46 }
47
48
49
50
51
/* Initialize a VMCB so the guest boots like a real machine coming out of
 * reset: real mode, executing at the BIOS reset vector (CS base 0xf0000,
 * RIP 0xfff0 => physical 0xffff0).
 *
 * Configures intercepts (SVM instructions, HLT, NMI/SMI/INIT, interrupts,
 * optional IO bitmap) and, depending on vm_info->shdw_pg_mode, sets up
 * either shadow paging or nested paging.
 *
 * Parameters:
 *   vmcb    - the (pre-zeroed) VMCB to fill in
 *   vm_info - guest descriptor; also mutated here (rdx, direct_map_pt,
 *             shdw_pg_state)
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;   // reset-vector offset within the BIOS segment

  guest_state->cpl = 0;

  // Intercept all reads/writes of CR0 so the VMM sees mode transitions
  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1

  // Intercept every SVM-specific instruction the guest might issue
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->svm_instrs.VMMCALL = 1;
  ctrl_area->svm_instrs.VMLOAD = 1;
  ctrl_area->svm_instrs.VMSAVE = 1;
  ctrl_area->svm_instrs.STGI = 1;
  ctrl_area->svm_instrs.CLGI = 1;
  ctrl_area->svm_instrs.SKINIT = 1;
  ctrl_area->svm_instrs.RDTSCP = 1;
  ctrl_area->svm_instrs.ICEBP = 1;
  ctrl_area->svm_instrs.WBINVD = 1;
  ctrl_area->svm_instrs.MONITOR = 1;
  ctrl_area->svm_instrs.MWAIT_always = 1;
  ctrl_area->svm_instrs.MWAIT_if_armed = 1;


  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;   // nonzero ASID required; presumably 0 is reserved for the host — confirm against AMD APM


  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
  
    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI=1;
  ctrl_area->instrs.SMI=1;
  ctrl_area->instrs.INIT=1;
  ctrl_area->instrs.PAUSE=1;
  ctrl_area->instrs.shutdown_evts=1;



  // NOTE(review): looks like a processor-id style value placed in rdx at
  // reset, but init_svm_guest() later zeroes vm_regs.rdx — confirm intent
  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;   // reset value of CR0 (paging/protection off)

  // Start execution at the BIOS: CS f000, base f0000 => RIP 0xffff0
  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit=0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;   // assumes real-mode-compatible attribs — TODO confirm encoding


  // All data segments start as null-selector, base 0, full 16-bit limit
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }
  
  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  // Architectural reset values for the debug registers
  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  // If any IO ports are hooked, build the SVM IO permission bitmap
  // (3 contiguous pages) and set a bit for each hooked port.
  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;
    
    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }



  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    // Intercept CR3 accesses and TLB invalidations so the shadow tables
    // can track the guest's page table changes
    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    // Page faults drive shadow page table maintenance
    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;   // flush the whole TLB on every VMRUN
    


    guest_state->g_pat = 0x7040600070406ULL;

    // Paging is on in the shadow context even though the guest starts unpaged
    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }



}
236
237
238 static int init_svm_guest(struct guest_info *info) {
239  
240   PrintDebug("Allocating VMCB\n");
241   info->vmm_data = (void*)Allocate_VMCB();
242
243
244   //PrintDebug("Generating Guest nested page tables\n");
245   //  info->page_tables = NULL;
246   //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
247   //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
248   //  PrintDebugPageTables(info->page_tables);
249
250
251   PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
252   Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
253   
254
255   info->run_state = VM_STOPPED;
256
257   //  info->rip = 0;
258
259   info->vm_regs.rdi = 0;
260   info->vm_regs.rsi = 0;
261   info->vm_regs.rbp = 0;
262   info->vm_regs.rsp = 0;
263   info->vm_regs.rbx = 0;
264   info->vm_regs.rdx = 0;
265   info->vm_regs.rcx = 0;
266   info->vm_regs.rax = 0;
267   
268   return 0;
269 }
270
271
272 // can we start a kernel thread here...
273 static int start_svm_guest(struct guest_info *info) {
274   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
275   vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
276   uint_t num_exits = 0;
277
278
279
280   PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
281   //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
282
283   info->run_state = VM_RUNNING;
284
285   while (1) {
286     ullong_t tmp_tsc;
287
288
289     EnableInts();
290     CLGI();
291
292     //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);
293
294     rdtscll(info->time_state.cached_host_tsc);
295     guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
296
297     safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
298
299     rdtscll(tmp_tsc);
300     //PrintDebug("SVM Returned\n");
301
302
303     v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
304     num_exits++;
305
306     STGI();
307
308     if ((num_exits % 25) == 0) {
309       PrintDebug("SVM Exit number %d\n", num_exits);
310     }
311
312      
313     if (handle_svm_exit(info) != 0) {
314
315       addr_t host_addr;
316       addr_t linear_addr = 0;
317
318       info->run_state = VM_ERROR;
319
320       PrintDebug("SVM ERROR!!\n"); 
321       
322       PrintDebug("RIP: %x\n", guest_state->rip);
323
324
325       linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));
326
327
328       PrintDebug("RIP Linear: %x\n", linear_addr);
329       PrintV3Segments(info);
330       PrintV3CtrlRegs(info);
331       PrintV3GPRs(info);
332       
333       if (info->mem_mode == PHYSICAL_MEM) {
334         guest_pa_to_host_pa(info, linear_addr, &host_addr);
335       } else if (info->mem_mode == VIRTUAL_MEM) {
336         guest_va_to_host_pa(info, linear_addr, &host_addr);
337       }
338
339
340       PrintDebug("Host Address of rip = 0x%x\n", host_addr);
341
342       PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
343       PrintTraceMemDump((char*)host_addr, 15);
344
345       break;
346     }
347   }
348   return 0;
349 }
350
351
352
353
354 /* Checks machine SVM capability */
355 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
356 int is_svm_capable() {
357
358 #if 1
359   // Dinda
360
361   uint_t ret;
362   uint_t vm_cr_low = 0, vm_cr_high = 0;
363
364
365   ret =  cpuid_ecx(CPUID_FEATURE_IDS);
366   
367   PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n",ret);
368
369   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
370     PrintDebug("SVM Not Available\n");
371     return 0;
372   }  else {
373     Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
374     
375     PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);
376     
377     if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
378       PrintDebug("SVM is available but is disabled.\n");
379
380       ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
381       
382       PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
383       
384       if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
385         PrintDebug("SVM BIOS Disabled, not unlockable\n");
386       } else {
387         PrintDebug("SVM is locked with a key\n");
388       }
389       return 0;
390
391     } else {
392       PrintDebug("SVM is available and  enabled.\n");
393
394       ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
395       
396       PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
397
398       if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
399         PrintDebug("SVM Nested Paging not supported\n");
400       } else {
401         PrintDebug("SVM Nested Paging supported\n");
402       }
403       
404       return 1;
405       
406     }
407   }
408
409 #else
410
411   uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
412   uint_t vm_cr_low = 0, vm_cr_high = 0;
413
414
415   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
416     PrintDebug("SVM Not Available\n");
417     return 0;
418   } 
419
420   Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
421
422   PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);
423
424
425   // this part is clearly wrong, since the np bit is in 
426   // edx, not ecx
427   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
428     PrintDebug("Nested Paging not supported\n");
429   } else {
430     PrintDebug("Nested Paging supported\n");
431   }
432
433   if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
434     PrintDebug("SVM is disabled.\n");
435     return 1;
436   }
437
438   ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
439
440   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
441     PrintDebug("SVM BIOS Disabled, not unlockable\n");
442   } else {
443     PrintDebug("SVM is locked with a key\n");
444   }
445
446   return 0;
447
448 #endif
449
450 }
451
452 int has_svm_nested_paging() {
453   uint32_t ret;
454
455   ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
456       
457   //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
458   
459   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
460     PrintDebug("SVM Nested Paging not supported\n");
461     return 0;
462   } else {
463     PrintDebug("SVM Nested Paging supported\n");
464     return 1;
465   }
466
467 }
468
469
470
471 void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
472   reg_ex_t msr;
473   void * host_state;
474
475
476   // Enable SVM on the CPU
477   Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
478   msr.e_reg.low |= EFER_MSR_svm_enable;
479   Set_MSR(EFER_MSR, 0, msr.e_reg.low);
480   
481   PrintDebug("SVM Enabled\n");
482
483
484   // Setup the host state save area
485   host_state = V3_AllocPages(4);
486   
487   msr.e_reg.high = 0;
488   msr.e_reg.low = (uint_t)host_state;
489
490
491   PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
492   Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
493
494
495
496   // Setup the SVM specific vmm operations
497   vmm_ops->init_guest = &init_svm_guest;
498   vmm_ops->start_guest = &start_svm_guest;
499   vmm_ops->has_nested_paging = &has_svm_nested_paging;
500
501   return;
502 }
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555 /*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
556   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
557   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
558   uint_t i;
559
560
561   guest_state->rsp = vm_info.vm_regs.rsp;
562   guest_state->rip = vm_info.rip;
563
564
565   //ctrl_area->instrs.instrs.CR0 = 1;
566   ctrl_area->cr_reads.cr0 = 1;
567   ctrl_area->cr_writes.cr0 = 1;
568
569   guest_state->efer |= EFER_MSR_svm_enable;
570   guest_state->rflags = 0x00000002; // The reserved bit is always 1
571   ctrl_area->svm_instrs.VMRUN = 1;
572   // guest_state->cr0 = 0x00000001;    // PE 
573   ctrl_area->guest_ASID = 1;
574
575
576   ctrl_area->exceptions.de = 1;
577   ctrl_area->exceptions.df = 1;
578   ctrl_area->exceptions.pf = 1;
579   ctrl_area->exceptions.ts = 1;
580   ctrl_area->exceptions.ss = 1;
581   ctrl_area->exceptions.ac = 1;
582   ctrl_area->exceptions.mc = 1;
583   ctrl_area->exceptions.gp = 1;
584   ctrl_area->exceptions.ud = 1;
585   ctrl_area->exceptions.np = 1;
586   ctrl_area->exceptions.of = 1;
587   ctrl_area->exceptions.nmi = 1;
588
589   guest_state->cs.selector = 0x0000;
590   guest_state->cs.limit=~0u;
591   guest_state->cs.base = guest_state->cs.selector<<4;
592   guest_state->cs.attrib.raw = 0xf3;
593
594   
595   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
596   for ( i = 0; segregs[i] != NULL; i++) {
597     struct vmcb_selector * seg = segregs[i];
598     
599     seg->selector = 0x0000;
600     seg->base = seg->selector << 4;
601     seg->attrib.raw = 0xf3;
602     seg->limit = ~0u;
603   }
604   
605   if (vm_info.io_map.num_ports > 0) {
606     struct vmm_io_hook * iter;
607     addr_t io_port_bitmap;
608     
609     io_port_bitmap = (addr_t)V3_AllocPages(3);
610     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
611     
612     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
613
614     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
615
616     FOREACH_IO_HOOK(vm_info.io_map, iter) {
617       ushort_t port = iter->port;
618       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
619
620       bitmap += (port / 8);
621       PrintDebug("Setting Bit in block %x\n", bitmap);
622       *bitmap |= 1 << (port % 8);
623     }
624
625
626     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
627
628     ctrl_area->instrs.IOIO_PROT = 1;
629   }
630
631   ctrl_area->instrs.INTR = 1;
632
633
634
635   if (vm_info.page_mode == SHADOW_PAGING) {
636     PrintDebug("Creating initial shadow page table\n");
637     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
638     PrintDebug("Created\n");
639
640     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
641
642     ctrl_area->cr_reads.cr3 = 1;
643     ctrl_area->cr_writes.cr3 = 1;
644
645
646     ctrl_area->instrs.INVLPG = 1;
647     ctrl_area->instrs.INVLPGA = 1;
648
649     guest_state->g_pat = 0x7040600070406ULL;
650
651     guest_state->cr0 |= 0x80000000;
652   } else if (vm_info.page_mode == NESTED_PAGING) {
653     // Flush the TLB on entries/exits
654     //ctrl_area->TLB_CONTROL = 1;
655
656     // Enable Nested Paging
657     //ctrl_area->NP_ENABLE = 1;
658
659     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
660
661         // Set the Nested Page Table pointer
662     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
663     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
664
665     //   ctrl_area->N_CR3 = Get_CR3();
666     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
667
668     //    guest_state->g_pat = 0x7040600070406ULL;
669   }
670
671
672
673 }
674 */
675
676
677
678
679
680
681
682 #if 0
/* DEAD CODE: compiled out by the surrounding #if 0.
 *
 * Early protected-mode VMCB setup (adapted from TVMM, per the author's
 * note below): flat 32-bit code/data segments mirroring selectors 0x08
 * and 0x10, host GDT/IDT copied into the guest, all exceptions and IO
 * intercepted. Kept for reference only; Init_VMCB_BIOS() is the live
 * initializer.
 */
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  // All data segment registers get selector 0x10: flat, present, 4GB
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  // Intercept essentially every exception class
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  // Intercept ALL IO: bitmap is filled with 1 bits below
  // (memset's value arg is truncated to a byte, so 0xffffffff => 0xff)
  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    // Copy the host's GDT/IDT registers into the guest state
    // (descriptor-table register layout: 2-byte limit then 4-byte base)
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}
841
842
843
844
845
846 #endif
847
848