Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches can be checked out the same way.
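For example, to work from a release branch instead, list the remote branches and track one locally. This is only a sketch: the branch name Release-1.2 below is a placeholder, substitute one of the names actually shown by git branch -r.

  # list the branches published on the remote
  git branch -r

  # create and switch to a local branch tracking the chosen release branch
  # (Release-1.2 is a hypothetical name; use one from the listing above)
  git checkout --track -b Release-1.2 origin/Release-1.2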


palacios/src/palacios/svm.c
/* (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> */
/* (c) 2008, The V3VEE Project <http://www.v3vee.org> */

#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();
extern void EnableInts();


static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_AllocPages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}

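/* Set up the VMCB so the guest starts like a freshly reset x86 CPU:
 * real mode, CS:IP = f000:fff0, with intercepts enabled for the SVM
 * instructions, HLT, external interrupts, and any I/O ports hooked by
 * the VMM, and either shadow or nested paging configured below. */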
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->svm_instrs.VMMCALL = 1;
  ctrl_area->svm_instrs.VMLOAD = 1;
  ctrl_area->svm_instrs.VMSAVE = 1;
  ctrl_area->svm_instrs.STGI = 1;
  ctrl_area->svm_instrs.CLGI = 1;
  ctrl_area->svm_instrs.SKINIT = 1;
  ctrl_area->svm_instrs.RDTSCP = 1;
  ctrl_area->svm_instrs.ICEBP = 1;
  ctrl_area->svm_instrs.WBINVD = 1;
  ctrl_area->svm_instrs.MONITOR = 1;
  ctrl_area->svm_instrs.MWAIT_always = 1;
  ctrl_area->svm_instrs.MWAIT_if_armed = 1;

  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
  */

  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;

  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;

  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}

static int init_svm_guest(struct guest_info * info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void *)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t *)(info->vmm_data), info);

  info->run_state = VM_STOPPED;

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}
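
/* Main guest entry/exit loop: global interrupts are blocked with CLGI around
 * each entry, the guest's TSC offset is refreshed, safe_svm_launch performs
 * the world switch, and every #VMEXIT is handed to handle_svm_exit until an
 * unhandled exit puts the VM into the error state. */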
// can we start a kernel thread here...
int start_svm_guest(struct guest_info * info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
  uint_t num_exits = 0;

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  info->run_state = VM_RUNNING;

  while (1) {
    ullong_t tmp_tsc;

    EnableInts();
    CLGI();

    //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t *)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    STGI();

    if ((num_exits % 25) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
    }

    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      info->run_state = VM_ERROR;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(info);
      PrintV3CtrlRegs(info);
      PrintV3GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char *)host_addr, 15);

      break;
    }
  }
  return 0;
}

/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  ret = cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else

  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}

void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = V3_AllocPages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1 << 3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2 << 3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }

  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }

  /* ** */

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  //  guest_state->cpl = 0;

  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void *)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t *)((uchar_t *)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t *)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t *)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t *)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }

  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif