Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
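For example, any other remote branch can be tracked with the same command, substituting the branch name reported by git branch -r (the placeholder below is not an actual branch name):

  git checkout --track -b <branch> origin/<branch>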


Below is palacios/src/palacios/svm.c from palacios.git (commit: "added copyright tags").
/* Northwestern University */
/* (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();
extern void EnableInts();

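/* Allocate_VMCB: allocates a single host page and zeroes it. The VMCB
 * (Virtual Machine Control Block) is the 4 KB structure holding the guest
 * state save area and the control/intercept settings consumed by VMRUN. */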
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_AllocPages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}

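/* Init_VMCB_BIOS: configures the VMCB so the guest starts at the x86 reset
 * vector (CS base 0xF0000, RIP 0xFFF0) in real mode, and enables the
 * instruction, exception, and I/O intercepts the VMM relies on. */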
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->svm_instrs.VMMCALL = 1;
  ctrl_area->svm_instrs.VMLOAD = 1;
  ctrl_area->svm_instrs.VMSAVE = 1;
  ctrl_area->svm_instrs.STGI = 1;
  ctrl_area->svm_instrs.CLGI = 1;
  ctrl_area->svm_instrs.SKINIT = 1;
  ctrl_area->svm_instrs.RDTSCP = 1;
  ctrl_area->svm_instrs.ICEBP = 1;
  ctrl_area->svm_instrs.WBINVD = 1;
  ctrl_area->svm_instrs.MONITOR = 1;
  ctrl_area->svm_instrs.MWAIT_always = 1;
  ctrl_area->svm_instrs.MWAIT_if_armed = 1;


  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;


  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;


    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}

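/* init_svm_guest: allocates and initializes the guest's VMCB for a BIOS
 * boot, zeroes the general purpose registers, and leaves the guest in the
 * VM_STOPPED state until start_svm_guest() is invoked. */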
static int init_svm_guest(struct guest_info * info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void *)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t *)(info->vmm_data), info);

  info->run_state = VM_STOPPED;

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}

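/* start_svm_guest: the VM entry loop. Each iteration disables global
 * interrupts with CLGI, programs the TSC offset, enters the guest via
 * safe_svm_launch(), re-enables global interrupts with STGI, and dispatches
 * the resulting #VMEXIT to handle_svm_exit(). On an unhandled exit the
 * guest state is dumped and the loop terminates. */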
// can we start a kernel thread here...
int start_svm_guest(struct guest_info * info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
  uint_t num_exits = 0;


  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  info->run_state = VM_RUNNING;

  while (1) {
    ullong_t tmp_tsc;

    EnableInts();
    CLGI();

    //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t *)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    STGI();

    if ((num_exits % 25) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
    }

    if (handle_svm_exit(info) != 0) {
      addr_t host_addr;
      addr_t linear_addr = 0;

      info->run_state = VM_ERROR;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(info);
      PrintV3CtrlRegs(info);
      PrintV3GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char *)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  ret = cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    // SVMDIS is a mask, not bit 0, so test the masked value against zero
    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;
    }
  }

#else

  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

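/* has_svm_nested_paging: queries the SVM revision/feature CPUID leaf and
 * returns 1 if the nested paging (NP) feature bit is set, 0 otherwise. */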
int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }
}

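/* Init_SVM: enables SVM on the current CPU by setting EFER.SVME, allocates
 * the host state save area and registers it via the VM_HSAVE_PA MSR, then
 * fills in the SVM-specific entries of the vmm_ctrl_ops table. */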
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
  host_state = V3_AllocPages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1 << 3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2 << 3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void *)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t *)((uchar_t *)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t *)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t *)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t *)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;
  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif