Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way.
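For example, a release branch can be tracked with the same pattern (the branch name below is only illustrative; list the real remote branches first):

  git branch -r
  git checkout --track -b Release-1.0 origin/Release-1.0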


fixed a ton of compile errors
palacios/src/palacios/svm.c
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();
extern void EnableInts();


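/* Allocate and zero a single host page to hold a guest's VMCB
 * (control area + state save area).
 */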
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_AllocPages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


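/* Initialize a VMCB so the guest starts like a freshly reset x86 CPU:
 * real mode, CS:IP = f000:fff0 (the BIOS reset vector), with intercepts
 * set for the SVM instruction set, HLT, physical interrupts, and the
 * control-register / paging events required by the chosen paging mode.
 */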
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->svm_instrs.VMMCALL = 1;
  ctrl_area->svm_instrs.VMLOAD = 1;
  ctrl_area->svm_instrs.VMSAVE = 1;
  ctrl_area->svm_instrs.STGI = 1;
  ctrl_area->svm_instrs.CLGI = 1;
  ctrl_area->svm_instrs.SKINIT = 1;
  ctrl_area->svm_instrs.RDTSCP = 1;
  ctrl_area->svm_instrs.ICEBP = 1;
  ctrl_area->svm_instrs.WBINVD = 1;
  ctrl_area->svm_instrs.MONITOR = 1;
  ctrl_area->svm_instrs.MWAIT_always = 1;
  ctrl_area->svm_instrs.MWAIT_if_armed = 1;

  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;

  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

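  /* I/O permission bitmap: 3 pages (the 12 KB region SVM expects) give one
   * bit per port across the 64K port space. A set bit forces a #VMEXIT on
   * that port, so mark the bit for every hooked port.
   */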
  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;

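  /* Paging setup: with shadow paging the guest runs on VMM-maintained page
   * tables, so CR3 accesses, INVLPG/INVLPGA, and page faults are intercepted;
   * with nested paging the hardware walks a second page table rooted at N_CR3.
   */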
  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}


static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

  info->run_state = VM_STOPPED;

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


// can we start a kernel thread here...
int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
  uint_t num_exits = 0;

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  info->run_state = VM_RUNNING;

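  /* Main run loop: each iteration does one world switch into the guest
   * (VMRUN via safe_svm_launch) and then handles the resulting #VMEXIT.
   */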
  while (1) {
    ullong_t tmp_tsc;

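    // CLGI clears the global interrupt flag so nothing interrupts the host
    // between here and VMRUN; interrupts arriving while the guest runs
    // trigger a #VMEXIT (INTR intercept) and are delivered after STGI below.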
    EnableInts();
    CLGI();

    //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    STGI();

    if ((num_exits % 25) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
    }

    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      info->run_state = VM_ERROR;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(info);
      PrintV3CtrlRegs(info);
      PrintV3GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  ret = cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n",ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else

  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);

  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}


void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = V3_AllocPages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }

  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }

  /* ** */

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  //  guest_state->cpl = 0;

  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }

  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif