Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way; see the example below.
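For example, a release branch can be tracked just like devel. The branch name "Release-1.0" below is only illustrative; list the remote branches first to see which ones actually exist:

  git branch -r
  git checkout --track -b Release-1.0 origin/Release-1.0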


The file below is palacios/src/palacios/svm.c, as of the "code clean up" commit.
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();
extern void EnableInts();


static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_AllocPages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
  
    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;


  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}


static int init_svm_guest(struct guest_info * info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void *)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t *)(info->vmm_data), info);

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


// can we start a kernel thread here...
static int start_svm_guest(struct guest_info * info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
  uint_t num_exits = 0;

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {
    ullong_t tmp_tsc;

    EnableInts();
    CLGI();

    //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t *)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    STGI();

    if ((num_exits % 25) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
    }

    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(info);
      PrintV3CtrlRegs(info);
      PrintV3GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char *)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  ret = cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;
    }
  }

#else

  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}


void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = V3_AllocPages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1 << 3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2 << 3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void *)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t *)((uchar_t *)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t *)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t *)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t *)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif