Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute
  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out in the same way.
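For example, to list the available remote branches and then switch to one of the release branches (the release branch name below is only illustrative; use one of the names printed by the first command):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2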


Below is palacios/src/palacios/svm.c, the AMD SVM support code, as it appears in the "added hash table" commit.

#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();


extern void DisableInts();
extern void EnableInts();


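/* Allocate_VMCB: grab one host page and zero it to serve as the guest's VMCB
   (the VMCB is a 4KB, page-aligned structure handed to VMRUN). */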
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_AllocPages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


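/* Init_VMCB_BIOS: set up the VMCB so the guest begins execution at the BIOS reset
   vector (CS base 0xf0000, IP 0xfff0) in real mode, with intercepts for CR0 accesses,
   HLT, VMRUN, NMI/SMI/INIT/PAUSE, hooked I/O ports, and external interrupts; shadow or
   nested paging is configured according to vm_info->shdw_pg_mode. */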
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI=1;
  ctrl_area->instrs.SMI=1;
  ctrl_area->instrs.INIT=1;
  ctrl_area->instrs.PAUSE=1;
  ctrl_area->instrs.shutdown_evts=1;


  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit=0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;


    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }


}

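/* init_svm_guest: allocate a VMCB for this guest, populate it with the BIOS boot state
   above, and clear the general purpose registers; the guest is left in the VM_STOPPED
   state until start_svm_guest runs it. */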
static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();


  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);


  info->run_state = VM_STOPPED;

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}

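/* start_svm_guest: the VM entry/exit loop. Each iteration disables global interrupts
   (CLGI), programs the TSC offset, enters the guest via safe_svm_launch, re-enables
   global interrupts (STGI), and dispatches the exit through handle_svm_exit; on an
   unhandled exit the guest state is dumped and the loop terminates. */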
// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
  uint_t num_exits = 0;


  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  info->run_state = VM_RUNNING;

  while (1) {
    ullong_t tmp_tsc;


    EnableInts();
    CLGI();

    //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");


    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    STGI();

    if ((num_exits % 25) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
    }


    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      info->run_state = VM_ERROR;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);


      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(info);
      PrintV3CtrlRegs(info);
      PrintV3GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }


      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  ret =  cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n",ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }  else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and  enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else

  uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);


  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

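/* has_svm_nested_paging: query the SVM feature leaf (CPUID_SVM_REV_AND_FEATURE_IDS)
   and report whether the nested paging feature bit is set in EDX. */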
int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}

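/* Init_SVM: enable SVM on this CPU by setting the SVME bit in the EFER MSR, point the
   VM_HSAVE_PA MSR at a freshly allocated host state save area, and install the
   SVM-specific guest operations into vmm_ops. */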
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
  host_state = V3_AllocPages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;


  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }


}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif

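For orientation, here is a minimal host-side sketch (not code from this file) of how the entry points above fit together. It assumes struct vmm_ctrl_ops and struct guest_info are visible from the Palacios headers and that the guest_info has been populated elsewhere (memory map, I/O hooks, paging mode); the function name is purely illustrative.

  #include <palacios/vmm.h>
  #include <palacios/svm.h>

  /* Hypothetical calling sequence, for illustration only. */
  static void example_boot_one_guest(struct guest_info * guest) {
    struct vmm_ctrl_ops ops;

    if (!is_svm_capable()) {
      return;                  /* no usable SVM support on this CPU */
    }

    Init_SVM(&ops);            /* enable SVM, set up the host save area, install the ops below */
    ops.init_guest(guest);     /* allocate and initialize the guest's VMCB */
    ops.start_guest(guest);    /* enter the VMRUN / exit-handling loop */
  }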