Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


some nested paging fixes
[palacios.git] / palacios / src / palacios / svm.c
1 #include <palacios/svm.h>
2 #include <palacios/vmm.h>
3
4 #include <palacios/vmcb.h>
5 #include <palacios/vmm_mem.h>
6 #include <palacios/vmm_paging.h>
7 #include <palacios/svm_handler.h>
8
9 #include <palacios/vmm_debug.h>
10 #include <palacios/vm_guest_mem.h>
11
12 #include <palacios/vmm_decoder.h>
13
14
15 extern struct vmm_os_hooks * os_hooks;
16
17 extern uint_t cpuid_ecx(uint_t op);
18 extern uint_t cpuid_edx(uint_t op);
19 extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte); 
20 extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
21 extern uint_t launch_svm(vmcb_t * vmcb_addr);
22 extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);
23
24 extern void STGI();
25 extern void CLGI();
26
27 extern uint_t Get_CR3();
28
29
30 extern void DisableInts();
31 extern void EnableInts();
32
33
34
35
36
37
38
39 static vmcb_t * Allocate_VMCB() {
40   vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);
41
42
43   memset(vmcb_page, 0, 4096);
44
45   return vmcb_page;
46 }
47
48
49
50
51
// Init_VMCB_BIOS: program a freshly zeroed VMCB so the guest starts like a
// just-reset x86 CPU: real mode, CS selector 0xf000 / base 0xf0000, RIP
// 0xfff0 (the BIOS reset vector), with intercepts, the IO permission
// bitmap, and shadow- or nested-paging state configured from vm_info.
52 static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
53   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
54   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
55   uint_t i;
56
57
58   guest_state->rsp = vm_info->vm_regs.rsp;
59   // guest_state->rip = vm_info->rip;
     // Start at the reset vector; the 0xf0000 part comes from CS.base below.
60   guest_state->rip = 0xfff0;
61
62   guest_state->cpl = 0;
63
     // Intercept guest reads and writes of CR0.
64   //ctrl_area->instrs.instrs.CR0 = 1;
65   ctrl_area->cr_reads.cr0 = 1;
66   ctrl_area->cr_writes.cr0 = 1;
67
68   guest_state->efer |= EFER_MSR_svm_enable;
69   guest_state->rflags = 0x00000002; // The reserved bit is always 1
70   ctrl_area->svm_instrs.VMRUN = 1;
71   ctrl_area->instrs.HLT = 1;
72   // guest_state->cr0 = 0x00000001;    // PE 
73   ctrl_area->guest_ASID = 1;
74
     // Intercept the common fault vectors.  Note that #PF (pf) is only
     // hooked in the shadow-paging branch further down.
75   ctrl_area->exceptions.de = 1;
76   ctrl_area->exceptions.df = 1;
77
78   ctrl_area->exceptions.ts = 1;
79   ctrl_area->exceptions.ss = 1;
80   ctrl_area->exceptions.ac = 1;
81   ctrl_area->exceptions.mc = 1;
82   ctrl_area->exceptions.gp = 1;
83   ctrl_area->exceptions.ud = 1;
84   ctrl_area->exceptions.np = 1;
85   ctrl_area->exceptions.of = 1;
86   ctrl_area->exceptions.nmi = 1;
87
88   // Debug of boot on physical machines - 7/14/08
89   ctrl_area->instrs.NMI=1;
90   ctrl_area->instrs.SMI=1;
91   ctrl_area->instrs.INIT=1;
92   ctrl_area->instrs.PAUSE=1;
93   ctrl_area->instrs.shutdown_evts=1;
94
95
96
97   vm_info->vm_regs.rdx = 0x00000f00;
98
     // 0x60000010 = CD | NW | ET: the architectural CR0 reset value
     // (caching disabled, paging/protection off).
99   guest_state->cr0 = 0x60000010;
100
101   guest_state->cs.selector = 0xf000;
102   guest_state->cs.limit=0xffff;
103   guest_state->cs.base = 0x0000000f0000LL;
104   guest_state->cs.attrib.raw = 0xf3;
105
106   
     // All data segments start flat at base 0 with a 4GB limit.
107   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
108   for ( i = 0; segregs[i] != NULL; i++) {
109     struct vmcb_selector * seg = segregs[i];
110     
111     seg->selector = 0x0000;
112     //    seg->base = seg->selector << 4;
113     seg->base = 0x00000000;
114     seg->attrib.raw = 0xf3;
115     seg->limit = ~0u;
116   }
117   
118   guest_state->gdtr.limit = 0x0000ffff;
119   guest_state->gdtr.base = 0x0000000000000000LL;
120   guest_state->idtr.limit = 0x0000ffff;
121   guest_state->idtr.base = 0x0000000000000000LL;
122
123   guest_state->ldtr.selector = 0x0000;
124   guest_state->ldtr.limit = 0x0000ffff;
125   guest_state->ldtr.base = 0x0000000000000000LL;
126   guest_state->tr.selector = 0x0000;
127   guest_state->tr.limit = 0x0000ffff;
128   guest_state->tr.base = 0x0000000000000000LL;
129
130
     // Debug registers at their architectural reset values.
131   guest_state->dr6 = 0x00000000ffff0ff0LL;
132   guest_state->dr7 = 0x0000000000000400LL;
133
     // Build the 3-page IO permission bitmap; a set bit forces a #VMEXIT
     // for that port.  Only hooked ports are set, so unhooked IO
     // passes straight through.
134   if (vm_info->io_map.num_ports > 0) {
135     vmm_io_hook_t * iter;
136     addr_t io_port_bitmap;
137     
138     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
139     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
140     
141     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
142
143     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
144
145     FOREACH_IO_HOOK(vm_info->io_map, iter) {
146       ushort_t port = iter->port;
147       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
148
149       bitmap += (port / 8);
150       PrintDebug("Setting Bit for port 0x%x\n", port);
151       *bitmap |= 1 << (port % 8);
152     }
153
154
155     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
156
157     ctrl_area->instrs.IOIO_PROT = 1;
158   }
159
160
161
162   PrintDebug("Exiting on interrupts\n");
163   ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
164   ctrl_area->instrs.INTR = 1;
165
166
167   if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
     // Shadow paging: the guest runs on host-built passthrough page
     // tables, with CR3 accesses, INVLPG(A), and #PF intercepted so the
     // shadow handler can track guest paging state.
168     PrintDebug("Creating initial shadow page table\n");
169     vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
170     vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
171     vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
172     PrintDebug("Created\n");
173
174     guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;
175
176     //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));
177
178     ctrl_area->cr_reads.cr3 = 1;
179     ctrl_area->cr_writes.cr3 = 1;
180
181
182     ctrl_area->instrs.INVLPG = 1;
183     ctrl_area->instrs.INVLPGA = 1;
184
185     ctrl_area->exceptions.pf = 1;
186
187     /* JRL: This is a performance killer, and a simplistic solution */
188     /* We need to fix this */
     // TLB_CONTROL = 1 flushes the guest TLB on every VMRUN.
189     ctrl_area->TLB_CONTROL = 1;
190     
191
192
193     guest_state->g_pat = 0x7040600070406ULL;
194
     // Turn paging on (CR0.PG) so the shadow tables are actually used.
195     guest_state->cr0 |= 0x80000000;
196
197   } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
198     // Flush the TLB on entries/exits
199     ctrl_area->TLB_CONTROL = 1;
200
201     // Enable Nested Paging
202     ctrl_area->NP_ENABLE = 1;
203
     // NOTE(review): this prints a pointer with %x -- truncates on a
     // 64-bit build; confirm the target word size.
204     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
205
206     // Set the Nested Page Table pointer
207     vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
208     ctrl_area->N_CR3 = vm_info->direct_map_pt;
209
210     //   ctrl_area->N_CR3 = Get_CR3();
211     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
212
213     guest_state->g_pat = 0x7040600070406ULL;
214   }
215
216
217
218 }
219
220
221
222
223
224
225
226
227
228 static int init_svm_guest(struct guest_info *info) {
229  
230   PrintDebug("Allocating VMCB\n");
231   info->vmm_data = (void*)Allocate_VMCB();
232
233
234   //PrintDebug("Generating Guest nested page tables\n");
235   //  info->page_tables = NULL;
236   //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
237   //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
238   //  PrintDebugPageTables(info->page_tables);
239
240
241   PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
242   Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
243   
244
245   //  info->rip = 0;
246
247   info->vm_regs.rdi = 0;
248   info->vm_regs.rsi = 0;
249   info->vm_regs.rbp = 0;
250   info->vm_regs.rsp = 0;
251   info->vm_regs.rbx = 0;
252   info->vm_regs.rdx = 0;
253   info->vm_regs.rcx = 0;
254   info->vm_regs.rax = 0;
255   
256   return 0;
257 }
258
259
260 // can we start a kernel thread here...
// Guest run loop: repeatedly enter the guest via safe_svm_launch() and
// dispatch each #VMEXIT to handle_svm_exit().  Loops until the exit
// handler reports an error, then dumps guest state for debugging and
// returns 0.
261 static int start_svm_guest(struct guest_info *info) {
262   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
263   vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
264
265   PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
266   //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
267
268   while (1) {
269     ullong_t tmp_tsc;
270
271
     // CLGI closes the global interrupt window around the world switch;
     // interrupts are delivered again after STGI below.
272     EnableInts();
273     CLGI();
274
275     PrintDebug("SVM Entry to rip=%x...\n", info->rip);
276
     // Snapshot the host TSC and bias the guest's TSC so guest time
     // appears continuous across exits.
277     rdtscll(info->time_state.cached_host_tsc);
278     guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
279
     // Assembly stub: saves host state, VMRUNs, restores on #VMEXIT.
280     safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
281
282     rdtscll(tmp_tsc);
283     //PrintDebug("SVM Returned\n");
284
285
286     v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
287
288     STGI();
289
290      
291     if (handle_svm_exit(info) != 0) {
     // Error path: dump as much guest context as we can, then stop.
292
293       addr_t host_addr;
294       addr_t linear_addr = 0;
295
296       PrintDebug("SVM ERROR!!\n"); 
297       
     // NOTE(review): %x with these values truncates on a 64-bit build;
     // confirm the target word size.
298       PrintDebug("RIP: %x\n", guest_state->rip);
299
300
301       linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));
302
303
304       PrintDebug("RIP Linear: %x\n", linear_addr);
305       PrintV3Segments(info);
306       PrintV3CtrlRegs(info);
307       PrintV3GPRs(info);
308       
     // NOTE(review): host_addr stays uninitialized if mem_mode is
     // neither PHYSICAL_MEM nor VIRTUAL_MEM -- the dump below would
     // then read through a garbage pointer.
309       if (info->mem_mode == PHYSICAL_MEM) {
310         guest_pa_to_host_pa(info, linear_addr, &host_addr);
311       } else if (info->mem_mode == VIRTUAL_MEM) {
312         guest_va_to_host_pa(info, linear_addr, &host_addr);
313       }
314
315
316       PrintDebug("Host Address of rip = 0x%x\n", host_addr);
317
318       PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
319       PrintTraceMemDump((char*)host_addr, 15);
320
321       break;
322     }
323   }
324   return 0;
325 }
326
327
328
329
330 /* Checks machine SVM capability */
331 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
332 int is_svm_capable() {
333
334 #if 1
335   // Dinda
336
337   uint_t ret;
338   uint_t vm_cr_low = 0, vm_cr_high = 0;
339
340
341   ret =  cpuid_ecx(CPUID_FEATURE_IDS);
342   
343   PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n",ret);
344
345   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
346     PrintDebug("SVM Not Available\n");
347     return 0;
348   }  else {
349     Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
350     
351     PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);
352     
353     if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
354       PrintDebug("SVM is available but is disabled.\n");
355
356       ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
357       
358       PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
359       
360       if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
361         PrintDebug("SVM BIOS Disabled, not unlockable\n");
362       } else {
363         PrintDebug("SVM is locked with a key\n");
364       }
365       return 0;
366
367     } else {
368       PrintDebug("SVM is available and  enabled.\n");
369
370       ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
371       
372       PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
373
374       if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
375         PrintDebug("SVM Nested Paging not supported\n");
376       } else {
377         PrintDebug("SVM Nested Paging supported\n");
378       }
379       
380       return 1;
381       
382     }
383   }
384
385 #else
386
387   uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
388   uint_t vm_cr_low = 0, vm_cr_high = 0;
389
390
391   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
392     PrintDebug("SVM Not Available\n");
393     return 0;
394   } 
395
396   Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
397
398   PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);
399
400
401   // this part is clearly wrong, since the np bit is in 
402   // edx, not ecx
403   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
404     PrintDebug("Nested Paging not supported\n");
405   } else {
406     PrintDebug("Nested Paging supported\n");
407   }
408
409   if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
410     PrintDebug("SVM is disabled.\n");
411     return 1;
412   }
413
414   ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
415
416   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
417     PrintDebug("SVM BIOS Disabled, not unlockable\n");
418   } else {
419     PrintDebug("SVM is locked with a key\n");
420   }
421
422   return 0;
423
424 #endif
425
426 }
427
428 int has_svm_nested_paging() {
429   uint32_t ret;
430
431   ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
432       
433   //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);
434   
435   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
436     PrintDebug("SVM Nested Paging not supported\n");
437     return 0;
438   } else {
439     PrintDebug("SVM Nested Paging supported\n");
440     return 1;
441   }
442
443 }
444
445
446
447 void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
448   reg_ex_t msr;
449   void * host_state;
450
451
452   // Enable SVM on the CPU
453   Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
454   msr.e_reg.low |= EFER_MSR_svm_enable;
455   Set_MSR(EFER_MSR, 0, msr.e_reg.low);
456   
457   PrintDebug("SVM Enabled\n");
458
459
460   // Setup the host state save area
461   host_state = os_hooks->allocate_pages(4);
462   
463   msr.e_reg.high = 0;
464   msr.e_reg.low = (uint_t)host_state;
465
466
467   PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
468   Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
469
470
471
472   // Setup the SVM specific vmm operations
473   vmm_ops->init_guest = &init_svm_guest;
474   vmm_ops->start_guest = &start_svm_guest;
475   vmm_ops->has_nested_paging = &has_svm_nested_paging;
476
477   return;
478 }
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531 /*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
532   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
533   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
534   uint_t i;
535
536
537   guest_state->rsp = vm_info.vm_regs.rsp;
538   guest_state->rip = vm_info.rip;
539
540
541   //ctrl_area->instrs.instrs.CR0 = 1;
542   ctrl_area->cr_reads.cr0 = 1;
543   ctrl_area->cr_writes.cr0 = 1;
544
545   guest_state->efer |= EFER_MSR_svm_enable;
546   guest_state->rflags = 0x00000002; // The reserved bit is always 1
547   ctrl_area->svm_instrs.VMRUN = 1;
548   // guest_state->cr0 = 0x00000001;    // PE 
549   ctrl_area->guest_ASID = 1;
550
551
552   ctrl_area->exceptions.de = 1;
553   ctrl_area->exceptions.df = 1;
554   ctrl_area->exceptions.pf = 1;
555   ctrl_area->exceptions.ts = 1;
556   ctrl_area->exceptions.ss = 1;
557   ctrl_area->exceptions.ac = 1;
558   ctrl_area->exceptions.mc = 1;
559   ctrl_area->exceptions.gp = 1;
560   ctrl_area->exceptions.ud = 1;
561   ctrl_area->exceptions.np = 1;
562   ctrl_area->exceptions.of = 1;
563   ctrl_area->exceptions.nmi = 1;
564
565   guest_state->cs.selector = 0x0000;
566   guest_state->cs.limit=~0u;
567   guest_state->cs.base = guest_state->cs.selector<<4;
568   guest_state->cs.attrib.raw = 0xf3;
569
570   
571   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
572   for ( i = 0; segregs[i] != NULL; i++) {
573     struct vmcb_selector * seg = segregs[i];
574     
575     seg->selector = 0x0000;
576     seg->base = seg->selector << 4;
577     seg->attrib.raw = 0xf3;
578     seg->limit = ~0u;
579   }
580   
581   if (vm_info.io_map.num_ports > 0) {
582     vmm_io_hook_t * iter;
583     addr_t io_port_bitmap;
584     
585     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
586     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
587     
588     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
589
590     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
591
592     FOREACH_IO_HOOK(vm_info.io_map, iter) {
593       ushort_t port = iter->port;
594       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
595
596       bitmap += (port / 8);
597       PrintDebug("Setting Bit in block %x\n", bitmap);
598       *bitmap |= 1 << (port % 8);
599     }
600
601
602     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
603
604     ctrl_area->instrs.IOIO_PROT = 1;
605   }
606
607   ctrl_area->instrs.INTR = 1;
608
609
610
611   if (vm_info.page_mode == SHADOW_PAGING) {
612     PrintDebug("Creating initial shadow page table\n");
613     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
614     PrintDebug("Created\n");
615
616     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
617
618     ctrl_area->cr_reads.cr3 = 1;
619     ctrl_area->cr_writes.cr3 = 1;
620
621
622     ctrl_area->instrs.INVLPG = 1;
623     ctrl_area->instrs.INVLPGA = 1;
624
625     guest_state->g_pat = 0x7040600070406ULL;
626
627     guest_state->cr0 |= 0x80000000;
628   } else if (vm_info.page_mode == NESTED_PAGING) {
629     // Flush the TLB on entries/exits
630     //ctrl_area->TLB_CONTROL = 1;
631
632     // Enable Nested Paging
633     //ctrl_area->NP_ENABLE = 1;
634
635     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
636
637         // Set the Nested Page Table pointer
638     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
639     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
640
641     //   ctrl_area->N_CR3 = Get_CR3();
642     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
643
644     //    guest_state->g_pat = 0x7040600070406ULL;
645   }
646
647
648
649 }
650 */
651
652
653
654
655
656
657
658 #if 0
659 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
660   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
661   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
662   uint_t i = 0;
663
664
665   guest_state->rsp = vm_info.vm_regs.rsp;
666   guest_state->rip = vm_info.rip;
667
668
669   /* I pretty much just gutted this from TVMM */
670   /* Note: That means its probably wrong */
671
672   // set the segment registers to mirror ours
673   guest_state->cs.selector = 1<<3;
674   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
675   guest_state->cs.attrib.fields.S = 1;
676   guest_state->cs.attrib.fields.P = 1;
677   guest_state->cs.attrib.fields.db = 1;
678   guest_state->cs.attrib.fields.G = 1;
679   guest_state->cs.limit = 0xfffff;
680   guest_state->cs.base = 0;
681   
682   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
683   for ( i = 0; segregs[i] != NULL; i++) {
684     struct vmcb_selector * seg = segregs[i];
685     
686     seg->selector = 2<<3;
687     seg->attrib.fields.type = 0x2; // Data Segment+read/write
688     seg->attrib.fields.S = 1;
689     seg->attrib.fields.P = 1;
690     seg->attrib.fields.db = 1;
691     seg->attrib.fields.G = 1;
692     seg->limit = 0xfffff;
693     seg->base = 0;
694   }
695
696
697   {
698     /* JRL THIS HAS TO GO */
699     
700     //    guest_state->tr.selector = GetTR_Selector();
701     guest_state->tr.attrib.fields.type = 0x9; 
702     guest_state->tr.attrib.fields.P = 1;
703     // guest_state->tr.limit = GetTR_Limit();
704     //guest_state->tr.base = GetTR_Base();// - 0x2000;
705     /* ** */
706   }
707
708
709   /* ** */
710
711
712   guest_state->efer |= EFER_MSR_svm_enable;
713   guest_state->rflags = 0x00000002; // The reserved bit is always 1
714   ctrl_area->svm_instrs.VMRUN = 1;
715   guest_state->cr0 = 0x00000001;    // PE 
716   ctrl_area->guest_ASID = 1;
717
718
719   //  guest_state->cpl = 0;
720
721
722
723   // Setup exits
724
725   ctrl_area->cr_writes.cr4 = 1;
726   
727   ctrl_area->exceptions.de = 1;
728   ctrl_area->exceptions.df = 1;
729   ctrl_area->exceptions.pf = 1;
730   ctrl_area->exceptions.ts = 1;
731   ctrl_area->exceptions.ss = 1;
732   ctrl_area->exceptions.ac = 1;
733   ctrl_area->exceptions.mc = 1;
734   ctrl_area->exceptions.gp = 1;
735   ctrl_area->exceptions.ud = 1;
736   ctrl_area->exceptions.np = 1;
737   ctrl_area->exceptions.of = 1;
738   ctrl_area->exceptions.nmi = 1;
739
740   
741
742   ctrl_area->instrs.IOIO_PROT = 1;
743   ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);
744   
745   {
746     reg_ex_t tmp_reg;
747     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
748     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
749   }
750
751   ctrl_area->instrs.INTR = 1;
752
753   
754   {
755     char gdt_buf[6];
756     char idt_buf[6];
757
758     memset(gdt_buf, 0, 6);
759     memset(idt_buf, 0, 6);
760
761
762     uint_t gdt_base, idt_base;
763     ushort_t gdt_limit, idt_limit;
764     
765     GetGDTR(gdt_buf);
766     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
767     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
768     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
769
770     GetIDTR(idt_buf);
771     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
772     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
773     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
774
775
776     // gdt_base -= 0x2000;
777     //idt_base -= 0x2000;
778
779     guest_state->gdtr.base = gdt_base;
780     guest_state->gdtr.limit = gdt_limit;
781     guest_state->idtr.base = idt_base;
782     guest_state->idtr.limit = idt_limit;
783
784
785   }
786   
787   
788   // also determine if CPU supports nested paging
789   /*
790   if (vm_info.page_tables) {
791     //   if (0) {
792     // Flush the TLB on entries/exits
793     ctrl_area->TLB_CONTROL = 1;
794
795     // Enable Nested Paging
796     ctrl_area->NP_ENABLE = 1;
797
798     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
799
800         // Set the Nested Page Table pointer
801     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
802
803
804     //   ctrl_area->N_CR3 = Get_CR3();
805     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
806
807     guest_state->g_pat = 0x7040600070406ULL;
808
809     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
810     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
811     // Enable Paging
812     //    guest_state->cr0 |= 0x80000000;
813   }
814   */
815
816 }
817
818
819
820
821
822 #endif
823
824