Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
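For example, to track one of the release branches (the branch name below is only a placeholder; run "git branch -r" to list the branches that actually exist), execute

  git checkout --track -b <release-branch> origin/<release-branch>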


Added support for SMI interrupt
[palacios.git] / palacios/src/palacios/svm.c
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();
extern void EnableInts();


static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}

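/* Init_VMCB_BIOS: configure the VMCB so the guest starts in real mode at
 * the BIOS reset vector (CS = 0xf000, base 0xf0000, RIP = 0xfff0), with
 * CR0 read/write intercepts, exception and instruction intercepts, an I/O
 * permission bitmap for any hooked ports, and either shadow or nested
 * paging set up.
 */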
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;

  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info->io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;

  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}

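/* init_svm_guest: allocate a VMCB for this guest, fill it in with
 * Init_VMCB_BIOS(), and clear the guest's general purpose registers.
 */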
static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


// can we start a kernel thread here...
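/* start_svm_guest: run the guest in a loop.  Each pass disables global
 * interrupts with CLGI, records the host TSC to set TSC_OFFSET, enters the
 * guest via safe_svm_launch(), re-enables global interrupts with STGI, and
 * then dispatches the exit through handle_svm_exit().  On an unhandled
 * exit the guest state is dumped and the loop breaks out.
 */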
static int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {
    ullong_t tmp_tsc;

    EnableInts();
    CLGI();

    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    PrintDebug("Launching\n");
    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);

    STGI();

    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(&(info->segments));
      PrintV3CtrlRegs(&(info->ctrl_regs));

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  ret = cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    // Test whether the SVMDIS bit is set (rather than comparing the masked
    // value to 1, which only works if SVMDIS happens to be bit 0)
    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else

  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

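/* has_svm_nested_paging: report whether the CPU advertises SVM nested
 * paging (the NP feature bit in the SVM revision/feature CPUID leaf).
 */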
int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}


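/* Init_SVM: enable SVM on this CPU by setting EFER.SVME, allocate and
 * register the host state save area through the VM_HSAVE_PA MSR, and hook
 * the SVM-specific init/start/nested-paging operations into vmm_ops.
 */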
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }

  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }

  /* ** */

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  //  guest_state->cpl = 0;

  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }

  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif