Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
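For example, to list the available remote branches and track one of them (the branch name below is a placeholder; use whichever name `git branch -r` reports):

  git branch -r
  git checkout --track -b <branch> origin/<branch>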


Commit: fixed debug output
File: palacios/src/palacios/svm.c
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();


extern void DisableInts();
extern void EnableInts();

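/* Allocate a single zeroed page to hold a VMCB */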
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}

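/* Initialize the VMCB so the guest starts executing the BIOS: real-mode
 * reset state with CS:IP = f000:fff0, plus the instruction/exception
 * intercepts, the IO permission map, and the paging mode (shadow or nested).
 */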
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;


  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info->io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;


    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}

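/* Allocate and initialize the VMCB for this guest and clear its
 * general purpose registers.
 */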
static int init_svm_guest(struct guest_info * info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void *)Allocate_VMCB();


  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t *)(info->vmm_data), info);


  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}

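/* Main guest execution loop: program the TSC offset, enter the guest via
 * safe_svm_launch() with CLGI/STGI bracketing the world switch, and hand
 * each #VMEXIT to handle_svm_exit() until one of them fails.
 */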
// can we start a kernel thread here...
static int start_svm_guest(struct guest_info * info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
  uint_t num_exits = 0;

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {
    ullong_t tmp_tsc;


    EnableInts();
    CLGI();

    //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t *)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");


    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    STGI();

    if ((num_exits % 25) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
    }


    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);


      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(info);
      PrintV3CtrlRegs(info);
      PrintV3GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }


      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char *)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  ret = cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    // SVM_VM_CR_MSR_svmdis is a bit mask; test the bit directly
    // (comparing the masked value against 1 would never be true)
    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else

  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);


  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is not disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

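/* Report whether the CPU advertises the SVM nested paging (NP) feature */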
int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}

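/* Enable SVM in EFER, register the host state save area via the
 * SVM_VM_HSAVE_PA MSR, and hook the SVM guest operations into the VMM.
 */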
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;


  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t *)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }


}
*/

#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif