Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


added full device support
[palacios.git] / palacios / src / palacios / svm.c
1 #include <palacios/svm.h>
2 #include <palacios/vmm.h>
3
4 #include <palacios/vmcb.h>
5 #include <palacios/vmm_mem.h>
6 #include <palacios/vmm_paging.h>
7 #include <palacios/svm_handler.h>
8
9 #include <palacios/vmm_debug.h>
10 #include <palacios/vm_guest_mem.h>
11
12
13
14 extern struct vmm_os_hooks * os_hooks;
15
16 extern uint_t cpuid_ecx(uint_t op);
17 extern uint_t cpuid_edx(uint_t op);
18 extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte); 
19 extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
20 extern uint_t launch_svm(vmcb_t * vmcb_addr);
21 extern void safe_svm_launch(vmcb_t * vmcb_addr, struct guest_gprs * gprs);
22
23 extern uint_t Get_CR3();
24
25 extern void GetGDTR(void * gdt);
26 extern void GetIDTR(void * idt);
27
28 extern void DisableInts();
29
30 /* Checks machine SVM capability */
31 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
32 int is_svm_capable() {
33   uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
34   uint_t vm_cr_low = 0, vm_cr_high = 0;
35
36
37   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
38     PrintDebug("SVM Not Available\n");
39     return 0;
40   } 
41
42   Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
43
44   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
45     PrintDebug("Nested Paging not supported\n");
46   }
47
48   if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
49     return 1;
50   }
51
52   ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
53
54   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
55     PrintDebug("SVM BIOS Disabled, not unlockable\n");
56   } else {
57     PrintDebug("SVM is locked with a key\n");
58   }
59
60   return 0;
61 }
62
63
64
65 void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
66   reg_ex_t msr;
67   void * host_state;
68
69
70   // Enable SVM on the CPU
71   Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
72   msr.e_reg.low |= EFER_MSR_svm_enable;
73   Set_MSR(EFER_MSR, 0, msr.e_reg.low);
74   
75   PrintDebug("SVM Enabled\n");
76
77
78   // Setup the host state save area
79   host_state = os_hooks->allocate_pages(4);
80   
81   msr.e_reg.high = 0;
82   msr.e_reg.low = (uint_t)host_state;
83
84
85   PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
86   Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
87
88
89
90   // Setup the SVM specific vmm operations
91   vmm_ops->init_guest = &init_svm_guest;
92   vmm_ops->start_guest = &start_svm_guest;
93
94
95   return;
96 }
97
98
99 int init_svm_guest(struct guest_info *info) {
100  
101   PrintDebug("Allocating VMCB\n");
102   info->vmm_data = (void*)Allocate_VMCB();
103
104
105   //PrintDebug("Generating Guest nested page tables\n");
106   //  info->page_tables = NULL;
107   //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
108   //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
109   //  PrintDebugPageTables(info->page_tables);
110
111
112   PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
113   Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);
114   
115
116   //  info->rip = 0;
117
118   info->vm_regs.rdi = 0;
119   info->vm_regs.rsi = 0;
120   info->vm_regs.rbp = 0;
121   info->vm_regs.rsp = 0;
122   info->vm_regs.rbx = 0;
123   info->vm_regs.rdx = 0;
124   info->vm_regs.rcx = 0;
125   info->vm_regs.rax = 0;
126   
127   return 0;
128 }
129
130
131 // can we start a kernel thread here...
132 int start_svm_guest(struct guest_info *info) {
133
134
135
136   PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
137   //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
138
139   while (1) {
140
141     //PrintDebug("SVM Launch Args (vmcb=%x), (info=%x), (vm_regs=%x)\n", info->vmm_data,  &(info->vm_regs));
142     //PrintDebug("Launching to RIP: %x\n", info->rip);
143     safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
144     //launch_svm((vmcb_t*)(info->vmm_data));
145     //PrintDebug("SVM Returned\n");
146
147     if (handle_svm_exit(info) != 0) {
148       // handle exit code....
149       break;
150     }
151   }
152   return 0;
153 }
154
155
156
157 vmcb_t * Allocate_VMCB() {
158   vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);
159
160
161   memset(vmcb_page, 0, 4096);
162
163   return vmcb_page;
164 }
165
166
167
/* Initialize a VMCB for a guest entered at vm_info.rip with real-mode style
 * flat segments (base = selector << 4, selector 0).
 *
 * NOTE(review): vm_info is passed BY VALUE, so the write to
 * vm_info.shdw_pg_state.shadow_cr3 below only updates this local copy; the
 * caller's guest_info never sees it -- confirm that is intended.
 */
void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  // Start the guest at the stack/instruction pointers recorded in guest_info
  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  // Intercept all guest reads and writes of CR0
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;  // VMRUN executed inside the guest must exit
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;        // ASID 0 is used by the host; give the guest ASID 1


  // Intercept this set of guest exceptions so the VMM can handle/inspect them
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  // CS: selector 0, real-mode style base (selector << 4 == 0), maximal limit
  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;

  
  // Give the remaining data segments the same real-mode style setup as CS
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }
  
  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;
    
    // IO permissions map: one bit per port; 3 pages cover the full port space
    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    // Set the intercept bit for every hooked port
    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    // Enable IO permission map checking
    ctrl_area->instrs.IOIO_PROT = 1;
  }

  // Intercept physical interrupts so the host regains control
  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    // OR the page-aligned passthrough PDE32 table into the shadow CR3
    vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;

    // Intercept CR3 accesses and TLB invalidations to keep shadows in sync
    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    // presumably the architectural default PAT value -- TODO confirm
    guest_state->g_pat = 0x7040600070406ULL;

    // Turn on paging (CR0.PG) since the guest now runs on shadow tables
    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
287
288
289
/* Initialize a VMCB so the guest boots from its BIOS: execution starts at
 * the reset vector (cs.base 0xf0000 + rip 0xfff0 = 0xffff0) with CR0 at its
 * reset value.
 *
 * NOTE(review): vm_info is passed BY VALUE; writes to it here (rdx,
 * shadow_cr3) update only this local copy and are lost on return.
 */
void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  // guest_state->rip = vm_info.rip;
  // Reset-vector offset; combined with cs.base below this is 0xffff0
  guest_state->rip = 0xfff0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  // Intercept all guest reads and writes of CR0
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;  // VMRUN executed inside the guest must exit
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;        // ASID 0 is used by the host; give the guest ASID 1

  // Intercept this set of guest exceptions so the VMM can handle/inspect them
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  // NOTE(review): vm_info is a by-value copy, so this write never reaches the
  // caller's vm_regs -- confirm whether the guest's initial EDX was meant to
  // be set here.
  vm_info.vm_regs.rdx = 0x00000f00;

  // presumably the architectural reset value of CR0 -- TODO confirm
  guest_state->cr0 = 0x60000010;

  // CS set up for the reset vector: base 0xf0000, 64K limit
  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit=0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  
  // The remaining segments start at base 0 with maximal limits
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }
  
  // Descriptor tables and system segments in their power-on configuration
  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  // Debug registers -- presumably the architectural reset values; confirm
  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;
    
    // IO permissions map: one bit per port; 3 pages cover the full port space
    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    // Set the intercept bit for every hooked port
    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    // Enable IO permission map checking
    ctrl_area->instrs.IOIO_PROT = 1;
  }

  //ctrl_area->instrs.instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    // OR the page-aligned passthrough PDE32 table into the shadow CR3
    vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;

    //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

    // Intercept CR3 accesses and TLB invalidations to keep shadows in sync
    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    // presumably the architectural default PAT value -- TODO confirm
    guest_state->g_pat = 0x7040600070406ULL;

    // Turn on paging (CR0.PG) since the guest now runs on shadow tables
    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
431
432
/* Initialize a VMCB for a protected-mode guest whose segments mirror the
 * host's flat descriptors (CS = GDT entry 1, data = GDT entry 2) and whose
 * GDTR/IDTR are copied from the host's live tables.  Marked by the original
 * author as gutted from TVMM and "probably wrong" -- treat as experimental.
 */
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  // CS: GDT selector 1, flat 4GB (limit 0xfffff with 4K granularity)
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  // Remaining segments: GDT selector 2, flat 4GB data descriptors
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    // Partial TR setup: only type (busy TSS) and present are set; the
    // selector/limit/base lines remain disabled
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;  // VMRUN executed inside the guest must exit
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;        // ASID 0 is used by the host


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  // Intercept this set of guest exceptions
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  // Intercept ALL port IO: the permission map is filled with 1s below
  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    // NOTE(review): memset truncates 0xffffffff to the byte 0xff (every
    // intercept bit set), and only 2 of the 3 allocated pages are filled --
    // confirm whether the third page is meant to stay uninitialized.
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  // Intercept physical interrupts so the host regains control
  ctrl_area->instrs.INTR = 1;

  
  {
    // Copy the host's GDTR/IDTR into the guest.  The 6-byte buffers hold the
    // pseudo-descriptor layout: 16-bit limit at offset 0, 32-bit base at
    // offset 2 (read via potentially unaligned pointer casts).
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611 /*
612
613
614 void Init_VMCB_Real(vmcb_t * vmcb, struct guest_info vm_info) {
615   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
616   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
617   uint_t i;
618
619
620   guest_state->rsp = vm_info.vm_regs.rsp;
621   guest_state->rip = vm_info.rip;
622
623
624   guest_state->efer |= EFER_MSR_svm_enable;
625   guest_state->rflags = 0x00000002; // The reserved bit is always 1
626   ctrl_area->svm_instrs.instrs.VMRUN = 1;
627   ctrl_area->guest_ASID = 1;
628   guest_state->cr0 = 0x60000010;
629
630
631   ctrl_area->exceptions.de = 1;
632   ctrl_area->exceptions.df = 1;
633   ctrl_area->exceptions.pf = 1;
634   ctrl_area->exceptions.ts = 1;
635   ctrl_area->exceptions.ss = 1;
636   ctrl_area->exceptions.ac = 1;
637   ctrl_area->exceptions.mc = 1;
638   ctrl_area->exceptions.gp = 1;
639   ctrl_area->exceptions.ud = 1;
640   ctrl_area->exceptions.np = 1;
641   ctrl_area->exceptions.of = 1;
642   ctrl_area->exceptions.nmi = 1;
643
644   guest_state->cs.selector = 0xf000;
645   guest_state->cs.limit=0xffff;
646   guest_state->cs.base =  0xffff0000;
647   guest_state->cs.attrib.raw = 0x9a;
648
649   
650   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
651   for ( i = 0; segregs[i] != NULL; i++) {
652     struct vmcb_selector * seg = segregs[i];
653     
654     seg->selector = 0x0000;
655     seg->base = 0xffff0000;
656     seg->attrib.raw = 0x9b;
657     seg->limit = 0xffff;
658   }
659   
660   // Set GPRs 
661   //
662   //  EDX == 0xfxx
663   //  EAX, EBX, ECX, ESI, EDI, EBP, ESP == 0x0
664   //
665
666   guest_state->gdtr.base = 0;
667   guest_state->gdtr.limit = 0xffff;
668   guest_state->gdtr.attrib.raw = 0x0;
669
670   guest_state->idtr.base = 0;
671   guest_state->idtr.limit = 0xffff;
672   guest_state->idtr.attrib.raw = 0x0;
673
674   guest_state->ldtr.base = 0;
675   guest_state->ldtr.limit = 0xffff;
676   guest_state->ldtr.attrib.raw = 0x82;
677
678   guest_state->tr.base = 0;
679   guest_state->tr.limit = 0xffff;
680   guest_state->tr.attrib.raw = 0x83;
681
682
683
684
685   if (vm_info.io_map.num_ports > 0) {
686     vmm_io_hook_t * iter;
687     addr_t io_port_bitmap;
688     
689     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
690     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
691     
692     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
693
694     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
695
696     FOREACH_IO_HOOK(vm_info.io_map, iter) {
697       ushort_t port = iter->port;
698       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
699
700       bitmap += (port / 8);
701       PrintDebug("Setting Bit in block %x\n", bitmap);
702       *bitmap |= 1 << (port % 8);
703     }
704
705     ctrl_area->instrs.instrs.IOIO_PROT = 1;
706   }
707
708   ctrl_area->instrs.instrs.INTR = 1;
709
710   // also determine if CPU supports nested paging
711
712   if (vm_info.page_mode == SHADOW_PAGING) {
713     PrintDebug("Creating initial shadow page table\n");
714     vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
715     PrintDebug("Created\n");
716
717     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
718
719     ctrl_area->cr_reads.crs.cr3 = 1;
720     ctrl_area->cr_writes.crs.cr3 = 1;
721     ctrl_area->cr_reads.crs.cr0 = 1;
722     ctrl_area->cr_writes.crs.cr0 = 1;
723
724     ctrl_area->instrs.instrs.INVLPG = 1;
725     ctrl_area->instrs.instrs.INVLPGA = 1;
726
727         
728     guest_state->g_pat = 0x7040600070406ULL;
729
730     vm_info.shdw_pg_state.guest_cr0.e_reg.low = guest_state->cr0;
731     guest_state->cr0 |= 0x80000000;
732   } else if (vm_info.page_mode == NESTED_PAGING) {
733     // Flush the TLB on entries/exits
734     //ctrl_area->TLB_CONTROL = 1;
735
736     // Enable Nested Paging
737     //ctrl_area->NP_ENABLE = 1;
738
739     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
740
741         // Set the Nested Page Table pointer
742     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
743     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
744
745     //   ctrl_area->N_CR3 = Get_CR3();
746     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
747
748     //    guest_state->g_pat = 0x7040600070406ULL;
749   }
750
751 }
752 */