Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


got the bios setup ok, now we have to handle the exits
[palacios.git] / palacios / src / palacios / svm.c
1 #include <palacios/svm.h>
2 #include <palacios/vmm.h>
3
4 #include <palacios/vmcb.h>
5 #include <palacios/vmm_mem.h>
6 #include <palacios/vmm_paging.h>
7 #include <palacios/svm_handler.h>
8
9 #include <palacios/vmm_debug.h>
10 #include <palacios/vm_guest_mem.h>
11
12
13 /* TEMPORARY BECAUSE SVM IS WEIRD */
14 //#include <palacios/tss.h>
15 /* ** */
16
17
18 extern struct vmm_os_hooks * os_hooks;
19
20 extern uint_t cpuid_ecx(uint_t op);
21 extern uint_t cpuid_edx(uint_t op);
22 extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte); 
23 extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
24 extern uint_t launch_svm(vmcb_t * vmcb_addr);
25 extern void safe_svm_launch(vmcb_t * vmcb_addr, struct guest_gprs * gprs);
26
27 extern uint_t Get_CR3();
28
29 extern void GetGDTR(void * gdt);
30 extern void GetIDTR(void * idt);
31
32 extern void DisableInts();
33
34 /* Checks machine SVM capability */
35 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
36 int is_svm_capable() {
37   uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
38   uint_t vm_cr_low = 0, vm_cr_high = 0;
39
40
41   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
42     PrintDebug("SVM Not Available\n");
43     return 0;
44   } 
45
46   Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
47
48   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
49     PrintDebug("Nested Paging not supported\n");
50   }
51
52   if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
53     return 1;
54   }
55
56   ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
57
58   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
59     PrintDebug("SVM BIOS Disabled, not unlockable\n");
60   } else {
61     PrintDebug("SVM is locked with a key\n");
62   }
63
64   return 0;
65 }
66
67
68
69 void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
70   reg_ex_t msr;
71   void * host_state;
72
73
74   // Enable SVM on the CPU
75   Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
76   msr.e_reg.low |= EFER_MSR_svm_enable;
77   Set_MSR(EFER_MSR, 0, msr.e_reg.low);
78   
79   PrintDebug("SVM Enabled\n");
80
81
82   // Setup the host state save area
83   host_state = os_hooks->allocate_pages(4);
84   
85   msr.e_reg.high = 0;
86   msr.e_reg.low = (uint_t)host_state;
87
88
89   PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
90   Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
91
92
93
94   // Setup the SVM specific vmm operations
95   vmm_ops->init_guest = &init_svm_guest;
96   vmm_ops->start_guest = &start_svm_guest;
97
98
99   return;
100 }
101
102
103 int init_svm_guest(struct guest_info *info) {
104  
105   PrintDebug("Allocating VMCB\n");
106   info->vmm_data = (void*)Allocate_VMCB();
107
108
109   //PrintDebug("Generating Guest nested page tables\n");
110   //  info->page_tables = NULL;
111   //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
112   //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
113   //  PrintDebugPageTables(info->page_tables);
114
115
116   PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
117   Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);
118   
119
120   //  info->rip = 0;
121
122   info->vm_regs.rdi = 0;
123   info->vm_regs.rsi = 0;
124   info->vm_regs.rbp = 0;
125   info->vm_regs.rsp = 0;
126   info->vm_regs.rbx = 0;
127   info->vm_regs.rdx = 0;
128   info->vm_regs.rcx = 0;
129   info->vm_regs.rax = 0;
130   
131   return 0;
132 }
133
134
135 // can we start a kernel thread here...
136 int start_svm_guest(struct guest_info *info) {
137
138
139
140   PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
141   //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
142
143   while (1) {
144
145     PrintDebug("SVM Launch Args (vmcb=%x), (info=%x), (vm_regs=%x)\n", info->vmm_data,  &(info->vm_regs));
146     PrintDebug("Launching to RIP: %x\n", info->rip);
147     safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
148     //launch_svm((vmcb_t*)(info->vmm_data));
149     PrintDebug("SVM Returned\n");
150
151     if (handle_svm_exit(info) != 0) {
152       // handle exit code....
153       break;
154     }
155   }
156   return 0;
157 }
158
159
160
161 vmcb_t * Allocate_VMCB() {
162   vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);
163
164
165   memset(vmcb_page, 0, 4096);
166
167   return vmcb_page;
168 }
169
170
171
172 void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
173   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
174   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
175   uint_t i;
176
177
178   guest_state->rsp = vm_info.vm_regs.rsp;
179   guest_state->rip = vm_info.rip;
180
181
182   //ctrl_area->instrs.instrs.CR0 = 1;
183   ctrl_area->cr_reads.crs.cr0 = 1;
184   ctrl_area->cr_writes.crs.cr0 = 1;
185
186   guest_state->efer |= EFER_MSR_svm_enable;
187   guest_state->rflags = 0x00000002; // The reserved bit is always 1
188   ctrl_area->svm_instrs.instrs.VMRUN = 1;
189   // guest_state->cr0 = 0x00000001;    // PE 
190   ctrl_area->guest_ASID = 1;
191
192
193   ctrl_area->exceptions.ex_names.de = 1;
194   ctrl_area->exceptions.ex_names.df = 1;
195   ctrl_area->exceptions.ex_names.pf = 1;
196   ctrl_area->exceptions.ex_names.ts = 1;
197   ctrl_area->exceptions.ex_names.ss = 1;
198   ctrl_area->exceptions.ex_names.ac = 1;
199   ctrl_area->exceptions.ex_names.mc = 1;
200   ctrl_area->exceptions.ex_names.gp = 1;
201   ctrl_area->exceptions.ex_names.ud = 1;
202   ctrl_area->exceptions.ex_names.np = 1;
203   ctrl_area->exceptions.ex_names.of = 1;
204   ctrl_area->exceptions.ex_names.nmi = 1;
205
206   guest_state->cs.selector = 0x0000;
207   guest_state->cs.limit=~0u;
208   guest_state->cs.base = guest_state->cs.selector<<4;
209   guest_state->cs.attrib.raw = 0xf3;
210
211   
212   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
213   for ( i = 0; segregs[i] != NULL; i++) {
214     struct vmcb_selector * seg = segregs[i];
215     
216     seg->selector = 0x0000;
217     seg->base = seg->selector << 4;
218     seg->attrib.raw = 0xf3;
219     seg->limit = ~0u;
220   }
221   
222   if (vm_info.io_map.num_ports > 0) {
223     vmm_io_hook_t * iter;
224     addr_t io_port_bitmap;
225     
226     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
227     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
228     
229     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
230
231     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
232
233     FOREACH_IO_HOOK(vm_info.io_map, iter) {
234       ushort_t port = iter->port;
235       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
236
237       bitmap += (port / 8);
238       PrintDebug("Setting Bit in block %x\n", bitmap);
239       *bitmap |= 1 << (port % 8);
240     }
241
242
243     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
244
245     ctrl_area->instrs.instrs.IOIO_PROT = 1;
246   }
247
248   ctrl_area->instrs.instrs.INTR = 1;
249
250
251
252   if (vm_info.page_mode == SHADOW_PAGING) {
253     PrintDebug("Creating initial shadow page table\n");
254     vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
255     PrintDebug("Created\n");
256
257     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
258
259     ctrl_area->cr_reads.crs.cr3 = 1;
260     ctrl_area->cr_writes.crs.cr3 = 1;
261
262
263     ctrl_area->instrs.instrs.INVLPG = 1;
264     ctrl_area->instrs.instrs.INVLPGA = 1;
265
266     guest_state->g_pat = 0x7040600070406ULL;
267
268     guest_state->cr0 |= 0x80000000;
269   } else if (vm_info.page_mode == NESTED_PAGING) {
270     // Flush the TLB on entries/exits
271     //ctrl_area->TLB_CONTROL = 1;
272
273     // Enable Nested Paging
274     //ctrl_area->NP_ENABLE = 1;
275
276     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
277
278         // Set the Nested Page Table pointer
279     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
280     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
281
282     //   ctrl_area->N_CR3 = Get_CR3();
283     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
284
285     //    guest_state->g_pat = 0x7040600070406ULL;
286   }
287
288
289
290 }
291
292
293
294 void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
295   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
296   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
297   uint_t i;
298
299
300   guest_state->rsp = vm_info.vm_regs.rsp;
301   // guest_state->rip = vm_info.rip;
302   guest_state->rip = 0xfff0;
303
304   //ctrl_area->instrs.instrs.CR0 = 1;
305   ctrl_area->cr_reads.crs.cr0 = 1;
306   ctrl_area->cr_writes.crs.cr0 = 1;
307
308   guest_state->efer |= EFER_MSR_svm_enable;
309   guest_state->rflags = 0x00000002; // The reserved bit is always 1
310   ctrl_area->svm_instrs.instrs.VMRUN = 1;
311   // guest_state->cr0 = 0x00000001;    // PE 
312   ctrl_area->guest_ASID = 1;
313
314
315   ctrl_area->exceptions.ex_names.de = 1;
316   ctrl_area->exceptions.ex_names.df = 1;
317   ctrl_area->exceptions.ex_names.pf = 1;
318   ctrl_area->exceptions.ex_names.ts = 1;
319   ctrl_area->exceptions.ex_names.ss = 1;
320   ctrl_area->exceptions.ex_names.ac = 1;
321   ctrl_area->exceptions.ex_names.mc = 1;
322   ctrl_area->exceptions.ex_names.gp = 1;
323   ctrl_area->exceptions.ex_names.ud = 1;
324   ctrl_area->exceptions.ex_names.np = 1;
325   ctrl_area->exceptions.ex_names.of = 1;
326   ctrl_area->exceptions.ex_names.nmi = 1;
327
328   guest_state->cs.selector = 0xf000;
329   guest_state->cs.limit=~0u;
330   guest_state->cs.base = guest_state->cs.selector<<4;
331   guest_state->cs.attrib.raw = 0xf3;
332
333   
334   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
335   for ( i = 0; segregs[i] != NULL; i++) {
336     struct vmcb_selector * seg = segregs[i];
337     
338     seg->selector = 0x0000;
339     seg->base = seg->selector << 4;
340     seg->attrib.raw = 0xf3;
341     seg->limit = ~0u;
342   }
343   
344   if (vm_info.io_map.num_ports > 0) {
345     vmm_io_hook_t * iter;
346     addr_t io_port_bitmap;
347     
348     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
349     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
350     
351     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
352
353     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
354
355     FOREACH_IO_HOOK(vm_info.io_map, iter) {
356       ushort_t port = iter->port;
357       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
358
359       bitmap += (port / 8);
360       PrintDebug("Setting Bit in block %x\n", bitmap);
361       *bitmap |= 1 << (port % 8);
362     }
363
364
365     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
366
367     ctrl_area->instrs.instrs.IOIO_PROT = 1;
368   }
369
370   ctrl_area->instrs.instrs.INTR = 1;
371
372
373
374   if (vm_info.page_mode == SHADOW_PAGING) {
375     PrintDebug("Creating initial shadow page table\n");
376     vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
377     PrintDebug("Created\n");
378
379     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
380
381     //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));
382
383     ctrl_area->cr_reads.crs.cr3 = 1;
384     ctrl_area->cr_writes.crs.cr3 = 1;
385
386
387     ctrl_area->instrs.instrs.INVLPG = 1;
388     ctrl_area->instrs.instrs.INVLPGA = 1;
389
390     guest_state->g_pat = 0x7040600070406ULL;
391
392     guest_state->cr0 |= 0x80000000;
393   } else if (vm_info.page_mode == NESTED_PAGING) {
394     // Flush the TLB on entries/exits
395     //ctrl_area->TLB_CONTROL = 1;
396
397     // Enable Nested Paging
398     //ctrl_area->NP_ENABLE = 1;
399
400     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
401
402         // Set the Nested Page Table pointer
403     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
404     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
405
406     //   ctrl_area->N_CR3 = Get_CR3();
407     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
408
409     //    guest_state->g_pat = 0x7040600070406ULL;
410   }
411
412
413
414 }
415
416
417 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
418   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
419   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
420   uint_t i = 0;
421
422
423   guest_state->rsp = vm_info.vm_regs.rsp;
424   guest_state->rip = vm_info.rip;
425
426
427   /* I pretty much just gutted this from TVMM */
428   /* Note: That means its probably wrong */
429
430   // set the segment registers to mirror ours
431   guest_state->cs.selector = 1<<3;
432   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
433   guest_state->cs.attrib.fields.S = 1;
434   guest_state->cs.attrib.fields.P = 1;
435   guest_state->cs.attrib.fields.db = 1;
436   guest_state->cs.attrib.fields.G = 1;
437   guest_state->cs.limit = 0xfffff;
438   guest_state->cs.base = 0;
439   
440   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
441   for ( i = 0; segregs[i] != NULL; i++) {
442     struct vmcb_selector * seg = segregs[i];
443     
444     seg->selector = 2<<3;
445     seg->attrib.fields.type = 0x2; // Data Segment+read/write
446     seg->attrib.fields.S = 1;
447     seg->attrib.fields.P = 1;
448     seg->attrib.fields.db = 1;
449     seg->attrib.fields.G = 1;
450     seg->limit = 0xfffff;
451     seg->base = 0;
452   }
453
454
455   {
456     /* JRL THIS HAS TO GO */
457     
458     //    guest_state->tr.selector = GetTR_Selector();
459     guest_state->tr.attrib.fields.type = 0x9; 
460     guest_state->tr.attrib.fields.P = 1;
461     // guest_state->tr.limit = GetTR_Limit();
462     //guest_state->tr.base = GetTR_Base();// - 0x2000;
463     /* ** */
464   }
465
466
467   /* ** */
468
469
470   guest_state->efer |= EFER_MSR_svm_enable;
471   guest_state->rflags = 0x00000002; // The reserved bit is always 1
472   ctrl_area->svm_instrs.instrs.VMRUN = 1;
473   guest_state->cr0 = 0x00000001;    // PE 
474   ctrl_area->guest_ASID = 1;
475
476
477   //  guest_state->cpl = 0;
478
479
480
481   // Setup exits
482
483   ctrl_area->cr_writes.crs.cr4 = 1;
484   
485   ctrl_area->exceptions.ex_names.de = 1;
486   ctrl_area->exceptions.ex_names.df = 1;
487   ctrl_area->exceptions.ex_names.pf = 1;
488   ctrl_area->exceptions.ex_names.ts = 1;
489   ctrl_area->exceptions.ex_names.ss = 1;
490   ctrl_area->exceptions.ex_names.ac = 1;
491   ctrl_area->exceptions.ex_names.mc = 1;
492   ctrl_area->exceptions.ex_names.gp = 1;
493   ctrl_area->exceptions.ex_names.ud = 1;
494   ctrl_area->exceptions.ex_names.np = 1;
495   ctrl_area->exceptions.ex_names.of = 1;
496   ctrl_area->exceptions.ex_names.nmi = 1;
497
498   
499
500   ctrl_area->instrs.instrs.IOIO_PROT = 1;
501   ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);
502   
503   {
504     reg_ex_t tmp_reg;
505     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
506     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
507   }
508
509   ctrl_area->instrs.instrs.INTR = 1;
510
511   
512   {
513     char gdt_buf[6];
514     char idt_buf[6];
515
516     memset(gdt_buf, 0, 6);
517     memset(idt_buf, 0, 6);
518
519
520     uint_t gdt_base, idt_base;
521     ushort_t gdt_limit, idt_limit;
522     
523     GetGDTR(gdt_buf);
524     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
525     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
526     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
527
528     GetIDTR(idt_buf);
529     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
530     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
531     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
532
533
534     // gdt_base -= 0x2000;
535     //idt_base -= 0x2000;
536
537     guest_state->gdtr.base = gdt_base;
538     guest_state->gdtr.limit = gdt_limit;
539     guest_state->idtr.base = idt_base;
540     guest_state->idtr.limit = idt_limit;
541
542
543   }
544   
545   
546   // also determine if CPU supports nested paging
547   /*
548   if (vm_info.page_tables) {
549     //   if (0) {
550     // Flush the TLB on entries/exits
551     ctrl_area->TLB_CONTROL = 1;
552
553     // Enable Nested Paging
554     ctrl_area->NP_ENABLE = 1;
555
556     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
557
558         // Set the Nested Page Table pointer
559     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
560
561
562     //   ctrl_area->N_CR3 = Get_CR3();
563     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
564
565     guest_state->g_pat = 0x7040600070406ULL;
566
567     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
568     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
569     // Enable Paging
570     //    guest_state->cr0 |= 0x80000000;
571   }
572   */
573
574 }
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595 /*
596
597
598 void Init_VMCB_Real(vmcb_t * vmcb, struct guest_info vm_info) {
599   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
600   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
601   uint_t i;
602
603
604   guest_state->rsp = vm_info.vm_regs.rsp;
605   guest_state->rip = vm_info.rip;
606
607
608   guest_state->efer |= EFER_MSR_svm_enable;
609   guest_state->rflags = 0x00000002; // The reserved bit is always 1
610   ctrl_area->svm_instrs.instrs.VMRUN = 1;
611   ctrl_area->guest_ASID = 1;
612   guest_state->cr0 = 0x60000010;
613
614
615   ctrl_area->exceptions.ex_names.de = 1;
616   ctrl_area->exceptions.ex_names.df = 1;
617   ctrl_area->exceptions.ex_names.pf = 1;
618   ctrl_area->exceptions.ex_names.ts = 1;
619   ctrl_area->exceptions.ex_names.ss = 1;
620   ctrl_area->exceptions.ex_names.ac = 1;
621   ctrl_area->exceptions.ex_names.mc = 1;
622   ctrl_area->exceptions.ex_names.gp = 1;
623   ctrl_area->exceptions.ex_names.ud = 1;
624   ctrl_area->exceptions.ex_names.np = 1;
625   ctrl_area->exceptions.ex_names.of = 1;
626   ctrl_area->exceptions.ex_names.nmi = 1;
627
628   guest_state->cs.selector = 0xf000;
629   guest_state->cs.limit=0xffff;
630   guest_state->cs.base =  0xffff0000;
631   guest_state->cs.attrib.raw = 0x9a;
632
633   
634   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
635   for ( i = 0; segregs[i] != NULL; i++) {
636     struct vmcb_selector * seg = segregs[i];
637     
638     seg->selector = 0x0000;
639     seg->base = 0xffff0000;
640     seg->attrib.raw = 0x9b;
641     seg->limit = 0xffff;
642   }
643   
644   // Set GPRs 
645   //
646   //  EDX == 0xfxx
647   //  EAX, EBX, ECX, ESI, EDI, EBP, ESP == 0x0
648   //
649
650   guest_state->gdtr.base = 0;
651   guest_state->gdtr.limit = 0xffff;
652   guest_state->gdtr.attrib.raw = 0x0;
653
654   guest_state->idtr.base = 0;
655   guest_state->idtr.limit = 0xffff;
656   guest_state->idtr.attrib.raw = 0x0;
657
658   guest_state->ldtr.base = 0;
659   guest_state->ldtr.limit = 0xffff;
660   guest_state->ldtr.attrib.raw = 0x82;
661
662   guest_state->tr.base = 0;
663   guest_state->tr.limit = 0xffff;
664   guest_state->tr.attrib.raw = 0x83;
665
666
667
668
669   if (vm_info.io_map.num_ports > 0) {
670     vmm_io_hook_t * iter;
671     addr_t io_port_bitmap;
672     
673     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
674     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
675     
676     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
677
678     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
679
680     FOREACH_IO_HOOK(vm_info.io_map, iter) {
681       ushort_t port = iter->port;
682       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
683
684       bitmap += (port / 8);
685       PrintDebug("Setting Bit in block %x\n", bitmap);
686       *bitmap |= 1 << (port % 8);
687     }
688
689     ctrl_area->instrs.instrs.IOIO_PROT = 1;
690   }
691
692   ctrl_area->instrs.instrs.INTR = 1;
693
694   // also determine if CPU supports nested paging
695
696   if (vm_info.page_mode == SHADOW_PAGING) {
697     PrintDebug("Creating initial shadow page table\n");
698     vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
699     PrintDebug("Created\n");
700
701     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
702
703     ctrl_area->cr_reads.crs.cr3 = 1;
704     ctrl_area->cr_writes.crs.cr3 = 1;
705     ctrl_area->cr_reads.crs.cr0 = 1;
706     ctrl_area->cr_writes.crs.cr0 = 1;
707
708     ctrl_area->instrs.instrs.INVLPG = 1;
709     ctrl_area->instrs.instrs.INVLPGA = 1;
710
711         
712     guest_state->g_pat = 0x7040600070406ULL;
713
714     vm_info.shdw_pg_state.guest_cr0.e_reg.low = guest_state->cr0;
715     guest_state->cr0 |= 0x80000000;
716   } else if (vm_info.page_mode == NESTED_PAGING) {
717     // Flush the TLB on entries/exits
718     //ctrl_area->TLB_CONTROL = 1;
719
720     // Enable Nested Paging
721     //ctrl_area->NP_ENABLE = 1;
722
723     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
724
725         // Set the Nested Page Table pointer
726     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
727     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
728
729     //   ctrl_area->N_CR3 = Get_CR3();
730     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
731
732     //    guest_state->g_pat = 0x7040600070406ULL;
733   }
734
735 }
736 */