Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

Checking out any other branch works the same way; see the example below.
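For instance, to track a release branch (the branch name below is illustrative; run "git branch -r" to list the branches that actually exist), execute

  git checkout --track -b Release-1.2 origin/Release-1.2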


palacios.git: palacios/src/palacios/svm.c (commit e62f5a7629ecd28a9019a8bdf12241d3b1bfdff0)
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_emulate.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();


extern void DisableInts();

/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {
  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  // Nested paging support is reported in EDX of the SVM revision/feature leaf;
  // the bit is clear when the feature is absent.
  if ((cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS) & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("Nested Paging not supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;
}



void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;


  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);



  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;


  return;
}


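/* Allocate a VMCB for the guest, initialize it with the BIOS boot state
   (Init_VMCB_BIOS), and clear the guest's general purpose registers. */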
int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();


  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);


  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


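/* Guest run loop: disable global interrupts (CLGI), charge the elapsed host
   time to the guest via TSC_OFFSET, enter the guest with safe_svm_launch(),
   restore global interrupts (STGI), and dispatch the resulting #VMEXIT to
   handle_svm_exit(). On an unhandled exit, dump the faulting instruction and stop. */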
// can we start a kernel thread here...
int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {
    ullong_t tmp_tsc;

    CLGI();

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
    //PrintDebug("SVM Launch Args (vmcb=%x), (info=%x), (vm_regs=%x)\n", info->vmm_data,  &(info->vm_regs));
    //PrintDebug("Launching to RIP: %x\n", info->rip);
    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
    //launch_svm((vmcb_t*)(info->vmm_data));
    //PrintDebug("SVM Returned\n");
    rdtscll(tmp_tsc);
    info->time_state.guest_tsc += tmp_tsc - info->time_state.cached_host_tsc;


    STGI();


    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");


      PrintDebug("RIP: %x\n", guest_state->rip);


      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


      PrintDebug("RIP Linear: %x\n", linear_addr);

      guest_pa_to_host_pa(info, linear_addr, &host_addr);

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


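/* Allocate a single zeroed page to hold the VMCB (control area + state save area). */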
vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);


  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


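/* Generic VMCB setup: start the guest at the RIP/RSP recorded in vm_info,
   intercept CR0 accesses, VMRUN, physical interrupts, and a standard set of
   exceptions, build an I/O permission bitmap for any hooked ports, and
   configure shadow paging (the nested paging path is currently commented out). */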
void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}


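/* VMCB setup for booting a guest BIOS: real-mode state at the reset vector
   (CS base 0xf0000, RIP 0xfff0, CR0 = 0x60000010), real-mode segment and
   descriptor table limits, an I/O permission bitmap for hooked ports,
   intercepts on HLT and physical interrupts, and shadow paging when enabled. */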
void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  // guest_state->rip = vm_info.rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  vm_info.vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }



  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}


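/* Init_VMCB_pe() below is an unused protected-mode VMCB setup; it is disabled
   with #if 0 and kept for reference. */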
#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif



/*


void Init_VMCB_Real(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.instrs.VMRUN = 1;
  ctrl_area->guest_ASID = 1;
  guest_state->cr0 = 0x60000010;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0xffff0000;
  guest_state->cs.attrib.raw = 0x9a;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = 0xffff0000;
    seg->attrib.raw = 0x9b;
    seg->limit = 0xffff;
  }

  // Set GPRs
  //
  //  EDX == 0xfxx
  //  EAX, EBX, ECX, ESI, EDI, EBP, ESP == 0x0
  //

  guest_state->gdtr.base = 0;
  guest_state->gdtr.limit = 0xffff;
  guest_state->gdtr.attrib.raw = 0x0;

  guest_state->idtr.base = 0;
  guest_state->idtr.limit = 0xffff;
  guest_state->idtr.attrib.raw = 0x0;

  guest_state->ldtr.base = 0;
  guest_state->ldtr.limit = 0xffff;
  guest_state->ldtr.attrib.raw = 0x82;

  guest_state->tr.base = 0;
  guest_state->tr.limit = 0xffff;
  guest_state->tr.attrib.raw = 0x83;




  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    ctrl_area->instrs.instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.instrs.INTR = 1;

  // also determine if CPU supports nested paging

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;

    ctrl_area->cr_reads.crs.cr3 = 1;
    ctrl_area->cr_writes.crs.cr3 = 1;
    ctrl_area->cr_reads.crs.cr0 = 1;
    ctrl_area->cr_writes.crs.cr0 = 1;

    ctrl_area->instrs.instrs.INVLPG = 1;
    ctrl_area->instrs.instrs.INVLPGA = 1;


    guest_state->g_pat = 0x7040600070406ULL;

    vm_info.shdw_pg_state.guest_cr0.e_reg.low = guest_state->cr0;
    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/