Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


lots of changes...
[palacios.git] / palacios / src / palacios / svm.c
1 #include <palacios/svm.h>
2 #include <palacios/vmm.h>
3
4 #include <palacios/vmcb.h>
5 #include <palacios/vmm_mem.h>
6 #include <palacios/vmm_paging.h>
7 #include <palacios/svm_handler.h>
8
9 #include <palacios/vmm_debug.h>
10 #include <palacios/vm_guest_mem.h>
11
12 #include <palacios/vmm_emulate.h>
13
14
15 extern struct vmm_os_hooks * os_hooks;
16
17 extern uint_t cpuid_ecx(uint_t op);
18 extern uint_t cpuid_edx(uint_t op);
19 extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte); 
20 extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
21 extern uint_t launch_svm(vmcb_t * vmcb_addr);
22 extern void safe_svm_launch(vmcb_t * vmcb_addr, struct guest_gprs * gprs);
23
24 extern void STGI();
25 extern void CLGI();
26
27 extern uint_t Get_CR3();
28
29
30 extern void DisableInts();
31
32 /* Checks machine SVM capability */
33 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
34 int is_svm_capable() {
35   uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
36   uint_t vm_cr_low = 0, vm_cr_high = 0;
37
38
39   if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
40     PrintDebug("SVM Not Available\n");
41     return 0;
42   } 
43
44   Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
45
46   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
47     PrintDebug("Nested Paging not supported\n");
48   }
49
50   if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
51     return 1;
52   }
53
54   ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);
55
56   if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
57     PrintDebug("SVM BIOS Disabled, not unlockable\n");
58   } else {
59     PrintDebug("SVM is locked with a key\n");
60   }
61
62   return 0;
63 }
64
65
66
67 void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
68   reg_ex_t msr;
69   void * host_state;
70
71
72   // Enable SVM on the CPU
73   Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
74   msr.e_reg.low |= EFER_MSR_svm_enable;
75   Set_MSR(EFER_MSR, 0, msr.e_reg.low);
76   
77   PrintDebug("SVM Enabled\n");
78
79
80   // Setup the host state save area
81   host_state = os_hooks->allocate_pages(4);
82   
83   msr.e_reg.high = 0;
84   msr.e_reg.low = (uint_t)host_state;
85
86
87   PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
88   Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
89
90
91
92   // Setup the SVM specific vmm operations
93   vmm_ops->init_guest = &init_svm_guest;
94   vmm_ops->start_guest = &start_svm_guest;
95
96
97   return;
98 }
99
100
101 int init_svm_guest(struct guest_info *info) {
102  
103   PrintDebug("Allocating VMCB\n");
104   info->vmm_data = (void*)Allocate_VMCB();
105
106
107   //PrintDebug("Generating Guest nested page tables\n");
108   //  info->page_tables = NULL;
109   //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
110   //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
111   //  PrintDebugPageTables(info->page_tables);
112
113
114   PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
115   Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);
116   
117
118   //  info->rip = 0;
119
120   info->vm_regs.rdi = 0;
121   info->vm_regs.rsi = 0;
122   info->vm_regs.rbp = 0;
123   info->vm_regs.rsp = 0;
124   info->vm_regs.rbx = 0;
125   info->vm_regs.rdx = 0;
126   info->vm_regs.rcx = 0;
127   info->vm_regs.rax = 0;
128   
129   return 0;
130 }
131
132
133 // can we start a kernel thread here...
/*
 * start_svm_guest -- run the guest until an exit cannot be handled.
 * Each iteration: mask global interrupts (CLGI), enter the guest via
 * safe_svm_launch (which performs VMRUN with the guest GPRs), restore
 * global interrupts (STGI), then dispatch the exit.  On an unhandled
 * exit, dump the guest RIP and the instruction bytes at it, then stop.
 */
134 int start_svm_guest(struct guest_info *info) {
135
136
137
138   PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
139   //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
140
141   while (1) {
142
        // Global interrupts stay masked across VMRUN so the host is not
        // interrupted between loading and saving guest state
143     CLGI();
144
145     //PrintDebug("SVM Launch Args (vmcb=%x), (info=%x), (vm_regs=%x)\n", info->vmm_data,  &(info->vm_regs));
146     PrintDebug("Launching to RIP: %x\n", info->rip);
147     safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
148     //launch_svm((vmcb_t*)(info->vmm_data));
149     //PrintDebug("SVM Returned\n");
150
151     
152
153     STGI();
154
155      
        // Non-zero means the exit handler could not recover; dump state
156     if (handle_svm_exit(info) != 0) {
157       vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
158       addr_t host_addr;
159       addr_t linear_addr = 0;
160
161       PrintDebug("SVM ERROR!!\n"); 
162       
163
164       PrintDebug("RIP: %x\n", guest_state->rip);
165
        // In real mode the segment is given by selector (base = sel<<4
        // presumably computed inside get_addr_linear); otherwise the
        // cached segment base is used -- TODO confirm against helper
166       if (info->cpu_mode == REAL) {
167         linear_addr = get_addr_linear(info, guest_state->rip, guest_state->cs.selector);
168       } else {
169         linear_addr = get_addr_linear(info, guest_state->rip, guest_state->cs.base);
170       }
171
172       PrintDebug("RIP Linear: %x\n", linear_addr);
173
        // NOTE(review): a *linear* address is fed to a guest-PA-to-host-PA
        // translation; correct only while guest paging is off / identity
        // mapped -- verify a VA translation isn't needed here
174       guest_pa_to_host_pa(info, linear_addr, &host_addr);
175
176       PrintDebug("Host Address of rip = 0x%x\n", host_addr);
177
178       PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
        // 15 bytes = maximum x86 instruction length
179       PrintTraceMemDump((char*)host_addr, 15);
180
181       break;
182     }
183   }
184   return 0;
185 }
186
187
188
189 vmcb_t * Allocate_VMCB() {
190   vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);
191
192
193   memset(vmcb_page, 0, 4096);
194
195   return vmcb_page;
196 }
197
198
199
/*
 * Init_VMCB -- program a VMCB for a guest entered at vm_info.rip with
 * flat real-mode-style segments (base = selector << 4, limit ~0).
 * Sets up CR0/CR3 intercepts, exception intercepts, the IO permission
 * bitmap, and shadow paging state.
 * NOTE(review): vm_info is passed BY VALUE -- the shadow_cr3 update
 * below lands only in this local copy; verify callers don't rely on it.
 */
200 void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
201   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
202   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
203   uint_t i;
204
205
206   guest_state->rsp = vm_info.vm_regs.rsp;
207   guest_state->rip = vm_info.rip;
208
209
      // Intercept all CR0 reads and writes
210   //ctrl_area->instrs.instrs.CR0 = 1;
211   ctrl_area->cr_reads.cr0 = 1;
212   ctrl_area->cr_writes.cr0 = 1;
213
      // EFER.SVME must be set in the guest state for VMRUN to succeed
214   guest_state->efer |= EFER_MSR_svm_enable;
215   guest_state->rflags = 0x00000002; // The reserved bit is always 1
216   ctrl_area->svm_instrs.VMRUN = 1;
217   // guest_state->cr0 = 0x00000001;    // PE 
      // ASID 0 is reserved for the host; guests must use a non-zero ASID
218   ctrl_area->guest_ASID = 1;
219
220
      // Intercept the listed exception vectors (#DE, #DF, #PF, ...)
221   ctrl_area->exceptions.de = 1;
222   ctrl_area->exceptions.df = 1;
223   ctrl_area->exceptions.pf = 1;
224   ctrl_area->exceptions.ts = 1;
225   ctrl_area->exceptions.ss = 1;
226   ctrl_area->exceptions.ac = 1;
227   ctrl_area->exceptions.mc = 1;
228   ctrl_area->exceptions.gp = 1;
229   ctrl_area->exceptions.ud = 1;
230   ctrl_area->exceptions.np = 1;
231   ctrl_area->exceptions.of = 1;
232   ctrl_area->exceptions.nmi = 1;
233
      // CS starts at selector 0 with a flat limit
234   guest_state->cs.selector = 0x0000;
235   guest_state->cs.limit=~0u;
236   guest_state->cs.base = guest_state->cs.selector<<4;
      // NOTE(review): attrib 0xf3 -- confirm this encodes the intended
      // (code) segment type; 0xf3 reads as a DPL-3 data-style attribute
237   guest_state->cs.attrib.raw = 0xf3;
238
239   
      // Give every data segment register the same flat setup
240   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
241   for ( i = 0; segregs[i] != NULL; i++) {
242     struct vmcb_selector * seg = segregs[i];
243     
244     seg->selector = 0x0000;
245     seg->base = seg->selector << 4;
246     seg->attrib.raw = 0xf3;
247     seg->limit = ~0u;
248   }
249   
      // Build the 3-page IO permission bitmap (one bit per port, 64K
      // ports + overrun page) and set a bit for each hooked port
250   if (vm_info.io_map.num_ports > 0) {
251     vmm_io_hook_t * iter;
252     addr_t io_port_bitmap;
253     
254     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
255     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
256     
257     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
258
259     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
260
261     FOREACH_IO_HOOK(vm_info.io_map, iter) {
262       ushort_t port = iter->port;
263       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
264
265       bitmap += (port / 8);
266       PrintDebug("Setting Bit in block %x\n", bitmap);
267       *bitmap |= 1 << (port % 8);
268     }
269
270
271     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
272
        // Only enable IO interception once the bitmap is in place
273     ctrl_area->instrs.IOIO_PROT = 1;
274   }
275
      // Exit on external interrupts
276   ctrl_area->instrs.INTR = 1;
277
278
279
280   if (vm_info.page_mode == SHADOW_PAGING) {
281     PrintDebug("Creating initial shadow page table\n");
        // Passthrough PDE32 page tables, page-aligned into shadow_cr3
        // (this writes the local vm_info copy only -- see header note)
282     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
283     PrintDebug("Created\n");
284
285     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
286
        // Intercept CR3 accesses and TLB invalidations so shadow tables
        // can be kept in sync
287     ctrl_area->cr_reads.cr3 = 1;
288     ctrl_area->cr_writes.cr3 = 1;
289
290
291     ctrl_area->instrs.INVLPG = 1;
292     ctrl_area->instrs.INVLPGA = 1;
293
294     guest_state->g_pat = 0x7040600070406ULL;
295
        // Turn on CR0.PG so the guest runs on the shadow tables
296     guest_state->cr0 |= 0x80000000;
297   } else if (vm_info.page_mode == NESTED_PAGING) {
        // Nested paging setup is currently disabled (all commented out)
298     // Flush the TLB on entries/exits
299     //ctrl_area->TLB_CONTROL = 1;
300
301     // Enable Nested Paging
302     //ctrl_area->NP_ENABLE = 1;
303
304     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
305
306         // Set the Nested Page Table pointer
307     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
308     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
309
310     //   ctrl_area->N_CR3 = Get_CR3();
311     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
312
313     //    guest_state->g_pat = 0x7040600070406ULL;
314   }
315
316
317
318 }
319
320
321
/*
 * Init_VMCB_BIOS -- program a VMCB so the guest boots like a freshly
 * reset PC: execution starts at the BIOS reset entry (CS base 0xf0000,
 * IP 0xfff0 -> linear 0xffff0), CR0 holds the x86 power-on value, and
 * descriptor-table / LDT / TR state matches the architectural reset
 * defaults.  Also installs intercepts, the IO bitmap, and paging state.
 * NOTE(review): vm_info is passed BY VALUE -- writes to it below
 * (vm_regs.rdx, shdw_pg_state.shadow_cr3) never reach the caller.
 */
322 void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
323   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
324   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
325   uint_t i;
326
327
328   guest_state->rsp = vm_info.vm_regs.rsp;
      // Boot at the reset IP rather than the caller-provided rip
329   // guest_state->rip = vm_info.rip;
330   guest_state->rip = 0xfff0;
331
      // Intercept all CR0 reads and writes
332   //ctrl_area->instrs.instrs.CR0 = 1;
333   ctrl_area->cr_reads.cr0 = 1;
334   ctrl_area->cr_writes.cr0 = 1;
335
      // EFER.SVME must be set in the guest state for VMRUN to succeed
336   guest_state->efer |= EFER_MSR_svm_enable;
337   guest_state->rflags = 0x00000002; // The reserved bit is always 1
338   ctrl_area->svm_instrs.VMRUN = 1;
      // Exit when the guest halts
339   ctrl_area->instrs.HLT = 1;
340   // guest_state->cr0 = 0x00000001;    // PE 
      // ASID 0 is reserved for the host; guests must use a non-zero ASID
341   ctrl_area->guest_ASID = 1;
342
      // Intercept the listed exception vectors (#DE, #DF, #PF, ...)
343   ctrl_area->exceptions.de = 1;
344   ctrl_area->exceptions.df = 1;
345   ctrl_area->exceptions.pf = 1;
346   ctrl_area->exceptions.ts = 1;
347   ctrl_area->exceptions.ss = 1;
348   ctrl_area->exceptions.ac = 1;
349   ctrl_area->exceptions.mc = 1;
350   ctrl_area->exceptions.gp = 1;
351   ctrl_area->exceptions.ud = 1;
352   ctrl_area->exceptions.np = 1;
353   ctrl_area->exceptions.of = 1;
354   ctrl_area->exceptions.nmi = 1;
355
      // NOTE(review): this writes the local by-value copy of vm_info, so
      // the caller's rdx is untouched -- likely intended to seed the
      // guest's DX with the CPU-reset model id; confirm where the real
      // vm_regs.rdx is supposed to be set
356   vm_info.vm_regs.rdx = 0x00000f00;
357
      // x86 power-on CR0 value (CD | NW | ET), paging and protection off
358   guest_state->cr0 = 0x60000010;
359
360   guest_state->cs.selector = 0xf000;
361   guest_state->cs.limit=0xffff;
      // Base 0xf0000 + rip 0xfff0 = linear 0xffff0, the real-mode reset
      // entry in the shadowed BIOS area
362   guest_state->cs.base = 0x0000000f0000LL;
      // NOTE(review): attrib 0xf3 -- confirm this encodes the intended
      // (code) segment type; 0xf3 reads as a DPL-3 data-style attribute
363   guest_state->cs.attrib.raw = 0xf3;
364
365   
      // All data segments get base 0, flat limit
366   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
367   for ( i = 0; segregs[i] != NULL; i++) {
368     struct vmcb_selector * seg = segregs[i];
369     
370     seg->selector = 0x0000;
371     //    seg->base = seg->selector << 4;
372     seg->base = 0x00000000;
373     seg->attrib.raw = 0xf3;
374     seg->limit = ~0u;
375   }
376   
      // GDTR/IDTR/LDTR/TR get the architectural reset defaults
377   guest_state->gdtr.limit = 0x0000ffff;
378   guest_state->gdtr.base = 0x0000000000000000LL;
379   guest_state->idtr.limit = 0x0000ffff;
380   guest_state->idtr.base = 0x0000000000000000LL;
381
382   guest_state->ldtr.selector = 0x0000;
383   guest_state->ldtr.limit = 0x0000ffff;
384   guest_state->ldtr.base = 0x0000000000000000LL;
385   guest_state->tr.selector = 0x0000;
386   guest_state->tr.limit = 0x0000ffff;
387   guest_state->tr.base = 0x0000000000000000LL;
388
389
      // DR6/DR7 reset values
390   guest_state->dr6 = 0x00000000ffff0ff0LL;
391   guest_state->dr7 = 0x0000000000000400LL;
392
      // Build the 3-page IO permission bitmap (one bit per port) and set
      // a bit for each hooked port
393   if (vm_info.io_map.num_ports > 0) {
394     vmm_io_hook_t * iter;
395     addr_t io_port_bitmap;
396     
397     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
398     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
399     
400     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
401
402     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
403
404     FOREACH_IO_HOOK(vm_info.io_map, iter) {
405       ushort_t port = iter->port;
406       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
407
408       bitmap += (port / 8);
409       PrintDebug("Setting Bit for port 0x%x\n", port);
410       *bitmap |= 1 << (port % 8);
411     }
412
413
414     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
415
        // Only enable IO interception once the bitmap is in place
416     ctrl_area->instrs.IOIO_PROT = 1;
417   }
418
419
420
      // Virtualize interrupt masking and exit on external interrupts
421   PrintDebug("Exiting on interrupts\n");
422   ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
423   ctrl_area->instrs.INTR = 1;
424
425
426   if (vm_info.page_mode == SHADOW_PAGING) {
427     PrintDebug("Creating initial shadow page table\n");
        // Passthrough PDE32 page tables, page-aligned into shadow_cr3
        // (this writes the local vm_info copy only -- see header note)
428     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
429     PrintDebug("Created\n");
430
431     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
432
433     //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));
434
        // Intercept CR3 accesses and TLB invalidations so shadow tables
        // can be kept in sync
435     ctrl_area->cr_reads.cr3 = 1;
436     ctrl_area->cr_writes.cr3 = 1;
437
438
439     ctrl_area->instrs.INVLPG = 1;
440     ctrl_area->instrs.INVLPGA = 1;
441
442     guest_state->g_pat = 0x7040600070406ULL;
443
        // Turn on CR0.PG so the guest runs on the shadow tables
444     guest_state->cr0 |= 0x80000000;
445   } else if (vm_info.page_mode == NESTED_PAGING) {
        // Nested paging setup is currently disabled (all commented out)
446     // Flush the TLB on entries/exits
447     //ctrl_area->TLB_CONTROL = 1;
448
449     // Enable Nested Paging
450     //ctrl_area->NP_ENABLE = 1;
451
452     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
453
454         // Set the Nested Page Table pointer
455     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
456     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
457
458     //   ctrl_area->N_CR3 = Get_CR3();
459     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
460
461     //    guest_state->g_pat = 0x7040600070406ULL;
462   }
463
464
465
466 }
467
468
469 #if 0
/*
 * Init_VMCB_pe -- COMPILED OUT (inside '#if 0'): experimental VMCB
 * setup for a protected-mode guest whose segments mirror the host's.
 * Retained for reference only; self-described as gutted from TVMM and
 * "probably wrong".  Do not rely on any behavior documented here.
 */
470 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
471   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
472   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
473   uint_t i = 0;
474
475
476   guest_state->rsp = vm_info.vm_regs.rsp;
477   guest_state->rip = vm_info.rip;
478
479
480   /* I pretty much just gutted this from TVMM */
481   /* Note: That means its probably wrong */
482
483   // set the segment registers to mirror ours
484   guest_state->cs.selector = 1<<3;
485   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
486   guest_state->cs.attrib.fields.S = 1;
487   guest_state->cs.attrib.fields.P = 1;
488   guest_state->cs.attrib.fields.db = 1;
489   guest_state->cs.attrib.fields.G = 1;
490   guest_state->cs.limit = 0xfffff;
491   guest_state->cs.base = 0;
492   
      // All data segments share selector 2<<3 with a flat 4G mapping
493   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
494   for ( i = 0; segregs[i] != NULL; i++) {
495     struct vmcb_selector * seg = segregs[i];
496     
497     seg->selector = 2<<3;
498     seg->attrib.fields.type = 0x2; // Data Segment+read/write
499     seg->attrib.fields.S = 1;
500     seg->attrib.fields.P = 1;
501     seg->attrib.fields.db = 1;
502     seg->attrib.fields.G = 1;
503     seg->limit = 0xfffff;
504     seg->base = 0;
505   }
506
507
508   {
509     /* JRL THIS HAS TO GO */
510     
511     //    guest_state->tr.selector = GetTR_Selector();
512     guest_state->tr.attrib.fields.type = 0x9; 
513     guest_state->tr.attrib.fields.P = 1;
514     // guest_state->tr.limit = GetTR_Limit();
515     //guest_state->tr.base = GetTR_Base();// - 0x2000;
516     /* ** */
517   }
518
519
520   /* ** */
521
522
523   guest_state->efer |= EFER_MSR_svm_enable;
524   guest_state->rflags = 0x00000002; // The reserved bit is always 1
525   ctrl_area->svm_instrs.VMRUN = 1;
526   guest_state->cr0 = 0x00000001;    // PE 
527   ctrl_area->guest_ASID = 1;
528
529
530   //  guest_state->cpl = 0;
531
532
533
534   // Setup exits
535
536   ctrl_area->cr_writes.cr4 = 1;
537   
538   ctrl_area->exceptions.de = 1;
539   ctrl_area->exceptions.df = 1;
540   ctrl_area->exceptions.pf = 1;
541   ctrl_area->exceptions.ts = 1;
542   ctrl_area->exceptions.ss = 1;
543   ctrl_area->exceptions.ac = 1;
544   ctrl_area->exceptions.mc = 1;
545   ctrl_area->exceptions.gp = 1;
546   ctrl_area->exceptions.ud = 1;
547   ctrl_area->exceptions.np = 1;
548   ctrl_area->exceptions.of = 1;
549   ctrl_area->exceptions.nmi = 1;
550
551   
552
553   ctrl_area->instrs.IOIO_PROT = 1;
554   ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);
555   
556   {
557     reg_ex_t tmp_reg;
558     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
        // NOTE(review): memset's fill value is truncated to 0xff, and
        // only 2 of the 3 allocated pages are filled -- were this code
        // ever revived, confirm both the value and the length
559     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
560   }
561
562   ctrl_area->instrs.INTR = 1;
563
564   
565   {
566     char gdt_buf[6];
567     char idt_buf[6];
568
569     memset(gdt_buf, 0, 6);
570     memset(idt_buf, 0, 6);
571
572
573     uint_t gdt_base, idt_base;
574     ushort_t gdt_limit, idt_limit;
575     
        // Copy the host's descriptor-table registers into the guest
        // (SGDT/SIDT store 2-byte limit followed by 4-byte base)
576     GetGDTR(gdt_buf);
577     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
578     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
579     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
580
581     GetIDTR(idt_buf);
582     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
583     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
584     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
585
586
587     // gdt_base -= 0x2000;
588     //idt_base -= 0x2000;
589
590     guest_state->gdtr.base = gdt_base;
591     guest_state->gdtr.limit = gdt_limit;
592     guest_state->idtr.base = idt_base;
593     guest_state->idtr.limit = idt_limit;
594
595
596   }
597   
598   
599   // also determine if CPU supports nested paging
600   /*
601   if (vm_info.page_tables) {
602     //   if (0) {
603     // Flush the TLB on entries/exits
604     ctrl_area->TLB_CONTROL = 1;
605
606     // Enable Nested Paging
607     ctrl_area->NP_ENABLE = 1;
608
609     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
610
611         // Set the Nested Page Table pointer
612     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
613
614
615     //   ctrl_area->N_CR3 = Get_CR3();
616     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
617
618     guest_state->g_pat = 0x7040600070406ULL;
619
620     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
621     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
622     // Enable Paging
623     //    guest_state->cr0 |= 0x80000000;
624   }
625   */
626
627 }
628
629
630
631
632
633 #endif
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648 /*
649
650
651 void Init_VMCB_Real(vmcb_t * vmcb, struct guest_info vm_info) {
652   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
653   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
654   uint_t i;
655
656
657   guest_state->rsp = vm_info.vm_regs.rsp;
658   guest_state->rip = vm_info.rip;
659
660
661   guest_state->efer |= EFER_MSR_svm_enable;
662   guest_state->rflags = 0x00000002; // The reserved bit is always 1
663   ctrl_area->svm_instrs.instrs.VMRUN = 1;
664   ctrl_area->guest_ASID = 1;
665   guest_state->cr0 = 0x60000010;
666
667
668   ctrl_area->exceptions.de = 1;
669   ctrl_area->exceptions.df = 1;
670   ctrl_area->exceptions.pf = 1;
671   ctrl_area->exceptions.ts = 1;
672   ctrl_area->exceptions.ss = 1;
673   ctrl_area->exceptions.ac = 1;
674   ctrl_area->exceptions.mc = 1;
675   ctrl_area->exceptions.gp = 1;
676   ctrl_area->exceptions.ud = 1;
677   ctrl_area->exceptions.np = 1;
678   ctrl_area->exceptions.of = 1;
679   ctrl_area->exceptions.nmi = 1;
680
681   guest_state->cs.selector = 0xf000;
682   guest_state->cs.limit=0xffff;
683   guest_state->cs.base =  0xffff0000;
684   guest_state->cs.attrib.raw = 0x9a;
685
686   
687   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
688   for ( i = 0; segregs[i] != NULL; i++) {
689     struct vmcb_selector * seg = segregs[i];
690     
691     seg->selector = 0x0000;
692     seg->base = 0xffff0000;
693     seg->attrib.raw = 0x9b;
694     seg->limit = 0xffff;
695   }
696   
697   // Set GPRs 
698   //
699   //  EDX == 0xfxx
700   //  EAX, EBX, ECX, ESI, EDI, EBP, ESP == 0x0
701   //
702
703   guest_state->gdtr.base = 0;
704   guest_state->gdtr.limit = 0xffff;
705   guest_state->gdtr.attrib.raw = 0x0;
706
707   guest_state->idtr.base = 0;
708   guest_state->idtr.limit = 0xffff;
709   guest_state->idtr.attrib.raw = 0x0;
710
711   guest_state->ldtr.base = 0;
712   guest_state->ldtr.limit = 0xffff;
713   guest_state->ldtr.attrib.raw = 0x82;
714
715   guest_state->tr.base = 0;
716   guest_state->tr.limit = 0xffff;
717   guest_state->tr.attrib.raw = 0x83;
718
719
720
721
722   if (vm_info.io_map.num_ports > 0) {
723     vmm_io_hook_t * iter;
724     addr_t io_port_bitmap;
725     
726     io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
727     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
728     
729     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
730
731     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
732
733     FOREACH_IO_HOOK(vm_info.io_map, iter) {
734       ushort_t port = iter->port;
735       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
736
737       bitmap += (port / 8);
738       PrintDebug("Setting Bit in block %x\n", bitmap);
739       *bitmap |= 1 << (port % 8);
740     }
741
742     ctrl_area->instrs.instrs.IOIO_PROT = 1;
743   }
744
745   ctrl_area->instrs.instrs.INTR = 1;
746
747   // also determine if CPU supports nested paging
748
749   if (vm_info.page_mode == SHADOW_PAGING) {
750     PrintDebug("Creating initial shadow page table\n");
751     vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
752     PrintDebug("Created\n");
753
754     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;
755
756     ctrl_area->cr_reads.crs.cr3 = 1;
757     ctrl_area->cr_writes.crs.cr3 = 1;
758     ctrl_area->cr_reads.crs.cr0 = 1;
759     ctrl_area->cr_writes.crs.cr0 = 1;
760
761     ctrl_area->instrs.instrs.INVLPG = 1;
762     ctrl_area->instrs.instrs.INVLPGA = 1;
763
764         
765     guest_state->g_pat = 0x7040600070406ULL;
766
767     vm_info.shdw_pg_state.guest_cr0.e_reg.low = guest_state->cr0;
768     guest_state->cr0 |= 0x80000000;
769   } else if (vm_info.page_mode == NESTED_PAGING) {
770     // Flush the TLB on entries/exits
771     //ctrl_area->TLB_CONTROL = 1;
772
773     // Enable Nested Paging
774     //ctrl_area->NP_ENABLE = 1;
775
776     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
777
778         // Set the Nested Page Table pointer
779     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
780     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
781
782     //   ctrl_area->N_CR3 = Get_CR3();
783     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
784
785     //    guest_state->g_pat = 0x7040600070406ULL;
786   }
787
788 }
789 */