Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
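
For example, a release branch can be tracked with the same checkout pattern. The branch name below is only a placeholder; list the remote branches first to see what actually exists:

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2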


palacios.git: palacios/src/palacios/svm.c (latest commit: architecture independence work)

#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_emulate.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();

/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {
  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  // The nested paging feature bit is reported in EDX of the SVM
  // revision/feature leaf; "not supported" means the bit is clear.
  if ((cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS) & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("Nested Paging not supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    return 1;
  }

  // SVM is disabled in VM_CR; check whether the BIOS left it unlockable
  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;
}


/* Enable SVM on this core and register the SVM-specific guest operations */
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;

  return;
}


int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


// can we start a kernel thread here...
int start_svm_guest(struct guest_info *info) {

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {

    // Clear the global interrupt flag so the host cannot be interrupted
    // between here and the VMRUN inside safe_svm_launch
    CLGI();

    //PrintDebug("SVM Launch Args (vmcb=%x), (info=%x), (vm_regs=%x)\n", info->vmm_data, info, &(info->vm_regs));
    PrintDebug("Launching to RIP: %x\n", info->rip);
    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));
    //launch_svm((vmcb_t*)(info->vmm_data));
    //PrintDebug("SVM Returned\n");

    // Re-enable global interrupts now that the guest has exited
    STGI();

    if (handle_svm_exit(info) != 0) {
      vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);

      guest_pa_to_host_pa(info, linear_addr, &host_addr);

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


/* Initialize a VMCB using the register state recorded in vm_info */
void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}


/* Initialize a VMCB for a guest that boots from the BIOS entry point
 * (real mode, CS:IP = f000:fff0) */
void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  // guest_state->rip = vm_info.rip;
  guest_state->rip = 0xfff0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  vm_info.vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }

  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }

  /* ** */

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  //  guest_state->cpl = 0;

  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }

  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}
#endif


/*

void Init_VMCB_Real(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.instrs.VMRUN = 1;
  ctrl_area->guest_ASID = 1;
  guest_state->cr0 = 0x60000010;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0xffff0000;
  guest_state->cs.attrib.raw = 0x9a;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = 0xffff0000;
    seg->attrib.raw = 0x9b;
    seg->limit = 0xffff;
  }

  // Set GPRs
  //
  //  EDX == 0xfxx
  //  EAX, EBX, ECX, ESI, EDI, EBP, ESP == 0x0
  //

  guest_state->gdtr.base = 0;
  guest_state->gdtr.limit = 0xffff;
  guest_state->gdtr.attrib.raw = 0x0;

  guest_state->idtr.base = 0;
  guest_state->idtr.limit = 0xffff;
  guest_state->idtr.attrib.raw = 0x0;

  guest_state->ldtr.base = 0;
  guest_state->ldtr.limit = 0xffff;
  guest_state->ldtr.attrib.raw = 0x82;

  guest_state->tr.base = 0;
  guest_state->tr.limit = 0xffff;
  guest_state->tr.attrib.raw = 0x83;

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    ctrl_area->instrs.instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.instrs.INTR = 1;

  // also determine if CPU supports nested paging

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3.e_reg.low |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3.r_reg;

    ctrl_area->cr_reads.crs.cr3 = 1;
    ctrl_area->cr_writes.crs.cr3 = 1;
    ctrl_area->cr_reads.crs.cr0 = 1;
    ctrl_area->cr_writes.crs.cr0 = 1;

    ctrl_area->instrs.instrs.INVLPG = 1;
    ctrl_area->instrs.instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    vm_info.shdw_pg_state.guest_cr0.e_reg.low = guest_state->cr0;
    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/
787 */