Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are handled the same way.
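For example, to follow a release branch (the branch name below is only an illustration; run "git branch -r" to list the remote branches that actually exist):

  git checkout --track -b Release-1.0 origin/Release-1.0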


palacios/src/palacios/svm.c:
#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();
extern void EnableInts();


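// Allocate one page from the host OS and zero it out for use as a VMCB.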
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


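// Initialize the VMCB for a BIOS-style boot: the guest starts in real mode at
// CS:IP = f000:fff0 with paging disabled, exceptions and selected instructions
// are intercepted, an I/O permission bitmap is installed for any hooked ports,
// and either shadow or nested paging is configured.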
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  // guest_state->rip = vm_info.rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  vm_info.vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info.shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    vm_info.shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info.shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;


    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info.shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 = ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}


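// Guest initialization entry point for SVM: allocate the VMCB, fill it in via
// Init_VMCB_BIOS(), and clear the guest's general purpose register state.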
static int init_svm_guest(struct guest_info * info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();


  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), *info);


  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


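// Main VM entry loop: global interrupts are disabled with CLGI around each
// world switch, the TSC offset is refreshed, safe_svm_launch() performs the
// VMRUN, STGI re-enables global interrupts, and handle_svm_exit() dispatches
// the exit.  A non-zero return from the exit handler dumps guest state and
// stops the loop.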
// can we start a kernel thread here...
static int start_svm_guest(struct guest_info * info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  while (1) {
    ullong_t tmp_tsc;

    EnableInts();
    CLGI();

    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    PrintDebug("Launching\n");
    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);

    STGI();

    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(&(info->segments));
      PrintV3CtrlRegs(&(info->ctrl_regs));

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  ret = cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;
    }
  }

#else

  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

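// Reports whether the CPU advertises nested paging (the NP bit of the SVM
// revision/feature CPUID leaf).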
int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }
}


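// Enable SVM on this CPU (set EFER.SVME), point the host state save area MSR
// at freshly allocated pages, and register the SVM implementations of the
// generic VMM control operations.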
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = os_hooks->allocate_pages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    vmm_io_hook_t * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)os_hooks->allocate_pages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector * segregs[] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif