Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


moved to centralized state dump
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_profiler.h>
40
41 #include <palacios/vmm_direct_paging.h>
42
43 #include <palacios/vmm_ctrl_regs.h>
44 #include <palacios/vmm_config.h>
45 #include <palacios/svm_io.h>
46
47
48 extern void v3_stgi();
49 extern void v3_clgi();
50 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
51 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);
52
53
54 static vmcb_t * Allocate_VMCB() {
55     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
56
57     memset(vmcb_page, 0, 4096);
58
59     return vmcb_page;
60 }
61
62
63
/* Initialize a VMCB so the guest boots like a fresh x86 CPU: execution
 * starts at the real-mode reset vector (CS:IP = 0xf000:0xfff0) with
 * intercepts armed for the SVM instruction set, HLT, NMI/SMI/INIT,
 * interrupts, IO ports, and MSR accesses.  Also configures either
 * shadow or nested paging based on vm_info->shdw_pg_mode.
 *
 * Mutates both the VMCB and vm_info (vm_regs.rdx, shadow-paging state,
 * IO/MSR maps). */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    //
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;   // reset-vector offset; the CS base below supplies 0xf0000


    guest_state->cpl = 0;

    // The guest's EFER must have SVME set while it runs under SVM.
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM-specific instruction the guest might try.
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept the events the VMM must mediate rather than let the
    // guest see directly.
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // presumably the BIOS expects a CPU-family signature in (e)dx at
    // reset -- TODO confirm against the BIOS used by v3_config
    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;  // CD | NW | ET -- the x86 reset value of CR0


    // Real-mode CS: selector 0xf000 with base 0xf0000, so rip=0xfff0
    // lands on the reset vector at physical 0xffff0.
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // Give all remaining data segments identical flat real-mode state.
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug status/control registers.
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Hook IO ports through the IO permission bitmap.
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    // Hook MSR accesses through the MSR permission bitmap.
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        // TLB_CONTROL=1 flushes the entire TLB on every VMRUN.
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        // Guest initially believes paging is off (CR0.PG clear, ET set).
        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        // Shadow paging must see every CR0/CR3/CR4 access.
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // EFER is virtualized so the guest cannot clear SVME / flip LME
        // behind the shadow pager's back.
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow page table updates.
        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        // Hardware paging is on underneath (CR0.PG), even though the
        // guest-visible CR0 above says it is off.
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}
247
248
249 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
250
251
252     v3_pre_config_guest(info, config_ptr);
253
254     PrintDebug("Allocating VMCB\n");
255     info->vmm_data = (void*)Allocate_VMCB();
256
257     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
258     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
259
260     v3_post_config_guest(info, config_ptr);
261
262     return 0;
263 }
264
265
266
// can we start a kernel thread here...
/* Main VM run loop.  Each iteration: save the host MSRs that the guest
 * run can disturb, VMRUN the guest via v3_svm_launch, restore those
 * MSRs, account guest time, and dispatch the exit through
 * v3_handle_svm_exit().  On an unhandled exit the guest state is dumped
 * and the loop exits.  Returns 0. */
static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    
    while (1) {
        ullong_t tmp_tsc;
        

#ifdef __V3_64BIT__

#define MSR_LSTAR         0xc0000082
#define MSR_CSTAR         0xc0000083
#define MSR_SF_MASK       0xc0000084
#define MSR_GS_BASE       0xc0000101
#define MSR_KERNGS_BASE   0xc0000102
        // Host copies of the 64-bit syscall/GS-base MSRs.  These are
        // saved before launch and rewritten after -- presumably the
        // guest run (or the launch stub's vmload/vmsave) can clobber
        // them; TODO confirm against the v3_svm_launch assembly.
        struct v3_msr host_cstar;
        struct v3_msr host_lstar;
        struct v3_msr host_syscall_mask;
        struct v3_msr host_gs_base;
        struct v3_msr host_kerngs_base;

#else 

#define MSR_SYSENTER_CS       0x00000174
#define MSR_SYSENTER_ESP      0x00000175
#define MSR_SYSENTER_EIP      0x00000176

        // 32-bit host: the SYSENTER MSRs get the same save/restore
        // treatment instead.
        struct v3_msr host_sysenter_cs;
        struct v3_msr host_sysenter_esp;
        struct v3_msr host_sysenter_eip;

#endif

#define MSR_STAR              0xc0000081
        struct v3_msr host_star;


        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */


        // --- Save host MSRs before entering the guest ---
#ifdef __V3_64BIT__
        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));
#else 
        v3_get_msr(MSR_SYSENTER_CS, &(host_sysenter_cs.hi), &(host_sysenter_cs.lo));
        v3_get_msr(MSR_SYSENTER_ESP, &(host_sysenter_esp.hi), &(host_sysenter_esp.lo));
        v3_get_msr(MSR_SYSENTER_EIP, &(host_sysenter_eip.hi), &(host_sysenter_eip.lo));
#endif
        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));

        // Timestamp the entry so guest time can be advanced by the
        // TSC delta measured across the VMRUN.
        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        // VMRUN expects the VMCB's *physical* address.
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));
        
        rdtscll(tmp_tsc);
        
        // --- Restore host MSRs after the #VMEXIT ---
#ifdef __V3_64BIT__
        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);
#else 
        v3_set_msr(MSR_SYSENTER_CS, host_sysenter_cs.hi, host_sysenter_cs.lo);
        v3_set_msr(MSR_SYSENTER_ESP, host_sysenter_esp.hi, host_sysenter_esp.lo);
        v3_set_msr(MSR_SYSENTER_EIP, host_sysenter_eip.hi, host_sysenter_eip.lo);
#endif
        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);

        
        //PrintDebug("SVM Returned\n");



        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        //PrintDebug("Turning on global interrupts\n");
        // STGI briefly re-opens global interrupts (letting the host
        // service anything pending), then CLGI masks them again before
        // the exit is handled -- NOTE(review): confirm this open/close
        // window is intentional rather than a swapped pair.
        v3_stgi();
        v3_clgi();
        
        // Periodic progress / profiling output.
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }


     
        if (v3_handle_svm_exit(info) != 0) {
            // Unhandled exit: dump everything we can and stop the VM.
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            // exit_info1/2 are 64-bit; print each 32-bit half separately.
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            // NOTE(review): linear_addr is never derived from the guest
            // rip/CS base, so this translates guest address 0 -- the
            // "Host Address of rip" dump below is misleading as-is.
            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}
411
412
413
414
415
416 /* Checks machine SVM capability */
417 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
418 int v3_is_svm_capable() {
419     // Dinda
420     uint_t vm_cr_low = 0, vm_cr_high = 0;
421     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
422
423     v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
424   
425     PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
426
427     if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
428       PrintDebug("SVM Not Available\n");
429       return 0;
430     }  else {
431         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
432         
433         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
434         
435         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
436             PrintDebug("SVM is available but is disabled.\n");
437             
438             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
439             
440             PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
441             
442             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
443                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
444             } else {
445                 PrintDebug("SVM is locked with a key\n");
446             }
447             return 0;
448
449         } else {
450             PrintDebug("SVM is available and  enabled.\n");
451
452             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
453             PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
454             PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
455             PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
456             PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
457
458
459             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
460                 PrintDebug("SVM Nested Paging not supported\n");
461             } else {
462                 PrintDebug("SVM Nested Paging supported\n");
463             }
464
465             return 1;
466         }
467     }
468 }
469
470 static int has_svm_nested_paging() {
471     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
472
473     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
474
475     //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
476
477     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
478         PrintDebug("SVM Nested Paging not supported\n");
479         return 0;
480     } else {
481         PrintDebug("SVM Nested Paging supported\n");
482         return 1;
483     }
484 }
485
486
487
/* Enable SVM on this CPU, register the host state-save area, record the
 * detected SVM revision in v3_cpu_type, and install the SVM-specific
 * guest-management callbacks into vmm_ops. */
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    void * host_state;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    // Set EFER.SVME.  The high 32 bits are written as 0 -- they are
    // reserved/zero on current hardware, so only the low word matters.
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    // NOTE(review): the architecture needs one 4K page for HSAVE; four
    // are allocated here -- confirm whether the extra pages are needed.
    // Also confirm V3_AllocPages returns a *physical* address, since
    // VM_HSAVE_PA must be programmed with a PA.
    host_state = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_state;
    msr.r_reg = (addr_t)host_state;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    // Classify the CPU by whether nested paging is available.
    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578 /*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
579   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
580   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
581   uint_t i;
582
583
584   guest_state->rsp = vm_info.vm_regs.rsp;
585   guest_state->rip = vm_info.rip;
586
587
588   //ctrl_area->instrs.instrs.CR0 = 1;
589   ctrl_area->cr_reads.cr0 = 1;
590   ctrl_area->cr_writes.cr0 = 1;
591
592   guest_state->efer |= EFER_MSR_svm_enable;
593   guest_state->rflags = 0x00000002; // The reserved bit is always 1
594   ctrl_area->svm_instrs.VMRUN = 1;
595   // guest_state->cr0 = 0x00000001;    // PE 
596   ctrl_area->guest_ASID = 1;
597
598
599   ctrl_area->exceptions.de = 1;
600   ctrl_area->exceptions.df = 1;
601   ctrl_area->exceptions.pf = 1;
602   ctrl_area->exceptions.ts = 1;
603   ctrl_area->exceptions.ss = 1;
604   ctrl_area->exceptions.ac = 1;
605   ctrl_area->exceptions.mc = 1;
606   ctrl_area->exceptions.gp = 1;
607   ctrl_area->exceptions.ud = 1;
608   ctrl_area->exceptions.np = 1;
609   ctrl_area->exceptions.of = 1;
610   ctrl_area->exceptions.nmi = 1;
611
612   guest_state->cs.selector = 0x0000;
613   guest_state->cs.limit=~0u;
614   guest_state->cs.base = guest_state->cs.selector<<4;
615   guest_state->cs.attrib.raw = 0xf3;
616
617   
618   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
619   for ( i = 0; segregs[i] != NULL; i++) {
620     struct vmcb_selector * seg = segregs[i];
621     
622     seg->selector = 0x0000;
623     seg->base = seg->selector << 4;
624     seg->attrib.raw = 0xf3;
625     seg->limit = ~0u;
626   }
627   
628   if (vm_info.io_map.num_ports > 0) {
629     struct vmm_io_hook * iter;
630     addr_t io_port_bitmap;
631     
632     io_port_bitmap = (addr_t)V3_AllocPages(3);
633     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
634     
635     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
636
637     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
638
639     FOREACH_IO_HOOK(vm_info.io_map, iter) {
640       ushort_t port = iter->port;
641       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
642
643       bitmap += (port / 8);
644       PrintDebug("Setting Bit in block %x\n", bitmap);
645       *bitmap |= 1 << (port % 8);
646     }
647
648
649     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
650
651     ctrl_area->instrs.IOIO_PROT = 1;
652   }
653
654   ctrl_area->instrs.INTR = 1;
655
656
657
658   if (vm_info.page_mode == SHADOW_PAGING) {
659     PrintDebug("Creating initial shadow page table\n");
660     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
661     PrintDebug("Created\n");
662
663     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
664
665     ctrl_area->cr_reads.cr3 = 1;
666     ctrl_area->cr_writes.cr3 = 1;
667
668
669     ctrl_area->instrs.INVLPG = 1;
670     ctrl_area->instrs.INVLPGA = 1;
671
672     guest_state->g_pat = 0x7040600070406ULL;
673
674     guest_state->cr0 |= 0x80000000;
675   } else if (vm_info.page_mode == NESTED_PAGING) {
676     // Flush the TLB on entries/exits
677     //ctrl_area->TLB_CONTROL = 1;
678
679     // Enable Nested Paging
680     //ctrl_area->NP_ENABLE = 1;
681
682     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
683
684         // Set the Nested Page Table pointer
685     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
686     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
687
688     //   ctrl_area->N_CR3 = Get_CR3();
689     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
690
691     //    guest_state->g_pat = 0x7040600070406ULL;
692   }
693
694
695
696 }
697 */
698
699
700
701
702
703
704
/* Dead code: compiled out with #if 0.  An older protected-mode VMCB
 * setup (the guest mirrors the host's flat segments and descriptor
 * tables), kept for reference only. */
#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
870
871