Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way.
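For example, to track a release branch instead (the branch name below is only illustrative; run "git branch -r" after cloning to see which release branches actually exist):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2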


Below is palacios/src/palacios/svm.c from palacios.git, as of the commit "moved guest_efer from top level guest_info to the shadow page state".
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/svm_io.h>


extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);

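/* Allocate and zero a single page to hold the guest's VMCB. */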
static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}

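/* Populate the VMCB so the guest starts at the BIOS reset vector (CS:IP = f000:fff0
 * in real mode), and configure the instruction/interrupt intercepts, the IO and MSR
 * permission maps, and the shadow or nested paging state. */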
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    guest_state->rsp = vm_info->vm_regs.rsp;
    // guest_state->rip = vm_info->rip;
    guest_state->rip = 0xfff0;

    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        vm_info->guest_efer.value = 0x0LL;

        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }



    /* Safety locations for fs/gs */
    //    vm_info->fs = 0;
    //    vm_info->gs = 0;
}

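/* Guest bring-up for SVM: apply the configuration, allocate and populate the VMCB,
 * attach the configured devices, and clear the guest's general purpose registers. */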
static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
    v3_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

    v3_config_devices(info, config_ptr);

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);


    info->run_state = VM_STOPPED;

    //  info->rip = 0;

    info->vm_regs.rdi = 0;
    info->vm_regs.rsi = 0;
    info->vm_regs.rbp = 0;
    info->vm_regs.rsp = 0;
    info->vm_regs.rbx = 0;
    info->vm_regs.rdx = 0;
    info->vm_regs.rcx = 0;
    info->vm_regs.rax = 0;

    return 0;
}



// can we start a kernel thread here...
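/* Main run loop for an SVM guest: save host MSR state, enter the guest via
 * v3_svm_launch(), restore host state, update the virtual time, and hand the
 * exit to v3_handle_svm_exit(). On an unhandled exit, dump diagnostics and stop. */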
static int start_svm_guest(struct guest_info * info) {
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->run_state = VM_RUNNING;

    while (1) {
        ullong_t tmp_tsc;

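        /* Snapshot the host's syscall and GS-base MSRs before entering the guest;
         * the launch below can clobber them, so they are restored as soon as the
         * guest exits. */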
#define MSR_STAR      0xc0000081
#define MSR_LSTAR     0xc0000082
#define MSR_CSTAR     0xc0000083
#define MSR_SF_MASK   0xc0000084
#define MSR_GS_BASE   0xc0000101
#define MSR_KERNGS_BASE   0xc0000102


        struct v3_msr host_cstar;
        struct v3_msr host_star;
        struct v3_msr host_lstar;
        struct v3_msr host_syscall_mask;
        struct v3_msr host_gs_base;
        struct v3_msr host_kerngs_base;

/*      v3_enable_ints(); */
/*      v3_clgi(); */


        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */


        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));


        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

        //v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), &(info->fs), &(info->gs));
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));

        rdtscll(tmp_tsc);

        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);

        //PrintDebug("SVM Returned\n");



        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        //PrintDebug("Turning on global interrupts\n");
        v3_stgi();
        v3_clgi();

        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }


        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->run_state = VM_ERROR;

            PrintDebug("SVM ERROR!!\n"); 

            PrintDebug("RIP: %p\n", (void *)(addr_t)(guest_state->rip));


            linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


            PrintDebug("RIP Linear: %p\n", (void *)linear_addr);
            v3_print_segments(info);
            v3_print_ctrl_regs(info);
            if (info->shdw_pg_mode == SHADOW_PAGING) {
                PrintDebug("Shadow Paging Guest Registers:\n");
                PrintDebug("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
                PrintDebug("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
                // efer
                // CR4
            }
            v3_print_GPRs(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 

            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}




/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

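/* Check the SVM feature CPUID leaf to see whether the CPU advertises nested paging. */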
static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}

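/* Enable SVM on this CPU: set EFER.SVME, point SVM_VM_HSAVE_PA_MSR at a freshly
 * allocated host save area, record the detected CPU type, and register the
 * SVM-specific guest init/start/nested-paging operations with the generic VMM layer. */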
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    void * host_state;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    host_state = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_state;
    msr.r_reg = (addr_t)host_state;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
*/

#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means it's probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif