Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; substitute the branch name you want for "devel".
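For example, to track a release branch, first list the remote branches to see which ones actually exist, then check one out the same way (the branch name Release-1.2 below is only illustrative):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2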


Added compile time dependencies for host MSRs
palacios.git: palacios/src/palacios/svm.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/svm_io.h>


extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);


static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}



static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    guest_state->rsp = vm_info->vm_regs.rsp;
    // guest_state->rip = vm_info->rip;
    // Start the guest at the real-mode reset vector: CS base 0xf0000 (set below) with IP 0xfff0
    guest_state->rip = 0xfff0;

    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;


    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}


static int init_svm_guest(struct guest_info *info, struct v3_vm_config * config_ptr) {
    v3_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

    v3_config_devices(info, config_ptr);

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);


    info->run_state = VM_STOPPED;

    info->vm_regs.rdi = 0;
    info->vm_regs.rsi = 0;
    info->vm_regs.rbp = 0;
    info->vm_regs.rsp = 0;
    info->vm_regs.rbx = 0;
    info->vm_regs.rdx = 0;
    info->vm_regs.rcx = 0;
    info->vm_regs.rax = 0;

    return 0;
}



// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;


    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->run_state = VM_RUNNING;

    while (1) {
        ullong_t tmp_tsc;

        // Host MSRs that are not automatically preserved across the VM entry/exit path;
        // they are saved here and restored below after v3_svm_launch() returns.
#ifdef __V3_64BIT__

#define MSR_LSTAR         0xc0000082
#define MSR_CSTAR         0xc0000083
#define MSR_SF_MASK       0xc0000084
#define MSR_GS_BASE       0xc0000101
#define MSR_KERNGS_BASE   0xc0000102
        struct v3_msr host_cstar;
        struct v3_msr host_lstar;
        struct v3_msr host_syscall_mask;
        struct v3_msr host_gs_base;
        struct v3_msr host_kerngs_base;

#else 

#define MSR_STAR              0xc0000081
#define MSR_SYSENTER_CS       0x00000174
#define MSR_SYSENTER_ESP      0x00000175
#define MSR_SYSENTER_EIP      0x00000176
        struct v3_msr host_star;
        struct v3_msr host_sysenter_cs;
        struct v3_msr host_sysenter_esp;
        struct v3_msr host_sysenter_eip;

#endif


        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */


#ifdef __V3_64BIT__
        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));
#else 
        v3_get_msr(MSR_SYSENTER_CS, &(host_sysenter_cs.hi), &(host_sysenter_cs.lo));
        v3_get_msr(MSR_SYSENTER_ESP, &(host_sysenter_esp.hi), &(host_sysenter_esp.lo));
        v3_get_msr(MSR_SYSENTER_EIP, &(host_sysenter_eip.hi), &(host_sysenter_eip.lo));
        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
#endif


        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));

        rdtscll(tmp_tsc);

#ifdef __V3_64BIT__
        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);
#else 
        v3_set_msr(MSR_SYSENTER_CS, host_sysenter_cs.hi, host_sysenter_cs.lo);
        v3_set_msr(MSR_SYSENTER_ESP, host_sysenter_esp.hi, host_sysenter_esp.lo);
        v3_set_msr(MSR_SYSENTER_EIP, host_sysenter_eip.hi, host_sysenter_eip.lo);
        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
#endif


        //PrintDebug("SVM Returned\n");


        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        //PrintDebug("Turning on global interrupts\n");
        // Briefly re-enable global interrupts (STGI) so pending host interrupts can be
        // delivered, then disable them again (CLGI) before handling the exit.
        v3_stgi();
        v3_clgi();

        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }


        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->run_state = VM_ERROR;

            PrintDebug("SVM ERROR!!\n"); 

            PrintDebug("RIP: %p\n", (void *)(addr_t)(guest_state->rip));


            linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


            PrintDebug("RIP Linear: %p\n", (void *)linear_addr);
            v3_print_segments(info);
            v3_print_ctrl_regs(info);
            if (info->shdw_pg_mode == SHADOW_PAGING) {
                PrintDebug("Shadow Paging Guest Registers:\n");
                PrintDebug("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
                PrintDebug("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
                PrintDebug("\tGuest EFER=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_efer.value));
                // CR4
            }
            v3_print_GPRs(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 

            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}



void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    void * host_state;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    host_state = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_state;
    msr.r_reg = (addr_t)host_state;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }


}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif