Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way; an example follows below.
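For example, to list the remote branches and switch to a release branch instead (the release branch name below is only an illustration; substitute one of the names that git branch -r actually reports):

  cd palacios
  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2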


Commit: changed the STAR MSR to be mode independent
File:   palacios/src/palacios/svm.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/svm_io.h>


extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);


static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}



static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    guest_state->rsp = vm_info->vm_regs.rsp;
    // guest_state->rip = vm_info->rip;
    guest_state->rip = 0xfff0;

    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds),
                                        &(guest_state->es), &(guest_state->fs),
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        v3_hook_msr(vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}


static int init_svm_guest(struct guest_info *info, struct v3_vm_config * config_ptr) {
    v3_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

    v3_config_devices(info, config_ptr);

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);


    info->run_state = VM_STOPPED;

    info->vm_regs.rdi = 0;
    info->vm_regs.rsi = 0;
    info->vm_regs.rbp = 0;
    info->vm_regs.rsp = 0;
    info->vm_regs.rbx = 0;
    info->vm_regs.rdx = 0;
    info->vm_regs.rcx = 0;
    info->vm_regs.rax = 0;

    return 0;
}



// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->run_state = VM_RUNNING;

    while (1) {
        ullong_t tmp_tsc;


#ifdef __V3_64BIT__

#define MSR_LSTAR         0xc0000082
#define MSR_CSTAR         0xc0000083
#define MSR_SF_MASK       0xc0000084
#define MSR_GS_BASE       0xc0000101
#define MSR_KERNGS_BASE   0xc0000102
        struct v3_msr host_cstar;
        struct v3_msr host_lstar;
        struct v3_msr host_syscall_mask;
        struct v3_msr host_gs_base;
        struct v3_msr host_kerngs_base;

#else

#define MSR_SYSENTER_CS       0x00000174
#define MSR_SYSENTER_ESP      0x00000175
#define MSR_SYSENTER_EIP      0x00000176

        struct v3_msr host_sysenter_cs;
        struct v3_msr host_sysenter_esp;
        struct v3_msr host_sysenter_eip;

#endif

#define MSR_STAR              0xc0000081
        struct v3_msr host_star;


        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n",
          (void *)(addr_t)info->segments.cs.base,
          (void *)(addr_t)info->rip);
        */


#ifdef __V3_64BIT__
        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));
#else
        v3_get_msr(MSR_SYSENTER_CS, &(host_sysenter_cs.hi), &(host_sysenter_cs.lo));
        v3_get_msr(MSR_SYSENTER_ESP, &(host_sysenter_esp.hi), &(host_sysenter_esp.lo));
        v3_get_msr(MSR_SYSENTER_EIP, &(host_sysenter_eip.hi), &(host_sysenter_eip.lo));
#endif
        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));

        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));

        rdtscll(tmp_tsc);

#ifdef __V3_64BIT__
        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);
#else
        v3_set_msr(MSR_SYSENTER_CS, host_sysenter_cs.hi, host_sysenter_cs.lo);
        v3_set_msr(MSR_SYSENTER_ESP, host_sysenter_esp.hi, host_sysenter_esp.lo);
        v3_set_msr(MSR_SYSENTER_EIP, host_sysenter_eip.hi, host_sysenter_eip.lo);
#endif
        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);


        //PrintDebug("SVM Returned\n");



        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        // GIF is cleared by hardware on #VMEXIT; briefly re-enable global
        // interrupts so pending host interrupts can be serviced, then mask
        // them again before the exit is handled.
        //PrintDebug("Turning on global interrupts\n");
        v3_stgi();
        v3_clgi();

        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }



        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->run_state = VM_ERROR;

            PrintDebug("SVM ERROR!!\n");

            PrintDebug("RIP: %p\n", (void *)(addr_t)(guest_state->rip));


            linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


            PrintDebug("RIP Linear: %p\n", (void *)linear_addr);
            v3_print_segments(info);
            v3_print_ctrl_regs(info);
            if (info->shdw_pg_mode == SHADOW_PAGING) {
                PrintDebug("Shadow Paging Guest Registers:\n");
                PrintDebug("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
                PrintDebug("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
                PrintDebug("\tGuest EFER=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_efer.value));
                // CR4
            }
            v3_print_GPRs(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);

            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}





/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
      PrintDebug("SVM Not Available\n");
      return 0;
    }  else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        // SVMDIS is a bit mask, so test the bit directly rather than comparing the masked value to 1
        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}



void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    void * host_state;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    host_state = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_state;
    msr.r_reg = (addr_t)host_state;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}



/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
