Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.
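
For example, a release branch can be checked out the same way. The branch name below is only a placeholder; run "git branch -r" to see which branches actually exist in the repository:

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2    # "Release-1.2" is a placeholder name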


palacios/src/palacios/svm.c:
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>


extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);

static vmcb_t * Allocate_VMCB() {
    // Allocate and zero a single (4KB) page to hold the VMCB
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}


static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    guest_state->rsp = vm_info->vm_regs.rsp;
    // guest_state->rip = vm_info->rip;
    // Start at the real-mode reset vector: CS base 0xf0000 + IP 0xfff0 = 0xffff0
    guest_state->rip = 0xfff0;

    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    if ( !RB_EMPTY_ROOT(&(vm_info->io_map)) ) {
        struct v3_io_hook * iter;
        struct rb_node * io_node = v3_rb_first(&(vm_info->io_map));
        addr_t io_port_bitmap;
        int i = 0;
        
        // Build the 3-page IO permission bitmap: one bit per I/O port,
        // with a set bit causing an exit on accesses to that (hooked) port
        io_port_bitmap = (addr_t)V3_VAddr(V3_AllocPages(3));
        memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

        ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr((void *)io_port_bitmap);

        //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

        do {
            iter = rb_entry(io_node, struct v3_io_hook, tree_node);

            ushort_t port = iter->port;
            uchar_t * bitmap = (uchar_t *)io_port_bitmap;
            //PrintDebug("%d: Hooking Port %d\n", i, port);

            bitmap += (port / 8);
            //      PrintDebug("Setting Bit for port 0x%x\n", port);
            *bitmap |= 1 << (port % 8);

            i++;
        } while ((io_node = v3_rb_next(io_node)));

        //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

        ctrl_area->instrs.IOIO_PROT = 1;
    }


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        vm_info->guest_efer.value = 0x0LL;

        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }

    if (vm_info->msr_map.num_hooks > 0) {
        PrintDebug("Hooking %d msrs\n", vm_info->msr_map.num_hooks);
        ctrl_area->MSRPM_BASE_PA = v3_init_svm_msr_map(vm_info);
        ctrl_area->instrs.MSR_PROT = 1;
    }

    /* Safety locations for fs/gs */
    //    vm_info->fs = 0;
    //    vm_info->gs = 0;
}


static int init_svm_guest(struct guest_info *info) {
    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);


    info->run_state = VM_STOPPED;

    //  info->rip = 0;
    
    info->vm_regs.rdi = 0;
    info->vm_regs.rsi = 0;
    info->vm_regs.rbp = 0;
    info->vm_regs.rsp = 0;
    info->vm_regs.rbx = 0;
    info->vm_regs.rdx = 0;
    info->vm_regs.rcx = 0;
    info->vm_regs.rax = 0;
    
    return 0;
}



// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    
    while (1) {
        ullong_t tmp_tsc;
        


#define MSR_STAR      0xc0000081
#define MSR_LSTAR     0xc0000082
#define MSR_CSTAR     0xc0000083
#define MSR_SF_MASK   0xc0000084
#define MSR_GS_BASE   0xc0000101
#define MSR_KERNGS_BASE   0xc0000102


        // Host copies of the syscall/GS-base MSRs; they are saved before the
        // guest runs and restored afterwards, since the guest may use its own values
        struct v3_msr host_cstar;
        struct v3_msr host_star;
        struct v3_msr host_lstar;
        struct v3_msr host_syscall_mask;
        struct v3_msr host_gs_base;
        struct v3_msr host_kerngs_base;

/*      v3_enable_ints(); */
/*      v3_clgi(); */


        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */


        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));


        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        //v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), &(info->fs), &(info->gs));
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));
        
        rdtscll(tmp_tsc);
        
        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);
        
        //PrintDebug("SVM Returned\n");



        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        //PrintDebug("Turning on global interrupts\n");
        v3_stgi();
        v3_clgi();
        
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }


     
        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            PrintDebug("RIP: %p\n", (void *)(addr_t)(guest_state->rip));


            linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


            PrintDebug("RIP Linear: %p\n", (void *)linear_addr);
            v3_print_segments(info);
            v3_print_ctrl_regs(info);
            if (info->shdw_pg_mode == SHADOW_PAGING) {
                PrintDebug("Shadow Paging Guest Registers:\n");
                PrintDebug("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
                PrintDebug("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
                // efer
                // CR4
            }
            v3_print_GPRs(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}




/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
  
    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
        
        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
        
        // SVMDIS is a bit mask in VM_CR, so test for non-zero rather than == 1
        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");
            
            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            
            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
            
            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}



void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    void * host_state;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU by setting EFER.SVME
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    host_state = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_state;
    msr.r_reg = (addr_t)host_state;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}



/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;

  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }
  
  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;
    
    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
*/




#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif