Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way.
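
For example, to work from a release branch instead, list the remote branches and track the one you want (the branch name below is only illustrative, not necessarily one that exists; use whatever git branch -r actually reports):

  git branch -r
  git checkout --track -b Release-1.0 origin/Release-1.0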


palacios/src/palacios/svm.c (most recent commit: "updated the configuration/init process")

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>


extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);

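// Allocate one zeroed 4 KB page to hold the VMCB (the hardware requires a
// 4 KB aligned VMCB).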
static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}


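/*
 * Set up the VMCB so the guest begins in the standard BIOS reset state:
 * real mode with CS base 0xf0000 and rip 0xfff0, and CR0 set to the
 * architectural power-on value (0x60000010).
 */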
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    guest_state->rsp = vm_info->vm_regs.rsp;
    // guest_state->rip = vm_info->rip;
    guest_state->rip = 0xfff0;

    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds),
                                        &(guest_state->es), &(guest_state->fs),
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


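    /*
     * Build the I/O permission map: 3 pages (12 KB) holding one bit per I/O
     * port. A set bit causes a #VMEXIT on access to that port once the
     * IOIO_PROT intercept is enabled.
     */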
    if ( !RB_EMPTY_ROOT(&(vm_info->io_map)) ) {
        struct v3_io_hook * iter;
        struct rb_node * io_node = v3_rb_first(&(vm_info->io_map));
        addr_t io_port_bitmap;
        int i = 0;

        io_port_bitmap = (addr_t)V3_VAddr(V3_AllocPages(3));
        memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

        ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr((void *)io_port_bitmap);

        //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

        do {
            iter = rb_entry(io_node, struct v3_io_hook, tree_node);

            ushort_t port = iter->port;
            uchar_t * bitmap = (uchar_t *)io_port_bitmap;
            //PrintDebug("%d: Hooking Port %d\n", i, port);

            bitmap += (port / 8);
            //      PrintDebug("Setting Bit for port 0x%x\n", port);
            *bitmap |= 1 << (port % 8);

            i++;
        } while ((io_node = v3_rb_next(io_node)));

        //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

        ctrl_area->instrs.IOIO_PROT = 1;
    }


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


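    /*
     * Paging setup: with shadow paging the VMM intercepts control register
     * accesses (CR0/CR3 reads and writes, CR4 writes), page faults, and
     * INVLPG so it can maintain the shadow page tables itself; with nested
     * paging the hardware walks both levels, so only N_CR3 and NP_ENABLE
     * need to be programmed.
     */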
    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        vm_info->guest_efer.value = 0x0LL;

        v3_hook_msr(vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }

    if (vm_info->msr_map.num_hooks > 0) {
        PrintDebug("Hooking %d msrs\n", vm_info->msr_map.num_hooks);
        ctrl_area->MSRPM_BASE_PA = v3_init_svm_msr_map(vm_info);
        ctrl_area->instrs.MSR_PROT = 1;
    }

    /* Safety locations for fs/gs */
    //    vm_info->fs = 0;
    //    vm_info->gs = 0;
}


static int init_svm_guest(struct guest_info *info, struct v3_vm_config * config_ptr) {
    v3_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    v3_config_devices(info, config_ptr);

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);


    info->run_state = VM_STOPPED;

    //  info->rip = 0;

    info->vm_regs.rdi = 0;
    info->vm_regs.rsi = 0;
    info->vm_regs.rbp = 0;
    info->vm_regs.rsp = 0;
    info->vm_regs.rbx = 0;
    info->vm_regs.rdx = 0;
    info->vm_regs.rcx = 0;
    info->vm_regs.rax = 0;

    return 0;
}



// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->run_state = VM_RUNNING;

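    /*
     * Main run loop: capture host state that VMRUN does not preserve, enter
     * the guest with VMRUN, account for the time spent in the guest, and
     * dispatch the resulting #VMEXIT to v3_handle_svm_exit().
     */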
    while (1) {
        ullong_t tmp_tsc;



#define MSR_STAR      0xc0000081
#define MSR_LSTAR     0xc0000082
#define MSR_CSTAR     0xc0000083
#define MSR_SF_MASK   0xc0000084
#define MSR_GS_BASE   0xc0000101
#define MSR_KERNGS_BASE   0xc0000102


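        /*
         * Host copies of the syscall and GS-base MSRs. VMRUN and #VMEXIT do
         * not save or restore these, so they are read before entry and
         * written back afterwards in case the guest modified them.
         */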
        struct v3_msr host_cstar;
        struct v3_msr host_star;
        struct v3_msr host_lstar;
        struct v3_msr host_syscall_mask;
        struct v3_msr host_gs_base;
        struct v3_msr host_kerngs_base;

/*      v3_enable_ints(); */
/*      v3_clgi(); */


        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n",
          (void *)(addr_t)info->segments.cs.base,
          (void *)(addr_t)info->rip);
        */


        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));


        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

        //v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), &(info->fs), &(info->gs));
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));

        rdtscll(tmp_tsc);

        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);

        //PrintDebug("SVM Returned\n");



        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        //PrintDebug("Turning on global interrupts\n");
        v3_stgi();
        v3_clgi();

        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }


        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->run_state = VM_ERROR;

            PrintDebug("SVM ERROR!!\n");

            PrintDebug("RIP: %p\n", (void *)(addr_t)(guest_state->rip));


            linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


            PrintDebug("RIP Linear: %p\n", (void *)linear_addr);
            v3_print_segments(info);
            v3_print_ctrl_regs(info);
            if (info->shdw_pg_mode == SHADOW_PAGING) {
                PrintDebug("Shadow Paging Guest Registers:\n");
                PrintDebug("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
                PrintDebug("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
                // efer
                // CR4
            }
            v3_print_GPRs(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);

            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}




/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        // Test the SVMDIS bit directly (comparing the masked value to 1 is
        // only correct if the flag happens to be bit 0).
        if (vm_cr_low & SVM_VM_CR_MSR_svmdis) {
            PrintDebug("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}



void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    void * host_state;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
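    // (VMRUN saves host state to the physical address programmed into the
    // VM_HSAVE_PA MSR; the area must be at least one 4 KB page.)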
    host_state = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_state;
    msr.r_reg = (addr_t)host_state;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}



/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }




}
*/



#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}



#endif