Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way, as in the example below.
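For example, to track a release branch instead (the branch name Release-1.2 here is only illustrative; run git branch -r inside the clone to see which remote branches actually exist):

  cd palacios
  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2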


The file below is palacios/src/palacios/svm.c from palacios.git, as of the commit that updated the IO and MSR maps to allow hooking/unhooking dynamically at runtime.
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/svm_io.h>

extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);


static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}

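/*
 * Init_VMCB_BIOS(): set up a VMCB so the guest begins execution at the BIOS
 * reset vector (CS base 0xf0000, RIP 0xfff0) with zero-based data segments,
 * intercepts for the SVM instructions, HLT, NMI/SMI/INIT/PAUSE, and IO/MSR
 * accesses, and either shadow or nested paging depending on shdw_pg_mode.
 */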
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    guest_state->rsp = vm_info->vm_regs.rsp;
    // guest_state->rip = vm_info->rip;
    guest_state->rip = 0xfff0;

    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds),
                                        &(guest_state->es), &(guest_state->fs),
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;


    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        vm_info->guest_efer.value = 0x0LL;

        v3_hook_msr(vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;


        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }


    /* Safety locations for fs/gs */
    //    vm_info->fs = 0;
    //    vm_info->gs = 0;
}

static int init_svm_guest(struct guest_info *info, struct v3_vm_config * config_ptr) {
    v3_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

    v3_config_devices(info, config_ptr);

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);


    info->run_state = VM_STOPPED;

    //  info->rip = 0;

    info->vm_regs.rdi = 0;
    info->vm_regs.rsi = 0;
    info->vm_regs.rbp = 0;
    info->vm_regs.rsp = 0;
    info->vm_regs.rbx = 0;
    info->vm_regs.rdx = 0;
    info->vm_regs.rcx = 0;
    info->vm_regs.rax = 0;

    return 0;
}

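/*
 * start_svm_guest(): the guest run loop.  Each iteration saves the host's
 * syscall and GS-base MSRs, records the host TSC, enters the guest with
 * v3_svm_launch(), restores the host MSRs on #VMEXIT, updates guest time,
 * and dispatches the exit to v3_handle_svm_exit(), dumping guest state and
 * bailing out if the handler reports an error.
 */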

// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;


    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->run_state = VM_RUNNING;

    while (1) {
        ullong_t tmp_tsc;


#define MSR_STAR      0xc0000081
#define MSR_LSTAR     0xc0000082
#define MSR_CSTAR     0xc0000083
#define MSR_SF_MASK   0xc0000084
#define MSR_GS_BASE   0xc0000101
#define MSR_KERNGS_BASE   0xc0000102


        struct v3_msr host_cstar;
        struct v3_msr host_star;
        struct v3_msr host_lstar;
        struct v3_msr host_syscall_mask;
        struct v3_msr host_gs_base;
        struct v3_msr host_kerngs_base;

/*      v3_enable_ints(); */
/*      v3_clgi(); */


        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n",
          (void *)(addr_t)info->segments.cs.base,
          (void *)(addr_t)info->rip);
        */


        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));


        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

        //v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), &(info->fs), &(info->gs));
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));

        rdtscll(tmp_tsc);

        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);

        //PrintDebug("SVM Returned\n");


        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        //PrintDebug("Turning on global interrupts\n");
        v3_stgi();
        v3_clgi();

        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }


        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr = 0;  // stays 0 if neither translation below applies
            addr_t linear_addr = 0;

            info->run_state = VM_ERROR;

            PrintDebug("SVM ERROR!!\n");

            PrintDebug("RIP: %p\n", (void *)(addr_t)(guest_state->rip));


            linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


            PrintDebug("RIP Linear: %p\n", (void *)linear_addr);
            v3_print_segments(info);
            v3_print_ctrl_regs(info);
            if (info->shdw_pg_mode == SHADOW_PAGING) {
                PrintDebug("Shadow Paging Guest Registers:\n");
                PrintDebug("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
                PrintDebug("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
                // efer
                // CR4
            }
            v3_print_GPRs(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);

            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}

/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        // Test the SVMDIS bit itself; it is not bit 0, so comparing the masked value to 1 never matched.
        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}

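/*
 * v3_init_SVM(): enable SVM on this CPU by setting EFER.SVME, point
 * SVM_VM_HSAVE_PA_MSR at a freshly allocated host state save area, record
 * whether nested paging is available in v3_cpu_type, and register the
 * SVM-specific init/start/nested-paging operations with the VMM.
 */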
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    void * host_state;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    host_state = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_state;
    msr.r_reg = (addr_t)host_state;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
*/

#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif