Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way.
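For example, you can list the remote branches and then track any one of them by name (the branch name below is only a placeholder; substitute one reported by git branch -r):

  git branch -r
  git checkout --track -b <branch> origin/<branch>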


Commit: fixed configuration macro checks and a few configuration bugs
File: palacios/src/palacios/svm.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/svm_io.h>



// This is a global pointer to the host's VMCB
static void * host_vmcb = NULL;

extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);


static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}



static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Start the guest at the standard x86 reset entry point:
    // IP 0xFFF0 with a CS base of 0xF0000 (set below) gives physical address 0xFFFF0
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}


static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {


    v3_pre_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

    v3_post_config_guest(info, config_ptr);

    return 0;
}

static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->run_state = VM_RUNNING;
    rdtscll(info->yield_start_cycle);


    while (1) {
        ullong_t tmp_tsc;

        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);

        rdtscll(tmp_tsc);


        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();


        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);


        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

#ifdef CONFIG_PROFILE_VMM
            if (info->enable_profiler) {
                v3_print_profile(info);
            }
#endif
        }

        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->run_state = VM_ERROR;

            PrintDebug("SVM ERROR!!\n"); 

            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 

            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }

            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}




/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        // SVM is disabled if the SVMDIS bit is set in VM_CR; test the masked bit directly
        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}



void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");

    // Setup the host state save area
    host_vmcb = V3_AllocPages(4);

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = (addr_t)host_vmcb;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    /* 
     * Test VMSAVE/VMLOAD Latency 
     */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
    {
        uint32_t start_lo, start_hi;
        uint32_t end_lo, end_hi;
        uint64_t start, end;

        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmsave
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );

        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;

        PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));

        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmload
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );

        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
    }
    /* End Latency Test */

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;
    
    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }


}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif