Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Added vmsave/vmload latency measurements
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_profiler.h>
40
41 #include <palacios/vmm_direct_paging.h>
42
43 #include <palacios/vmm_ctrl_regs.h>
44 #include <palacios/vmm_config.h>
45 #include <palacios/svm_io.h>
46
47
48
// This is a global pointer to the host's VMCB: the state-save area that the
// hardware uses across VMRUN/#VMEXIT.  Allocated and registered with the
// VM_HSAVE_PA MSR in v3_init_SVM().
static void * host_vmcb = NULL;

// Assembly stubs (defined elsewhere): STGI/CLGI set and clear the global
// interrupt flag around the world switch.
extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
// Performs the VMRUN world switch into the guest; returns after a #VMEXIT.
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
56
57
58 static vmcb_t * Allocate_VMCB() {
59     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
60
61     memset(vmcb_page, 0, 4096);
62
63     return vmcb_page;
64 }
65
66
67
/* Initialize a freshly zeroed VMCB so the guest begins execution at the
 * x86 reset vector (CS:IP = 0xf000:0xfff0) as a BIOS expects, with real-mode
 * style segments, the intercept bitmaps armed, IO/MSR permission maps
 * installed, and paging configured for either shadow or nested mode.
 *
 * NOTE(review): returns void, so the early `return` on passthrough
 * page-table failure cannot signal the error to the caller — verify callers
 * can tolerate a partially initialized VMCB in that path.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Start at the reset vector offset with an empty stack.
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // EFER.SVME must be set in the guest copy for VMRUN to be legal.
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM instruction the guest might issue: none of these
    // are allowed to execute natively inside the VM.
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // External events (NMI/SMI/INIT) and PAUSE/shutdown also exit to the VMM.
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // RDX at reset conventionally holds the processor signature.
    vm_info->vm_regs.rdx = 0x00000f00;

    // CR0 reset value: CD | NW | ET | reserved — paging and protection off.
    guest_state->cr0 = 0x60000010;


    // CS maps the BIOS region: selector 0xf000 with base 0xf0000, so
    // CS:IP = f000:fff0 points at the reset vector.
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // All remaining data segments start flat at base 0.
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    // Descriptor tables and TR start empty/at zero, as after a hardware reset.
    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug registers.
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Install the IO permission bitmap and turn on IO interception.
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    // Install the MSR permission bitmap and turn on MSR interception.
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        // The guest believes CR0 has only ET set (paging off) even though the
        // shadow tables below have paging enabled.
        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        // Intercept control-register accesses so the shadow state stays in sync.
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // EFER is virtualized through read/write hooks.
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow-paging algorithm.
        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        // Actually enable paging in the real CR0 (shadow tables are live).
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}
251
252
253 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
254
255
256     v3_pre_config_guest(info, config_ptr);
257
258     PrintDebug("Allocating VMCB\n");
259     info->vmm_data = (void*)Allocate_VMCB();
260
261     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
262     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
263
264     v3_post_config_guest(info, config_ptr);
265
266     return 0;
267 }
268
269
270
/* Main entry/exit loop for an SVM guest.  Repeatedly: disable global
 * interrupts (CLGI), snapshot the TSC, VMRUN into the guest, snapshot the
 * TSC again on #VMEXIT, re-enable interrupts (STGI), account the elapsed
 * guest time, and dispatch the exit.  Only returns when exit handling
 * fails, after dumping diagnostic state.
 */
// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    
    while (1) {
        ullong_t tmp_tsc;
        

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        // TSC read must happen after CLGI so no host interrupt inflates the
        // measured guest interval.
        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        // World switch: VMRUN needs the VMCB's *physical* address.
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
        
        rdtscll(tmp_tsc);

        
        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();

        // Credit the guest with the cycles spent between the two TSC reads.
        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;
        
        // Periodic progress / profiling output every 5000 exits.
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }

     
        if (v3_handle_svm_exit(info) != 0) {
            // Exit handling failed: dump as much state as possible and stop.
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            // exit_info fields are printed as two 32-bit halves for 32-bit
            // PrintDebug compatibility.
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            // NOTE(review): linear_addr is never set to the guest RIP before
            // translation, so the instruction dump below is of guest address
            // 0, not of the faulting instruction — confirm intent.
            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}
360
361
362
363
364
365 /* Checks machine SVM capability */
366 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
367 int v3_is_svm_capable() {
368     // Dinda
369     uint_t vm_cr_low = 0, vm_cr_high = 0;
370     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
371
372     v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
373   
374     PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
375
376     if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
377       PrintDebug("SVM Not Available\n");
378       return 0;
379     }  else {
380         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
381         
382         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
383         
384         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
385             PrintDebug("SVM is available but is disabled.\n");
386             
387             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
388             
389             PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
390             
391             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
392                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
393             } else {
394                 PrintDebug("SVM is locked with a key\n");
395             }
396             return 0;
397
398         } else {
399             PrintDebug("SVM is available and  enabled.\n");
400
401             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
402             PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
403             PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
404             PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
405             PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
406
407
408             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
409                 PrintDebug("SVM Nested Paging not supported\n");
410             } else {
411                 PrintDebug("SVM Nested Paging supported\n");
412             }
413
414             return 1;
415         }
416     }
417 }
418
419 static int has_svm_nested_paging() {
420     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
421
422     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
423
424     //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
425
426     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
427         PrintDebug("SVM Nested Paging not supported\n");
428         return 0;
429     } else {
430         PrintDebug("SVM Nested Paging supported\n");
431         return 1;
432     }
433 }
434
435
436
/* Enable SVM on this CPU, register the host state-save area, run a one-off
 * VMSAVE/VMLOAD latency measurement, detect nested-paging support, and wire
 * the SVM entry points into the generic VMM control ops.
 */
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU by setting EFER.SVME.
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    // NOTE(review): the high half is written back as 0, discarding whatever
    // msr.e_reg.high held — confirm the EFER high dword is really always 0.
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    host_vmcb = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    // Store the full (possibly 64-bit) address via the union, then hand the
    // two 32-bit halves to v3_set_msr.
    msr.r_reg = (addr_t)host_vmcb;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);



    /* 
     * Test VMSAVE/VMLOAD Latency 
     */
// Raw opcodes because older assemblers lack the vmsave/vmload mnemonics;
// both instructions take the VMCB physical address in rAX.
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
    {
        uint32_t start_lo, start_hi;
        uint32_t end_lo, end_hi;
        uint64_t start, end;

        // First RDTSC result (edx:eax) is parked in edi:esi; host_vmcb is
        // then moved from rcx into rax for the VMSAVE; the second RDTSC's
        // edx:eax become the end timestamp via the =a/=d outputs.
        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmsave
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        // Reassemble the two 32-bit halves into 64-bit cycle counts.
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
        



        // Same measurement again, this time timing a VMLOAD.
        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmload
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
        

                               
    }


    /* End Latency Test */

    // Record whether this CPU supports nested paging.
    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592 /*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
593   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
594   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
595   uint_t i;
596
597
598   guest_state->rsp = vm_info.vm_regs.rsp;
599   guest_state->rip = vm_info.rip;
600
601
602   //ctrl_area->instrs.instrs.CR0 = 1;
603   ctrl_area->cr_reads.cr0 = 1;
604   ctrl_area->cr_writes.cr0 = 1;
605
606   guest_state->efer |= EFER_MSR_svm_enable;
607   guest_state->rflags = 0x00000002; // The reserved bit is always 1
608   ctrl_area->svm_instrs.VMRUN = 1;
609   // guest_state->cr0 = 0x00000001;    // PE 
610   ctrl_area->guest_ASID = 1;
611
612
613   ctrl_area->exceptions.de = 1;
614   ctrl_area->exceptions.df = 1;
615   ctrl_area->exceptions.pf = 1;
616   ctrl_area->exceptions.ts = 1;
617   ctrl_area->exceptions.ss = 1;
618   ctrl_area->exceptions.ac = 1;
619   ctrl_area->exceptions.mc = 1;
620   ctrl_area->exceptions.gp = 1;
621   ctrl_area->exceptions.ud = 1;
622   ctrl_area->exceptions.np = 1;
623   ctrl_area->exceptions.of = 1;
624   ctrl_area->exceptions.nmi = 1;
625
626   guest_state->cs.selector = 0x0000;
627   guest_state->cs.limit=~0u;
628   guest_state->cs.base = guest_state->cs.selector<<4;
629   guest_state->cs.attrib.raw = 0xf3;
630
631   
632   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
633   for ( i = 0; segregs[i] != NULL; i++) {
634     struct vmcb_selector * seg = segregs[i];
635     
636     seg->selector = 0x0000;
637     seg->base = seg->selector << 4;
638     seg->attrib.raw = 0xf3;
639     seg->limit = ~0u;
640   }
641   
642   if (vm_info.io_map.num_ports > 0) {
643     struct vmm_io_hook * iter;
644     addr_t io_port_bitmap;
645     
646     io_port_bitmap = (addr_t)V3_AllocPages(3);
647     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
648     
649     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
650
651     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
652
653     FOREACH_IO_HOOK(vm_info.io_map, iter) {
654       ushort_t port = iter->port;
655       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
656
657       bitmap += (port / 8);
658       PrintDebug("Setting Bit in block %x\n", bitmap);
659       *bitmap |= 1 << (port % 8);
660     }
661
662
663     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
664
665     ctrl_area->instrs.IOIO_PROT = 1;
666   }
667
668   ctrl_area->instrs.INTR = 1;
669
670
671
672   if (vm_info.page_mode == SHADOW_PAGING) {
673     PrintDebug("Creating initial shadow page table\n");
674     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
675     PrintDebug("Created\n");
676
677     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
678
679     ctrl_area->cr_reads.cr3 = 1;
680     ctrl_area->cr_writes.cr3 = 1;
681
682
683     ctrl_area->instrs.INVLPG = 1;
684     ctrl_area->instrs.INVLPGA = 1;
685
686     guest_state->g_pat = 0x7040600070406ULL;
687
688     guest_state->cr0 |= 0x80000000;
689   } else if (vm_info.page_mode == NESTED_PAGING) {
690     // Flush the TLB on entries/exits
691     //ctrl_area->TLB_CONTROL = 1;
692
693     // Enable Nested Paging
694     //ctrl_area->NP_ENABLE = 1;
695
696     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
697
698         // Set the Nested Page Table pointer
699     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
700     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
701
702     //   ctrl_area->N_CR3 = Get_CR3();
703     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
704
705     //    guest_state->g_pat = 0x7040600070406ULL;
706   }
707
708
709
710 }
711 */
712
713
714
715
716
717
718
#if 0
/* NOTE(review): Dead code — this whole function is compiled out by the
 * `#if 0` guard.  It is an older protected-mode VMCB setup kept only for
 * reference; the live initialization path is Init_VMCB_BIOS().
 */
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
884
885