Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


first cut at cleaning up the VMX mess
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_profiler.h>
40
41 #include <palacios/vmm_direct_paging.h>
42
43 #include <palacios/vmm_ctrl_regs.h>
44 #include <palacios/vmm_config.h>
45 #include <palacios/svm_io.h>
46
47
48
49 // This is a global pointer to the host's VMCB
50 static void * host_vmcb = NULL;
51
52 extern void v3_stgi();
53 extern void v3_clgi();
54 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
55 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
56
57
58 static vmcb_t * Allocate_VMCB() {
59     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
60
61     memset(vmcb_page, 0, 4096);
62
63     return vmcb_page;
64 }
65
66
67
/*
 * Populate a freshly zeroed VMCB so the guest begins execution at the
 * x86 reset vector (cs.base 0xf0000 + rip 0xfff0 = 0xffff0), and set up
 * the intercept controls, IO/MSR permission bitmaps, and either shadow
 * or nested paging for this guest.  Returns early (leaving the VMCB
 * partially initialized) if passthrough page table setup fails.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Guest starts with an empty stack and rip at the reset vector offset
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // SVME must be set in the guest's EFER for VMRUN to succeed
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM instruction the guest might attempt
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept physical interrupt-like events rather than delivering
    // them to the guest directly
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // rdx after reset carries CPU identification info to the BIOS
    // (presumably a family/model signature -- confirm against BIOS expectations)
    vm_info->vm_regs.rdx = 0x00000f00;

    // CR0 reset value: caches disabled (CD|NW), ET set, paging/PE clear
    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;   // real-mode style attributes


    /* DEBUG FOR RETURN CODE */
    // Pre-seed exit_code so a VMRUN that never exits is distinguishable
    ctrl_area->exit_code = 1;


    // Initialize the remaining data segments identically (flat base 0,
    // 64KB limit is NOT used here: limit is set to ~0u below)
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    // Descriptor tables and TR start empty/minimal
    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Debug registers at their architectural reset values
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Hook all guest port IO through the IO permission bitmap
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    // Hook guest MSR accesses through the MSR permission bitmap
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        // TLB_CONTROL = 1 flushes the whole TLB on every VMRUN
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        // Track the guest's view of CR0 separately from the shadow value
        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        // Shadow paging needs to see all CR0/CR3/CR4 accesses and page faults
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // EFER writes must be virtualized so the guest cannot clear SVME
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;   // power-on default PAT

        // Paging is enabled in hardware even though the guest starts unpaged
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;   // power-on default PAT
    }
}
251
252
253 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
254
255
256     v3_pre_config_guest(info, config_ptr);
257
258     PrintDebug("Allocating VMCB\n");
259     info->vmm_data = (void*)Allocate_VMCB();
260
261     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
262     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
263
264     v3_post_config_guest(info, config_ptr);
265
266     return 0;
267 }
268
/*
 * Main guest execution loop.  Repeatedly: disable global interrupts
 * (CLGI), snapshot the host TSC, VMRUN the guest, re-enable interrupts
 * (STGI), account elapsed guest time, and dispatch the exit to
 * v3_handle_svm_exit().  The loop only ends when an exit handler fails,
 * in which case diagnostic state is dumped, run_state is set to
 * VM_ERROR, and 0 is returned.
 */
static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;   // total VMRUN round trips, for periodic stats



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    
    while (1) {
        ullong_t tmp_tsc;
        

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        // Record host TSC immediately before entry so guest time can be
        // credited for the span spent inside the guest
        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        // VMRUN takes the *physical* address of the guest VMCB; the host
        // state save area (host_vmcb) is passed for VMSAVE/VMLOAD in the
        // launch stub
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
        
        rdtscll(tmp_tsc);

        
        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();

        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;
        
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }

     
        if (v3_handle_svm_exit(info) != 0) {
            // Unhandled exit: dump as much state as possible and stop the VM
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            // NOTE(review): linear_addr stays 0 and is never set to the guest
            // rip, so the "rip" translation/dump below actually resolves guest
            // address 0 -- confirm whether this should use info->rip
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            // exit_info fields are printed as two 32-bit halves
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}
357
358
359
360
361
362 /* Checks machine SVM capability */
363 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
364 int v3_is_svm_capable() {
365     // Dinda
366     uint_t vm_cr_low = 0, vm_cr_high = 0;
367     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
368
369     v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
370   
371     PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
372
373     if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
374       PrintDebug("SVM Not Available\n");
375       return 0;
376     }  else {
377         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
378         
379         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
380         
381         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
382             PrintDebug("SVM is available but is disabled.\n");
383             
384             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
385             
386             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
387             
388             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
389                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
390             } else {
391                 PrintDebug("SVM is locked with a key\n");
392             }
393             return 0;
394
395         } else {
396             PrintDebug("SVM is available and  enabled.\n");
397
398             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
399             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
400             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
401             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
402             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
403
404
405             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
406                 PrintDebug("SVM Nested Paging not supported\n");
407             } else {
408                 PrintDebug("SVM Nested Paging supported\n");
409             }
410
411             return 1;
412         }
413     }
414 }
415
416 static int has_svm_nested_paging() {
417     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
418
419     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
420
421     //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
422
423     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
424         PrintDebug("SVM Nested Paging not supported\n");
425         return 0;
426     } else {
427         PrintDebug("SVM Nested Paging supported\n");
428         return 1;
429     }
430 }
431
432
433
/*
 * Global (per-CPU) SVM initialization: sets EFER.SVME, allocates and
 * registers the host state save area (VM_HSAVE_PA MSR), runs a one-off
 * VMSAVE/VMLOAD latency measurement, detects nested-paging support to
 * pick the cpu type, and installs the SVM-specific vmm operations.
 */
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    // NOTE(review): result is unchecked, and 4 pages are allocated although
    // the HSAVE area is architecturally one 4KB page -- confirm the headroom
    // is intentional.  No V3_VAddr() here: the raw (physical) address is
    // what the MSR and the VMSAVE/VMLOAD operands below require.
    host_vmcb = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = (addr_t)host_vmcb;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);



    /* 
     * Test VMSAVE/VMLOAD Latency 
     */
    // Encoded as raw bytes because older assemblers lack the mnemonics
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
    {
        uint32_t start_lo, start_hi;
        uint32_t end_lo, end_hi;
        uint64_t start, end;

        // First rdtsc result is parked in esi/edi; host_vmcb is moved from
        // rcx into rax as the VMSAVE operand; second rdtsc stays in eax/edx
        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmsave
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
        



        // Same measurement for VMLOAD
        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmload
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
        

                               
    }


    /* End Latency Test */

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589 /*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
590   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
591   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
592   uint_t i;
593
594
595   guest_state->rsp = vm_info.vm_regs.rsp;
596   guest_state->rip = vm_info.rip;
597
598
599   //ctrl_area->instrs.instrs.CR0 = 1;
600   ctrl_area->cr_reads.cr0 = 1;
601   ctrl_area->cr_writes.cr0 = 1;
602
603   guest_state->efer |= EFER_MSR_svm_enable;
604   guest_state->rflags = 0x00000002; // The reserved bit is always 1
605   ctrl_area->svm_instrs.VMRUN = 1;
606   // guest_state->cr0 = 0x00000001;    // PE 
607   ctrl_area->guest_ASID = 1;
608
609
610   ctrl_area->exceptions.de = 1;
611   ctrl_area->exceptions.df = 1;
612   ctrl_area->exceptions.pf = 1;
613   ctrl_area->exceptions.ts = 1;
614   ctrl_area->exceptions.ss = 1;
615   ctrl_area->exceptions.ac = 1;
616   ctrl_area->exceptions.mc = 1;
617   ctrl_area->exceptions.gp = 1;
618   ctrl_area->exceptions.ud = 1;
619   ctrl_area->exceptions.np = 1;
620   ctrl_area->exceptions.of = 1;
621   ctrl_area->exceptions.nmi = 1;
622
623   guest_state->cs.selector = 0x0000;
624   guest_state->cs.limit=~0u;
625   guest_state->cs.base = guest_state->cs.selector<<4;
626   guest_state->cs.attrib.raw = 0xf3;
627
628   
629   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
630   for ( i = 0; segregs[i] != NULL; i++) {
631     struct vmcb_selector * seg = segregs[i];
632     
633     seg->selector = 0x0000;
634     seg->base = seg->selector << 4;
635     seg->attrib.raw = 0xf3;
636     seg->limit = ~0u;
637   }
638   
639   if (vm_info.io_map.num_ports > 0) {
640     struct vmm_io_hook * iter;
641     addr_t io_port_bitmap;
642     
643     io_port_bitmap = (addr_t)V3_AllocPages(3);
644     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
645     
646     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
647
648     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
649
650     FOREACH_IO_HOOK(vm_info.io_map, iter) {
651       ushort_t port = iter->port;
652       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
653
654       bitmap += (port / 8);
655       PrintDebug("Setting Bit in block %x\n", bitmap);
656       *bitmap |= 1 << (port % 8);
657     }
658
659
660     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
661
662     ctrl_area->instrs.IOIO_PROT = 1;
663   }
664
665   ctrl_area->instrs.INTR = 1;
666
667
668
669   if (vm_info.page_mode == SHADOW_PAGING) {
670     PrintDebug("Creating initial shadow page table\n");
671     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
672     PrintDebug("Created\n");
673
674     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
675
676     ctrl_area->cr_reads.cr3 = 1;
677     ctrl_area->cr_writes.cr3 = 1;
678
679
680     ctrl_area->instrs.INVLPG = 1;
681     ctrl_area->instrs.INVLPGA = 1;
682
683     guest_state->g_pat = 0x7040600070406ULL;
684
685     guest_state->cr0 |= 0x80000000;
686   } else if (vm_info.page_mode == NESTED_PAGING) {
687     // Flush the TLB on entries/exits
688     //ctrl_area->TLB_CONTROL = 1;
689
690     // Enable Nested Paging
691     //ctrl_area->NP_ENABLE = 1;
692
693     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
694
695         // Set the Nested Page Table pointer
696     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
697     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
698
699     //   ctrl_area->N_CR3 = Get_CR3();
700     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
701
702     //    guest_state->g_pat = 0x7040600070406ULL;
703   }
704
705
706
707 }
708 */
709
710
711
712
713
714
715
716 #if 0
717 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
718   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
719   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
720   uint_t i = 0;
721
722
723   guest_state->rsp = vm_info.vm_regs.rsp;
724   guest_state->rip = vm_info.rip;
725
726
727   /* I pretty much just gutted this from TVMM */
728   /* Note: That means its probably wrong */
729
730   // set the segment registers to mirror ours
731   guest_state->cs.selector = 1<<3;
732   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
733   guest_state->cs.attrib.fields.S = 1;
734   guest_state->cs.attrib.fields.P = 1;
735   guest_state->cs.attrib.fields.db = 1;
736   guest_state->cs.attrib.fields.G = 1;
737   guest_state->cs.limit = 0xfffff;
738   guest_state->cs.base = 0;
739   
740   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
741   for ( i = 0; segregs[i] != NULL; i++) {
742     struct vmcb_selector * seg = segregs[i];
743     
744     seg->selector = 2<<3;
745     seg->attrib.fields.type = 0x2; // Data Segment+read/write
746     seg->attrib.fields.S = 1;
747     seg->attrib.fields.P = 1;
748     seg->attrib.fields.db = 1;
749     seg->attrib.fields.G = 1;
750     seg->limit = 0xfffff;
751     seg->base = 0;
752   }
753
754
755   {
756     /* JRL THIS HAS TO GO */
757     
758     //    guest_state->tr.selector = GetTR_Selector();
759     guest_state->tr.attrib.fields.type = 0x9; 
760     guest_state->tr.attrib.fields.P = 1;
761     // guest_state->tr.limit = GetTR_Limit();
762     //guest_state->tr.base = GetTR_Base();// - 0x2000;
763     /* ** */
764   }
765
766
767   /* ** */
768
769
770   guest_state->efer |= EFER_MSR_svm_enable;
771   guest_state->rflags = 0x00000002; // The reserved bit is always 1
772   ctrl_area->svm_instrs.VMRUN = 1;
773   guest_state->cr0 = 0x00000001;    // PE 
774   ctrl_area->guest_ASID = 1;
775
776
777   //  guest_state->cpl = 0;
778
779
780
781   // Setup exits
782
783   ctrl_area->cr_writes.cr4 = 1;
784   
785   ctrl_area->exceptions.de = 1;
786   ctrl_area->exceptions.df = 1;
787   ctrl_area->exceptions.pf = 1;
788   ctrl_area->exceptions.ts = 1;
789   ctrl_area->exceptions.ss = 1;
790   ctrl_area->exceptions.ac = 1;
791   ctrl_area->exceptions.mc = 1;
792   ctrl_area->exceptions.gp = 1;
793   ctrl_area->exceptions.ud = 1;
794   ctrl_area->exceptions.np = 1;
795   ctrl_area->exceptions.of = 1;
796   ctrl_area->exceptions.nmi = 1;
797
798   
799
800   ctrl_area->instrs.IOIO_PROT = 1;
801   ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
802   
803   {
804     reg_ex_t tmp_reg;
805     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
806     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
807   }
808
809   ctrl_area->instrs.INTR = 1;
810
811   
812   {
813     char gdt_buf[6];
814     char idt_buf[6];
815
816     memset(gdt_buf, 0, 6);
817     memset(idt_buf, 0, 6);
818
819
820     uint_t gdt_base, idt_base;
821     ushort_t gdt_limit, idt_limit;
822     
823     GetGDTR(gdt_buf);
824     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
825     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
826     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
827
828     GetIDTR(idt_buf);
829     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
830     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
831     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
832
833
834     // gdt_base -= 0x2000;
835     //idt_base -= 0x2000;
836
837     guest_state->gdtr.base = gdt_base;
838     guest_state->gdtr.limit = gdt_limit;
839     guest_state->idtr.base = idt_base;
840     guest_state->idtr.limit = idt_limit;
841
842
843   }
844   
845   
846   // also determine if CPU supports nested paging
847   /*
848   if (vm_info.page_tables) {
849     //   if (0) {
850     // Flush the TLB on entries/exits
851     ctrl_area->TLB_CONTROL = 1;
852
853     // Enable Nested Paging
854     ctrl_area->NP_ENABLE = 1;
855
856     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
857
858         // Set the Nested Page Table pointer
859     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
860
861
862     //   ctrl_area->N_CR3 = Get_CR3();
863     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
864
865     guest_state->g_pat = 0x7040600070406ULL;
866
867     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
868     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
869     // Enable Paging
870     //    guest_state->cr0 |= 0x80000000;
871   }
872   */
873
874 }
875
876
877
878
879
880 #endif
881
882