Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


more cleanup
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_profiler.h>
40
41 #include <palacios/vmm_direct_paging.h>
42
43 #include <palacios/vmm_ctrl_regs.h>
44 #include <palacios/vmm_config.h>
45 #include <palacios/svm_io.h>
46
47
48
// Host state-save area used by VMRUN/#VMEXIT.  Holds the value returned by
// V3_AllocPages(4) in v3_init_SVM, which is programmed into the
// SVM_VM_HSAVE_PA_MSR (so it appears to be a physical address — TODO confirm
// against V3_AllocPages' contract).
static void * host_vmcb = NULL;

// Low-level assembly stubs.  v3_stgi/v3_clgi wrap the STGI/CLGI instructions
// to re-enable/disable global interrupts around a VM entry (see the call
// sites in start_svm_guest).  NOTE(review): presumably defined in the SVM
// lowlevel assembly file — confirm.
extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
56
57
58 static vmcb_t * Allocate_VMCB() {
59     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
60
61     memset(vmcb_page, 0, 4096);
62
63     return vmcb_page;
64 }
65
66
67
/*
 * Init_VMCB_BIOS - program a freshly zeroed VMCB so the guest begins
 * execution at the conventional x86 reset vector (CS:IP = f000:fff0),
 * with intercepts, I/O and MSR bitmaps, and either shadow or nested
 * paging configured according to vm_info->shdw_pg_mode.
 *
 * vmcb     - the guest's VMCB (must be zeroed; see Allocate_VMCB).
 * vm_info  - the guest being configured; its rdx and paging state are
 *            updated as a side effect.
 *
 * Returns nothing; on passthrough-page-table failure it logs an error
 * and returns with the VMCB only partially initialized.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Reset-vector entry point: rip=0xfff0 combines with cs.base=0xf0000
    // below to give the classic f000:fff0 start address.
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // EFER.SVME must be set in the guest's EFER for VMRUN to succeed.
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every privileged SVM instruction the guest might issue,
    // plus a few misc instructions (RDTSCP/ICEBP/WBINVD/MONITOR/MWAIT).
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept external machine events so the VMM regains control.
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // NOTE(review): looks like a BIOS-visible CPU-identification value left
    // in rdx at reset — confirm which family/model this encodes.
    vm_info->vm_regs.rdx = 0x00000f00;

    // Initial CR0: paging/protection off, cache-disable bits set.
    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;  // presumably a present, writable real-mode style descriptor — verify against the vmcb_selector layout


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // All other segments start as selector 0 with base 0; note the limit is
    // ~0u here while gdtr/idtr below use 0xffff.
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug registers.
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Install the I/O permission bitmap and turn on I/O interception.
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    // Install the MSR permission bitmap and turn on MSR interception.
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;   // flush the TLB on every entry
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        // Guest initially runs on the passthrough (identity) page tables.
        guest_state->cr3 = vm_info->direct_map_pt;

        // Intercept control-register accesses so the shadow-paging code can
        // track the guest's view of CR0/CR3/CR4.
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // Virtualize EFER so guest paging-mode switches are observed.
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow page-table updates.
        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;  // default PAT encoding

        // Shadow paging needs real paging enabled in hardware.
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}
251
252
253 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
254
255
256     v3_pre_config_guest(info, config_ptr);
257
258     PrintDebug("Allocating VMCB\n");
259     info->vmm_data = (void*)Allocate_VMCB();
260
261     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
262     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
263
264     v3_post_config_guest(info, config_ptr);
265
266     return 0;
267 }
268
269 static int start_svm_guest(struct guest_info *info) {
270     //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
271     //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
272     uint_t num_exits = 0;
273
274
275
276     PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
277     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
278     
279     info->run_state = VM_RUNNING;
280     
281     while (1) {
282         ullong_t tmp_tsc;
283         
284
285         /*
286           PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
287           (void *)(addr_t)info->segments.cs.base, 
288           (void *)(addr_t)info->rip);
289         */
290
291         // disable global interrupts for vm state transition
292         v3_clgi();
293
294
295
296         rdtscll(info->time_state.cached_host_tsc);
297         //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
298         
299         v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
300         
301         rdtscll(tmp_tsc);
302
303         
304         //PrintDebug("SVM Returned\n");
305
306         // reenable global interrupts after vm exit
307         v3_stgi();
308
309         v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
310         num_exits++;
311         
312         if ((num_exits % 5000) == 0) {
313             PrintDebug("SVM Exit number %d\n", num_exits);
314
315             if (info->enable_profiler) {
316                 v3_print_profile(info);
317             }
318         }
319
320      
321         if (v3_handle_svm_exit(info) != 0) {
322             vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
323             addr_t host_addr;
324             addr_t linear_addr = 0;
325             
326             info->run_state = VM_ERROR;
327             
328             PrintDebug("SVM ERROR!!\n"); 
329       
330             v3_print_guest_state(info);
331
332             PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
333       
334             PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
335             PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
336       
337             PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
338             PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
339       
340             linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
341
342             if (info->mem_mode == PHYSICAL_MEM) {
343                 guest_pa_to_host_va(info, linear_addr, &host_addr);
344             } else if (info->mem_mode == VIRTUAL_MEM) {
345                 guest_va_to_host_va(info, linear_addr, &host_addr);
346             }
347
348             PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);
349
350             PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
351             PrintTraceMemDump((uchar_t *)host_addr, 15);
352
353             break;
354         }
355     }
356     return 0;
357 }
358
359
360
361
362
363 /* Checks machine SVM capability */
364 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
365 int v3_is_svm_capable() {
366     // Dinda
367     uint_t vm_cr_low = 0, vm_cr_high = 0;
368     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
369
370     v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
371   
372     PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
373
374     if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
375       PrintDebug("SVM Not Available\n");
376       return 0;
377     }  else {
378         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
379         
380         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
381         
382         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
383             PrintDebug("SVM is available but is disabled.\n");
384             
385             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
386             
387             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
388             
389             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
390                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
391             } else {
392                 PrintDebug("SVM is locked with a key\n");
393             }
394             return 0;
395
396         } else {
397             PrintDebug("SVM is available and  enabled.\n");
398
399             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
400             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
401             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
402             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
403             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
404
405
406             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
407                 PrintDebug("SVM Nested Paging not supported\n");
408             } else {
409                 PrintDebug("SVM Nested Paging supported\n");
410             }
411
412             return 1;
413         }
414     }
415 }
416
417 static int has_svm_nested_paging() {
418     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
419
420     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
421
422     //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
423
424     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
425         PrintDebug("SVM Nested Paging not supported\n");
426         return 0;
427     } else {
428         PrintDebug("SVM Nested Paging supported\n");
429         return 1;
430     }
431 }
432
433
434
/*
 * v3_init_SVM - one-time SVM bring-up on this CPU:
 *   1. set EFER.SVME to enable SVM,
 *   2. allocate the host state-save area and program VM_HSAVE_PA,
 *   3. measure VMSAVE/VMLOAD latency (debug instrumentation),
 *   4. record the CPU type (REV3 when nested paging is present),
 *   5. install the SVM guest-lifecycle callbacks into vmm_ops.
 */
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");

    // Setup the host state save area
    // NOTE(review): 4 pages are allocated, though the HSAVE area is
    // architecturally one 4KB page — presumably intentional padding; confirm.
    host_vmcb = V3_AllocPages(4);

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    // Write the full pointer through the union's 64-bit member, then read
    // it back as the high/low halves the MSR interface expects.
    msr.r_reg = (addr_t)host_vmcb;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    /* 
     * Test VMSAVE/VMLOAD Latency 
     */
    // Raw opcode bytes for VMSAVE/VMLOAD (0F 01 DB / 0F 01 DA), emitted
    // directly because the assembler may not know these mnemonics.
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
    {
        uint32_t start_lo, start_hi;
        uint32_t end_lo, end_hi;
        uint64_t start, end;

        // RDTSC, stash the start timestamp in esi/edi, move the VMCB
        // address into rax (VMSAVE's implicit operand), VMSAVE, RDTSC again.
        // The end timestamp comes back in eax/edx via the "=a"/"=d" outputs.
        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmsave
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        // Reassemble the two 32-bit TSC halves into 64-bit counts.
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;

        PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
        
        // Same measurement for VMLOAD.
        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmload
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
    }
    /* End Latency Test */

    // Classify the CPU: REV3 implies nested-paging capability.
    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577 /*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
578   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
579   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
580   uint_t i;
581
582
583   guest_state->rsp = vm_info.vm_regs.rsp;
584   guest_state->rip = vm_info.rip;
585
586
587   //ctrl_area->instrs.instrs.CR0 = 1;
588   ctrl_area->cr_reads.cr0 = 1;
589   ctrl_area->cr_writes.cr0 = 1;
590
591   guest_state->efer |= EFER_MSR_svm_enable;
592   guest_state->rflags = 0x00000002; // The reserved bit is always 1
593   ctrl_area->svm_instrs.VMRUN = 1;
594   // guest_state->cr0 = 0x00000001;    // PE 
595   ctrl_area->guest_ASID = 1;
596
597
598   ctrl_area->exceptions.de = 1;
599   ctrl_area->exceptions.df = 1;
600   ctrl_area->exceptions.pf = 1;
601   ctrl_area->exceptions.ts = 1;
602   ctrl_area->exceptions.ss = 1;
603   ctrl_area->exceptions.ac = 1;
604   ctrl_area->exceptions.mc = 1;
605   ctrl_area->exceptions.gp = 1;
606   ctrl_area->exceptions.ud = 1;
607   ctrl_area->exceptions.np = 1;
608   ctrl_area->exceptions.of = 1;
609   ctrl_area->exceptions.nmi = 1;
610
611   guest_state->cs.selector = 0x0000;
612   guest_state->cs.limit=~0u;
613   guest_state->cs.base = guest_state->cs.selector<<4;
614   guest_state->cs.attrib.raw = 0xf3;
615
616   
617   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
618   for ( i = 0; segregs[i] != NULL; i++) {
619     struct vmcb_selector * seg = segregs[i];
620     
621     seg->selector = 0x0000;
622     seg->base = seg->selector << 4;
623     seg->attrib.raw = 0xf3;
624     seg->limit = ~0u;
625   }
626   
627   if (vm_info.io_map.num_ports > 0) {
628     struct vmm_io_hook * iter;
629     addr_t io_port_bitmap;
630     
631     io_port_bitmap = (addr_t)V3_AllocPages(3);
632     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
633     
634     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
635
636     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
637
638     FOREACH_IO_HOOK(vm_info.io_map, iter) {
639       ushort_t port = iter->port;
640       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
641
642       bitmap += (port / 8);
643       PrintDebug("Setting Bit in block %x\n", bitmap);
644       *bitmap |= 1 << (port % 8);
645     }
646
647
648     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
649
650     ctrl_area->instrs.IOIO_PROT = 1;
651   }
652
653   ctrl_area->instrs.INTR = 1;
654
655
656
657   if (vm_info.page_mode == SHADOW_PAGING) {
658     PrintDebug("Creating initial shadow page table\n");
659     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
660     PrintDebug("Created\n");
661
662     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
663
664     ctrl_area->cr_reads.cr3 = 1;
665     ctrl_area->cr_writes.cr3 = 1;
666
667
668     ctrl_area->instrs.INVLPG = 1;
669     ctrl_area->instrs.INVLPGA = 1;
670
671     guest_state->g_pat = 0x7040600070406ULL;
672
673     guest_state->cr0 |= 0x80000000;
674   } else if (vm_info.page_mode == NESTED_PAGING) {
675     // Flush the TLB on entries/exits
676     //ctrl_area->TLB_CONTROL = 1;
677
678     // Enable Nested Paging
679     //ctrl_area->NP_ENABLE = 1;
680
681     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
682
683         // Set the Nested Page Table pointer
684     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
685     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
686
687     //   ctrl_area->N_CR3 = Get_CR3();
688     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
689
690     //    guest_state->g_pat = 0x7040600070406ULL;
691   }
692
693
694
695 }
696 */
697
698
699
700
701
702
703
704 #if 0
705 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
706   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
707   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
708   uint_t i = 0;
709
710
711   guest_state->rsp = vm_info.vm_regs.rsp;
712   guest_state->rip = vm_info.rip;
713
714
715   /* I pretty much just gutted this from TVMM */
716   /* Note: That means its probably wrong */
717
718   // set the segment registers to mirror ours
719   guest_state->cs.selector = 1<<3;
720   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
721   guest_state->cs.attrib.fields.S = 1;
722   guest_state->cs.attrib.fields.P = 1;
723   guest_state->cs.attrib.fields.db = 1;
724   guest_state->cs.attrib.fields.G = 1;
725   guest_state->cs.limit = 0xfffff;
726   guest_state->cs.base = 0;
727   
728   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
729   for ( i = 0; segregs[i] != NULL; i++) {
730     struct vmcb_selector * seg = segregs[i];
731     
732     seg->selector = 2<<3;
733     seg->attrib.fields.type = 0x2; // Data Segment+read/write
734     seg->attrib.fields.S = 1;
735     seg->attrib.fields.P = 1;
736     seg->attrib.fields.db = 1;
737     seg->attrib.fields.G = 1;
738     seg->limit = 0xfffff;
739     seg->base = 0;
740   }
741
742
743   {
744     /* JRL THIS HAS TO GO */
745     
746     //    guest_state->tr.selector = GetTR_Selector();
747     guest_state->tr.attrib.fields.type = 0x9; 
748     guest_state->tr.attrib.fields.P = 1;
749     // guest_state->tr.limit = GetTR_Limit();
750     //guest_state->tr.base = GetTR_Base();// - 0x2000;
751     /* ** */
752   }
753
754
755   /* ** */
756
757
758   guest_state->efer |= EFER_MSR_svm_enable;
759   guest_state->rflags = 0x00000002; // The reserved bit is always 1
760   ctrl_area->svm_instrs.VMRUN = 1;
761   guest_state->cr0 = 0x00000001;    // PE 
762   ctrl_area->guest_ASID = 1;
763
764
765   //  guest_state->cpl = 0;
766
767
768
769   // Setup exits
770
771   ctrl_area->cr_writes.cr4 = 1;
772   
773   ctrl_area->exceptions.de = 1;
774   ctrl_area->exceptions.df = 1;
775   ctrl_area->exceptions.pf = 1;
776   ctrl_area->exceptions.ts = 1;
777   ctrl_area->exceptions.ss = 1;
778   ctrl_area->exceptions.ac = 1;
779   ctrl_area->exceptions.mc = 1;
780   ctrl_area->exceptions.gp = 1;
781   ctrl_area->exceptions.ud = 1;
782   ctrl_area->exceptions.np = 1;
783   ctrl_area->exceptions.of = 1;
784   ctrl_area->exceptions.nmi = 1;
785
786   
787
788   ctrl_area->instrs.IOIO_PROT = 1;
789   ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
790   
791   {
792     reg_ex_t tmp_reg;
793     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
794     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
795   }
796
797   ctrl_area->instrs.INTR = 1;
798
799   
800   {
801     char gdt_buf[6];
802     char idt_buf[6];
803
804     memset(gdt_buf, 0, 6);
805     memset(idt_buf, 0, 6);
806
807
808     uint_t gdt_base, idt_base;
809     ushort_t gdt_limit, idt_limit;
810     
811     GetGDTR(gdt_buf);
812     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
813     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
814     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
815
816     GetIDTR(idt_buf);
817     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
818     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
819     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
820
821
822     // gdt_base -= 0x2000;
823     //idt_base -= 0x2000;
824
825     guest_state->gdtr.base = gdt_base;
826     guest_state->gdtr.limit = gdt_limit;
827     guest_state->idtr.base = idt_base;
828     guest_state->idtr.limit = idt_limit;
829
830
831   }
832   
833   
834   // also determine if CPU supports nested paging
835   /*
836   if (vm_info.page_tables) {
837     //   if (0) {
838     // Flush the TLB on entries/exits
839     ctrl_area->TLB_CONTROL = 1;
840
841     // Enable Nested Paging
842     ctrl_area->NP_ENABLE = 1;
843
844     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
845
846         // Set the Nested Page Table pointer
847     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
848
849
850     //   ctrl_area->N_CR3 = Get_CR3();
851     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
852
853     guest_state->g_pat = 0x7040600070406ULL;
854
855     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
856     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
857     // Enable Paging
858     //    guest_state->cr0 |= 0x80000000;
859   }
860   */
861
862 }
863
864
865
866
867
868 #endif
869
870