Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; an example for a release branch is shown below.
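For instance, to track a release branch (the branch name here is only an illustration; run "git branch -r" inside your clone to see which remote branches actually exist):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2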


Commit: minor changes to yields to avoid guest lockups due to IRQ flooding scenarios
File: palacios/src/palacios/svm.c (palacios.git)
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/svm_io.h>

// This is a global pointer to the host's VMCB
static void * host_vmcb = NULL;

extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);


static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}
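
/* 
 * Init_VMCB_BIOS() below fills in a zeroed VMCB so that the guest starts in
 * the usual x86 reset state (CS base 0xf0000, RIP 0xfff0, CR0 0x60000010),
 * sets the instruction/exception intercepts Palacios relies on, wires up the
 * IO and MSR permission maps, and configures either shadow or nested paging
 * depending on vm_info->shdw_pg_mode.
 */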
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    //
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}


static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {


    v3_pre_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

    v3_post_config_guest(info, config_ptr);

    return 0;
}

static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    rdtscll(info->yield_start_cycle);


    while (1) {
        ullong_t tmp_tsc;
        
        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
        
        rdtscll(tmp_tsc);

        
        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();


        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);


        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;
        
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }

        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }

            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}




/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
  
    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
        
        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
        
        // SVMDIS is a mask bit, so test the masked value for non-zero
        // rather than comparing it to 1
        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");
            
            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
            
            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}


void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");

    // Setup the host state save area
    host_vmcb = V3_AllocPages(4);

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = (addr_t)host_vmcb;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    /* 
     * Test VMSAVE/VMLOAD Latency 
     */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
    {
        uint32_t start_lo, start_hi;
        uint32_t end_lo, end_hi;
        uint64_t start, end;

        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmsave
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;

        PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
        
        __asm__ __volatile__ (
                              "rdtsc ; "
                              "movl %%eax, %%esi ; "
                              "movl %%edx, %%edi ; "
                              "movq  %%rcx, %%rax ; "
                              vmload
                              "rdtsc ; "
                              : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
    }
    /* End Latency Test */

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;

  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }
  
  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;
    
    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means it's probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif