Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
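For example, to track a release branch (the name Release-1.2 below is only a placeholder; run `git branch -r` inside the repository to see which remote branches actually exist):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2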


various fixes. Hopefully this fixes the transient shutdown bug...
[palacios.git] / palacios / src / palacios / svm.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>

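/* Low-level entry points implemented outside this file (in the SVM assembly
 * support code): v3_stgi()/v3_clgi() set and clear the global interrupt flag
 * (GIF), and v3_svm_launch() performs the actual VMRUN into the guest
 * described by the VMCB, using vm_regs as the GPR save area for the world
 * switch. */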
extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);

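/* The VMCB (Virtual Machine Control Block) is a single 4KB, page-aligned
 * structure holding both the control area (intercept settings, exit
 * information) and the guest state save area. */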
static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}

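/* Populate a fresh VMCB so the guest starts as a just-reset x86 machine that
 * will begin executing its (virtual) BIOS: establish the architectural reset
 * state, choose which instructions, exceptions, and interrupts to intercept,
 * and wire up the I/O and MSR permission maps plus shadow or nested paging. */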
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;

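    /* Architectural reset state: execution starts at CS.base 0xf0000 with
     * IP 0xfff0 (linear 0xffff0, the top of the shadowed BIOS region), CR0
     * gets its reset value further down, and EDX is loaded with what is
     * presumably a reset-time processor signature (0xf00). */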
    guest_state->rsp = vm_info->vm_regs.rsp;
    // guest_state->rip = vm_info->rip;
    guest_state->rip = 0xfff0;

    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;

    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds),
                                        &(guest_state->es), &(guest_state->fs),
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;

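    /* Build the I/O permission map: 3 pages (12KB) with one bit per I/O port,
     * as SVM requires. Every port that has a registered hook gets its bit set
     * so guest accesses to it cause a #VMEXIT. */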
    if ( !RB_EMPTY_ROOT(&(vm_info->io_map)) ) {
        struct v3_io_hook * iter;
        struct rb_node * io_node = v3_rb_first(&(vm_info->io_map));
        addr_t io_port_bitmap;
        int i = 0;

        io_port_bitmap = (addr_t)V3_VAddr(V3_AllocPages(3));
        memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

        ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr((void *)io_port_bitmap);

        //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

        do {
            iter = rb_entry(io_node, struct v3_io_hook, tree_node);

            ushort_t port = iter->port;
            uchar_t * bitmap = (uchar_t *)io_port_bitmap;
            //PrintDebug("%d: Hooking Port %d\n", i, port);

            bitmap += (port / 8);
            //      PrintDebug("Setting Bit for port 0x%x\n", port);
            *bitmap |= 1 << (port % 8);

            i++;
        } while ((io_node = v3_rb_next(io_node)));

        //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

        ctrl_area->instrs.IOIO_PROT = 1;
    }


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;

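    /* Memory virtualization setup: with shadow paging the VMM maintains its
     * own page tables and must intercept CR0/CR3/CR4 accesses, page faults,
     * INVLPG, and EFER updates; with nested paging the hardware walks a second
     * set of tables (N_CR3) and far fewer intercepts are needed. */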
    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        vm_info->guest_efer.value = 0x0LL;

        v3_hook_msr(vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;



        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }

    if (vm_info->msr_map.num_hooks > 0) {
        PrintDebug("Hooking %d msrs\n", vm_info->msr_map.num_hooks);
        ctrl_area->MSRPM_BASE_PA = v3_init_svm_msr_map(vm_info);
        ctrl_area->instrs.MSR_PROT = 1;
    }

    /* Safety locations for fs/gs */
    //    vm_info->fs = 0;
    //    vm_info->gs = 0;
}


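/* Build up a guest: apply the configuration, allocate and initialize its VMCB
 * to the BIOS reset state, and clear the general purpose registers. The guest
 * is left in the VM_STOPPED state until start_svm_guest() is called. */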
static int init_svm_guest(struct guest_info *info, struct v3_vm_config * config_ptr) {
    v3_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    v3_config_devices(info, config_ptr);

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);



    info->run_state = VM_STOPPED;

    //  info->rip = 0;

    info->vm_regs.rdi = 0;
    info->vm_regs.rsi = 0;
    info->vm_regs.rbp = 0;
    info->vm_regs.rsp = 0;
    info->vm_regs.rbx = 0;
    info->vm_regs.rdx = 0;
    info->vm_regs.rcx = 0;
    info->vm_regs.rax = 0;

    return 0;
}



// can we start a kernel thread here...
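/* Main execution loop for an SVM guest. Each iteration saves the host MSRs
 * that the entry/exit path does not preserve, enters the guest with
 * v3_svm_launch(), restores those MSRs, accounts the elapsed TSC cycles to
 * the guest's virtual time, and then dispatches the exit to
 * v3_handle_svm_exit(). On an unhandled exit the guest state is dumped and
 * the loop terminates. */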
static int start_svm_guest(struct guest_info *info) {
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->run_state = VM_RUNNING;

    while (1) {
        ullong_t tmp_tsc;



#define MSR_STAR      0xc0000081
#define MSR_LSTAR     0xc0000082
#define MSR_CSTAR     0xc0000083
#define MSR_SF_MASK   0xc0000084
#define MSR_GS_BASE   0xc0000101
#define MSR_KERNGS_BASE   0xc0000102

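        /* The guest entry/exit path can clobber these host MSRs (the SYSCALL
         * entry points, the syscall flag mask, and the GS base registers), so
         * the host copies are saved here and restored by hand after the guest
         * exits. */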
        struct v3_msr host_cstar;
        struct v3_msr host_star;
        struct v3_msr host_lstar;
        struct v3_msr host_syscall_mask;
        struct v3_msr host_gs_base;
        struct v3_msr host_kerngs_base;

/*      v3_enable_ints(); */
/*      v3_clgi(); */


        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n",
          (void *)(addr_t)info->segments.cs.base,
          (void *)(addr_t)info->rip);
        */


        v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
        v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
        v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
        v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
        v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));
        v3_get_msr(MSR_KERNGS_BASE, &(host_kerngs_base.hi), &(host_kerngs_base.lo));


        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

        //v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), &(info->fs), &(info->gs));
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));

        rdtscll(tmp_tsc);

        v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
        v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
        v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
        v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
        v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);
        v3_set_msr(MSR_KERNGS_BASE, host_kerngs_base.hi, host_kerngs_base.lo);

        //PrintDebug("SVM Returned\n");



        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

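        /* Presumably: briefly re-open the global interrupt window (STGI) so
         * any host interrupts that arrived while the guest was running can be
         * delivered, then close it again (CLGI) before handling the exit and
         * re-entering the guest. */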
        //PrintDebug("Turning on global interrupts\n");
        v3_stgi();
        v3_clgi();

        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }



        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->run_state = VM_ERROR;

            PrintDebug("SVM ERROR!!\n");

            PrintDebug("RIP: %p\n", (void *)(addr_t)(guest_state->rip));


            linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


            PrintDebug("RIP Linear: %p\n", (void *)linear_addr);
            v3_print_segments(info);
            v3_print_ctrl_regs(info);
            if (info->shdw_pg_mode == SHADOW_PAGING) {
                PrintDebug("Shadow Paging Guest Registers:\n");
                PrintDebug("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
                PrintDebug("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
                // efer
                // CR4
            }
            v3_print_GPRs(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);

            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}



/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    // Dinda
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);

    if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
            PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
            PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
            PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

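/* Query the SVM revision/feature CPUID leaf to see whether this CPU supports
 * nested paging. */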
static int has_svm_nested_paging() {
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}


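/* Per-CPU SVM initialization: set EFER.SVME to enable SVM, point the
 * VM_HSAVE_PA MSR at a host state save area, record whether nested paging is
 * available, and register the SVM implementations of the generic VMM ops. */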
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    void * host_state;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");


    // Setup the host state save area
    host_state = V3_AllocPages(4);


    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_state;
    msr.r_reg = (addr_t)host_state;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}



/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
*/



#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means it's probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
