Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You will probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.
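For example, to work on a release branch (assuming a branch named Release-1.2 exists on the server; substitute whichever branch you want), execute

  git checkout --track -b Release-1.2 origin/Release-1.2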


palacios.git: palacios/src/palacios/svm.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_profiler.h>

extern void v3_stgi();
extern void v3_clgi();
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);

static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}

#include <palacios/vmm_ctrl_regs.h>

static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;
  //ctrl_area->cr_reads.cr4 = 1;
  ctrl_area->cr_writes.cr4 = 1;

  /* Set up the efer to enable 64 bit page tables */
  /*
  {
    struct efer_64 * efer = (struct efer_64 *)&(guest_state->efer);
    struct cr4_32 * cr4 = (struct cr4_32 *)&(guest_state->cr4);
    efer->lma = 1;
    efer->lme = 1;

    cr4->pae = 1;
  }
  */

  guest_state->efer |= EFER_MSR_svm_enable;
  vm_info->guest_efer.value = 0x0LL;

  v3_hook_msr(vm_info, EFER_MSR, 
              &v3_handle_efer_read,
              &v3_handle_efer_write, 
              vm_info);

  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->svm_instrs.VMMCALL = 1;
  ctrl_area->svm_instrs.VMLOAD = 1;
  ctrl_area->svm_instrs.VMSAVE = 1;
  ctrl_area->svm_instrs.STGI = 1;
  ctrl_area->svm_instrs.CLGI = 1;
  ctrl_area->svm_instrs.SKINIT = 1;
  ctrl_area->svm_instrs.RDTSCP = 1;
  ctrl_area->svm_instrs.ICEBP = 1;
  ctrl_area->svm_instrs.WBINVD = 1;
  ctrl_area->svm_instrs.MONITOR = 1;
  ctrl_area->svm_instrs.MWAIT_always = 1;
  ctrl_area->svm_instrs.MWAIT_if_armed = 1;

  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
  */

  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI=1;
  ctrl_area->instrs.SMI=1;
  ctrl_area->instrs.INIT=1;
  ctrl_area->instrs.PAUSE=1;
  ctrl_area->instrs.shutdown_evts=1;

  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit=0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  /* DEBUG FOR RETURN CODE */
  ctrl_area->exit_code = 1;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if ( !RB_EMPTY_ROOT(&(vm_info->io_map)) ) {
    struct v3_io_hook * iter;
    struct rb_node * io_node = v3_rb_first(&(vm_info->io_map));
    addr_t io_port_bitmap;
    int i = 0;

    io_port_bitmap = (addr_t)V3_VAddr(V3_AllocPages(3));
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr((void *)io_port_bitmap);

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    do {
      iter = rb_entry(io_node, struct v3_io_hook, tree_node);

      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;
      PrintDebug("%d: Hooking Port %d\n", i, port);

      bitmap += (port / 8);
      //      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);

      i++;
    } while ((io_node = v3_rb_next(io_node)));

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;

  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");

    /* Testing 64 bit page tables for long paged real mode guests */
    //    vm_info->direct_map_pt = (addr_t)V3_PAddr(create_passthrough_pts_64(vm_info));
    vm_info->direct_map_pt = (addr_t)V3_PAddr(create_passthrough_pts_32(vm_info));
    /* End Test */

    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->direct_map_pt;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pts_32(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

  if (vm_info->msr_map.num_hooks > 0) {
    PrintDebug("Hooking %d msrs\n", vm_info->msr_map.num_hooks);
    ctrl_area->MSRPM_BASE_PA = v3_init_svm_msr_map(vm_info);
    ctrl_area->instrs.MSR_PROT = 1;

  }

}

static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

  info->run_state = VM_STOPPED;

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}

// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
  uint_t num_exits = 0;

  PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  info->run_state = VM_RUNNING;

  while (1) {
    ullong_t tmp_tsc;

#define MSR_STAR      0xc0000081
#define MSR_LSTAR     0xc0000082
#define MSR_CSTAR     0xc0000083
#define MSR_SF_MASK   0xc0000084
#define MSR_GS_BASE   0xc0000101

    struct v3_msr host_cstar;
    struct v3_msr host_star;
    struct v3_msr host_lstar;
    struct v3_msr host_syscall_mask;
    struct v3_msr host_gs_base;

    v3_enable_ints();
    v3_clgi();

    /*
    PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
               (void *)(addr_t)info->segments.cs.base, 
               (void *)(addr_t)info->rip);
    */

    v3_get_msr(MSR_STAR, &(host_star.hi), &(host_star.lo));
    v3_get_msr(MSR_LSTAR, &(host_lstar.hi), &(host_lstar.lo));
    v3_get_msr(MSR_CSTAR, &(host_cstar.hi), &(host_cstar.lo));
    v3_get_msr(MSR_SF_MASK, &(host_syscall_mask.hi), &(host_syscall_mask.lo));
    v3_get_msr(MSR_GS_BASE, &(host_gs_base.hi), &(host_gs_base.lo));

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);

    v3_set_msr(MSR_STAR, host_star.hi, host_star.lo);
    v3_set_msr(MSR_LSTAR, host_lstar.hi, host_lstar.lo);
    v3_set_msr(MSR_CSTAR, host_cstar.hi, host_cstar.lo);
    v3_set_msr(MSR_SF_MASK, host_syscall_mask.hi, host_syscall_mask.lo);
    v3_set_msr(MSR_GS_BASE, host_gs_base.hi, host_gs_base.lo);

    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    //PrintDebug("Turning on global interrupts\n");
    v3_stgi();

    if ((num_exits % 5000) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
      if (info->enable_profiler) 
        v3_print_profile(info);
    }

    if (v3_handle_svm_exit(info) != 0) {
      vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
      addr_t host_addr;
      addr_t linear_addr = 0;

      info->run_state = VM_ERROR;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %p\n", (void *)(addr_t)(guest_state->rip));

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %p\n", (void *)linear_addr);
      v3_print_segments(info);
      v3_print_ctrl_regs(info);
      if (info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Shadow Paging Guest Registers:\n");
        PrintDebug("\tGuest CR0=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr0));
        PrintDebug("\tGuest CR3=%p\n", (void *)(addr_t)(info->shdw_pg_state.guest_cr3));
        // efer
        // CR4
      }
      v3_print_GPRs(info);

      PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);

      PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
      PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

      PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
      PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_va(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_va(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

      PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
      PrintTraceMemDump((uchar_t *)host_addr, 15);

      break;
    }

  }
  return 0;
}

/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
int v3_is_svm_capable() {

#if 1
  // Dinda
  uint_t vm_cr_low = 0, vm_cr_high = 0;
  addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

  v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

  PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);

  if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }  else {
    v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
      PrintDebug("SVM is available but is disabled.\n");

      v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

      PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);

      if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and  enabled.\n");

      v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
      PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
      PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
      PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
      PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);

      if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else
  uint_t eax = 0, ebx = 0, ecx = 0, edx = 0;
  addr_t vm_cr_low = 0, vm_cr_high = 0;

  v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

  if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } 

  v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

  // this part is clearly wrong, since the np bit is in 
  // edx, not ecx
  if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

  if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

static int has_svm_nested_paging() {
  addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

  v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

  if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}

void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = V3_AllocPages(4);

  /* 64-BIT-ISSUE */
  //  msr.e_reg.high = 0;
  //msr.e_reg.low = (uint_t)host_state;
  msr.r_reg = (addr_t)host_state;

  PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_state);
  v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}

/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/

#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }

  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }

  /* ** */

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  //  guest_state->cpl = 0;

  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }

  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif