Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; an example follows below.
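For instance, to track a release branch (the branch name "Release-1.2" here is only a placeholder; list the remote branches first to see what actually exists):

  # list the remote branches available in the repository
  git branch -r
  # create a local tracking branch for a (hypothetical) release branch
  git checkout --track -b Release-1.2 origin/Release-1.2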


Below is palacios/src/palacios/svm.c from palacios.git (commit: "large scale namespace changes").
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>


extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();
extern void EnableInts();


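/* Allocate and zero a single page to hold the VMCB (Virtual Machine Control Block) */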
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_AllocPages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


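/* 
 * Set up the VMCB so the guest begins execution at the BIOS reset vector
 * (CS:IP = f000:fff0), with intercepts enabled for CR0 accesses, the SVM
 * instructions, HLT, NMI/SMI/INIT/PAUSE, hooked I/O ports, and interrupts,
 * and with either shadow or nested paging configured.
 */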
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->svm_instrs.VMMCALL = 1;
  ctrl_area->svm_instrs.VMLOAD = 1;
  ctrl_area->svm_instrs.VMSAVE = 1;
  ctrl_area->svm_instrs.STGI = 1;
  ctrl_area->svm_instrs.CLGI = 1;
  ctrl_area->svm_instrs.SKINIT = 1;
  ctrl_area->svm_instrs.RDTSCP = 1;
  ctrl_area->svm_instrs.ICEBP = 1;
  ctrl_area->svm_instrs.WBINVD = 1;
  ctrl_area->svm_instrs.MONITOR = 1;
  ctrl_area->svm_instrs.MWAIT_always = 1;
  ctrl_area->svm_instrs.MWAIT_if_armed = 1;


  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;
    
    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;
  
    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;


  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;
    
    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      //      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;


    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}


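/* Allocate a VMCB for the guest, initialize it for a BIOS boot, mark the guest stopped, and clear its general purpose registers */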
static int init_svm_guest(struct guest_info *info) {
 
  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();


  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);


  info->run_state = VM_STOPPED;

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


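/*
 * Main guest execution loop: disable global interrupts (CLGI), set the TSC
 * offset, launch the guest with safe_svm_launch(), then re-enable global
 * interrupts (STGI) and dispatch the #VMEXIT to v3_handle_svm_exit().
 * On an unhandled exit, dump the guest state and stop.
 */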
// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
  uint_t num_exits = 0;


  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  info->run_state = VM_RUNNING;

  while (1) {
    ullong_t tmp_tsc;


    EnableInts();
    CLGI();

    //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");


    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    STGI();

    if ((num_exits % 25) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
    }


    if (v3_handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      info->run_state = VM_ERROR;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);


      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


      PrintDebug("RIP Linear: %x\n", linear_addr);
      v3_print_segments(info);
      v3_print_ctrl_regs(info);
      v3_print_GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }


      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((uchar_t *)host_addr, 15);

      break;
    }
  }
  return 0;
}



/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  ret =  cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }  else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and  enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else

  uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);


  // this part is clearly wrong, since the np bit is in 
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

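/* Query CPUID to determine whether the CPU supports SVM nested paging */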
static int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}


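/*
 * Enable SVM on this CPU (set EFER_MSR_svm_enable in the EFER MSR), point
 * SVM_VM_HSAVE_PA_MSR at a newly allocated host state save area, and hook
 * the SVM-specific init/start/nested-paging operations into vmm_ops.
 */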
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
  host_state = V3_AllocPages(4);


  /* 64-BIT-ISSUE */
  //  msr.e_reg.high = 0;
  //msr.e_reg.low = (uint_t)host_state;
  msr.r_reg = (addr_t)host_state;

  PrintDebug("Host State being saved at %x\n", (addr_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}



/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;
    
    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
    
    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;


  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }


}
*/



#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;


  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif