Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way; see the example below.
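For example, to see which branches exist on the server and track one of the release branches, something like the following should work (the "Release-1.0" name is only a placeholder; substitute whatever branch name "git branch -r" actually lists):

  cd palacios
  git branch -r
  git checkout --track -b Release-1.0 origin/Release-1.0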


Source listing: palacios/src/palacios/svm.c (as of the "build reorganization" commit)
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>


extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte); 
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern void safe_svm_launch(vmcb_t * vmcb_addr, struct v3_gprs * gprs);

extern void STGI();
extern void CLGI();

extern uint_t Get_CR3();

extern void DisableInts();
extern void EnableInts();

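/* Allocate and zero a single 4 KB page to hold the VMCB
   (hardware control area followed by the guest save state area) */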
static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_AllocPages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->svm_instrs.VMMCALL = 1;
  ctrl_area->svm_instrs.VMLOAD = 1;
  ctrl_area->svm_instrs.VMSAVE = 1;
  ctrl_area->svm_instrs.STGI = 1;
  ctrl_area->svm_instrs.CLGI = 1;
  ctrl_area->svm_instrs.SKINIT = 1;
  ctrl_area->svm_instrs.RDTSCP = 1;
  ctrl_area->svm_instrs.ICEBP = 1;
  ctrl_area->svm_instrs.WBINVD = 1;
  ctrl_area->svm_instrs.MONITOR = 1;
  ctrl_area->svm_instrs.MWAIT_always = 1;
  ctrl_area->svm_instrs.MWAIT_if_armed = 1;

  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;

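  // Architectural reset state: EDX holds the CPU signature, CR0 = 0x60000010
  // (ET | CD | NW), and execution begins at the reset vector f000:fff0 in the BIOS.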
  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;

  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

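  // I/O permission map: three contiguous pages, one intercept bit per port
  // across the 64K I/O space; only ports with registered hooks cause a #VMEXIT.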
  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;

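  // Paging: with shadow paging the VMM intercepts CR3 accesses, INVLPG/INVLPGA,
  // and page faults so it can maintain the shadow tables itself; with nested
  // paging the hardware walks a second set of tables rooted at N_CR3.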
  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }

}


static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();

  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);

  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

  info->run_state = VM_STOPPED;

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


// can we start a kernel thread here...
int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
  uint_t num_exits = 0;

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  info->run_state = VM_RUNNING;

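  // VM entry/exit loop: CLGI blocks interrupt delivery around VMRUN so the world
  // switch is not interrupted, the TSC offset keeps the guest's view of time
  // consistent across exits, and STGI re-enables interrupts before the exit is handled.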
  while (1) {
    ullong_t tmp_tsc;

    EnableInts();
    CLGI();

    //    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    rdtscll(info->time_state.cached_host_tsc);
    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    safe_svm_launch((vmcb_t*)(info->vmm_data), &(info->vm_regs));

    rdtscll(tmp_tsc);
    //PrintDebug("SVM Returned\n");

    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    STGI();

    if ((num_exits % 25) == 0) {
      PrintDebug("SVM Exit number %d\n", num_exits);
    }

    if (handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      info->run_state = VM_ERROR;

      PrintDebug("SVM ERROR!!\n"); 

      PrintDebug("RIP: %x\n", guest_state->rip);

      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));

      PrintDebug("RIP Linear: %x\n", linear_addr);
      PrintV3Segments(info);
      PrintV3CtrlRegs(info);
      PrintV3GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_pa(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_pa(info, linear_addr, &host_addr);
      }

      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((char*)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
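/* The check below is: the CPUID feature flags advertise SVM, VM_CR.SVMDIS is
   clear (i.e. the BIOS has not disabled SVM), and the extended SVM feature
   leaf reports whether SVM-Lock and nested paging are available. */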
int is_svm_capable() {

#if 1
  // Dinda

  uint_t ret;
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  ret =  cpuid_ecx(CPUID_FEATURE_IDS);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n",ret);

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }  else {
    Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
      PrintDebug("SVM is available but is disabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and  enabled.\n");

      ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

      if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;

    }
  }

#else

  uint_t ret =  cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;

  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } 

  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n",vm_cr_high,vm_cr_low);

  // this part is clearly wrong, since the np bit is in 
  // edx, not ecx
  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}


int has_svm_nested_paging() {
  uint32_t ret;

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n",ret);

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }

}

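// Enable SVM on this CPU: set EFER.SVME, point the VM_HSAVE_PA MSR at a host
// state save area, and register the SVM-specific guest init/start operations.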
void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;

  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");

  // Setup the host state save area
  host_state = V3_AllocPages(4);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;

  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}


/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit=~0u;
  guest_state->cs.base = guest_state->cs.selector<<4;
  guest_state->cs.attrib.raw = 0xf3;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }

    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;

  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;

    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }

}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;

  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;

  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }

  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }

  /* ** */

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;

  //  guest_state->cpl = 0;

  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);

    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);

    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }

  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif