Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel

The other branches are similar.
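For example, you can list the remote branches and then track one of the release branches by name. (The branch name below is only illustrative; substitute a name actually reported by "git branch -r".)

  cd palacios
  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2   # "Release-1.2" is a hypothetical example name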


fixed ins/outs and moved the gs.base save/restore point
[palacios.git] / palacios / src / palacios / svm.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>


extern uint_t Get_CR3();

extern void v3_stgi();
extern void v3_clgi();
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs);


static vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}


static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info->vm_regs.rsp;
  // guest_state->rip = vm_info->rip;
  guest_state->rip = 0xfff0;

  guest_state->cpl = 0;

  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  ctrl_area->svm_instrs.VMMCALL = 1;
  ctrl_area->svm_instrs.VMLOAD = 1;
  ctrl_area->svm_instrs.VMSAVE = 1;
  ctrl_area->svm_instrs.STGI = 1;
  ctrl_area->svm_instrs.CLGI = 1;
  ctrl_area->svm_instrs.SKINIT = 1;
  ctrl_area->svm_instrs.RDTSCP = 1;
  ctrl_area->svm_instrs.ICEBP = 1;
  ctrl_area->svm_instrs.WBINVD = 1;
  ctrl_area->svm_instrs.MONITOR = 1;
  ctrl_area->svm_instrs.MWAIT_always = 1;
  ctrl_area->svm_instrs.MWAIT_if_armed = 1;


  ctrl_area->instrs.HLT = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  /*
    ctrl_area->exceptions.de = 1;
    ctrl_area->exceptions.df = 1;

    ctrl_area->exceptions.ts = 1;
    ctrl_area->exceptions.ss = 1;
    ctrl_area->exceptions.ac = 1;
    ctrl_area->exceptions.mc = 1;
    ctrl_area->exceptions.gp = 1;
    ctrl_area->exceptions.ud = 1;
    ctrl_area->exceptions.np = 1;
    ctrl_area->exceptions.of = 1;

    ctrl_area->exceptions.nmi = 1;
  */
  // Debug of boot on physical machines - 7/14/08
  ctrl_area->instrs.NMI = 1;
  ctrl_area->instrs.SMI = 1;
  ctrl_area->instrs.INIT = 1;
  ctrl_area->instrs.PAUSE = 1;
  ctrl_area->instrs.shutdown_evts = 1;


  vm_info->vm_regs.rdx = 0x00000f00;

  guest_state->cr0 = 0x60000010;

  guest_state->cs.selector = 0xf000;
  guest_state->cs.limit = 0xffff;
  guest_state->cs.base = 0x0000000f0000LL;
  guest_state->cs.attrib.raw = 0xf3;


  /* DEBUG FOR RETURN CODE */
  ctrl_area->exit_code = 1;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    //    seg->base = seg->selector << 4;
    seg->base = 0x00000000;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  guest_state->gdtr.limit = 0x0000ffff;
  guest_state->gdtr.base = 0x0000000000000000LL;
  guest_state->idtr.limit = 0x0000ffff;
  guest_state->idtr.base = 0x0000000000000000LL;

  guest_state->ldtr.selector = 0x0000;
  guest_state->ldtr.limit = 0x0000ffff;
  guest_state->ldtr.base = 0x0000000000000000LL;
  guest_state->tr.selector = 0x0000;
  guest_state->tr.limit = 0x0000ffff;
  guest_state->tr.base = 0x0000000000000000LL;


  guest_state->dr6 = 0x00000000ffff0ff0LL;
  guest_state->dr7 = 0x0000000000000400LL;

  if (vm_info->io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_VAddr(V3_AllocPages(3));
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr((void *)io_port_bitmap);

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info->io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      //      PrintDebug("Setting Bit for port 0x%x\n", port);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }


  PrintDebug("Exiting on interrupts\n");
  ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
  ctrl_area->instrs.INTR = 1;


  if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info->direct_map_pt = (addr_t)create_passthrough_pde32_pts(vm_info);
    vm_info->shdw_pg_state.shadow_cr3 |= (vm_info->direct_map_pt & ~0xfff);
    vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info->shdw_pg_state.shadow_cr3;

    //PrintDebugPageTables((pde32_t*)(vm_info->shdw_pg_state.shadow_cr3.e_reg.low));

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    ctrl_area->exceptions.pf = 1;

    /* JRL: This is a performance killer, and a simplistic solution */
    /* We need to fix this */
    ctrl_area->TLB_CONTROL = 1;


    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;

  } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    vm_info->direct_map_pt = ((addr_t)create_passthrough_pde32_pts(vm_info) & ~0xfff);
    ctrl_area->N_CR3 = vm_info->direct_map_pt;

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;
  }


}


static int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();


  //PrintDebug("Generating Guest nested page tables\n");
  //  info->page_tables = NULL;
  //info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //info->page_tables = generate_guest_page_tables(&(info->mem_layout), &(info->mem_list));
  //  PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);


  info->run_state = VM_STOPPED;

  //  info->rip = 0;

  info->vm_regs.rdi = 0;
  info->vm_regs.rsi = 0;
  info->vm_regs.rbp = 0;
  info->vm_regs.rsp = 0;
  info->vm_regs.rbx = 0;
  info->vm_regs.rdx = 0;
  info->vm_regs.rcx = 0;
  info->vm_regs.rax = 0;

  return 0;
}


// can we start a kernel thread here...
static int start_svm_guest(struct guest_info *info) {
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
  uint_t num_exits = 0;


  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

  info->run_state = VM_RUNNING;

  while (1) {
    ullong_t tmp_tsc;
    uint_t vm_cr_low = 0, vm_cr_high = 0;


    v3_enable_ints();
    v3_clgi();


    PrintDebug("SVM Entry to rip=%x...\n", info->rip);

    // Save the host's gs.base (MSR 0xc0000101) so it can be restored after the guest runs
    v3_get_msr(0xc0000101, &vm_cr_high, &vm_cr_low);

    rdtscll(info->time_state.cached_host_tsc);

    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

    v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs));
    rdtscll(tmp_tsc);

    // Restore the host's gs.base
    v3_set_msr(0xc0000101, vm_cr_high, vm_cr_low);
    PrintDebug("SVM Returned\n");


    {
      uint_t x = 0;
      PrintDebug("RSP=%p\n", &x);
    }


    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
    num_exits++;

    PrintDebug("Turning on global interrupts\n");
    v3_stgi();


    PrintDebug("SVM Exit number %d\n", num_exits);


    if (v3_handle_svm_exit(info) != 0) {

      addr_t host_addr;
      addr_t linear_addr = 0;

      info->run_state = VM_ERROR;

      PrintDebug("SVM ERROR!!\n");

      PrintDebug("RIP: %x\n", guest_state->rip);


      linear_addr = get_addr_linear(info, guest_state->rip, &(info->segments.cs));


      PrintDebug("RIP Linear: 0x%x\n", linear_addr);
      v3_print_segments(info);
      v3_print_ctrl_regs(info);
      v3_print_GPRs(info);

      if (info->mem_mode == PHYSICAL_MEM) {
        guest_pa_to_host_va(info, linear_addr, &host_addr);
      } else if (info->mem_mode == VIRTUAL_MEM) {
        guest_va_to_host_va(info, linear_addr, &host_addr);
      }


      PrintDebug("Host Address of rip = 0x%x\n", host_addr);

      PrintDebug("Instr (15 bytes) at %x:\n", host_addr);
      PrintTraceMemDump((uchar_t *)host_addr, 15);

      break;
    }
  }
  return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {

#if 1
  // Dinda
  uint_t vm_cr_low = 0, vm_cr_high = 0;
  addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

  v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

  PrintDebug("CPUID_FEATURE_IDS_ecx=0x%x\n", ecx);

  if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  } else {
    v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

    PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

    if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
      PrintDebug("SVM is available but is disabled.\n");

      v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

      if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
        PrintDebug("SVM BIOS Disabled, not unlockable\n");
      } else {
        PrintDebug("SVM is locked with a key\n");
      }
      return 0;

    } else {
      PrintDebug("SVM is available and enabled.\n");

      v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

      PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

      if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
      } else {
        PrintDebug("SVM Nested Paging supported\n");
      }

      return 1;
    }
  }

#else
  uint_t eax = 0, ebx = 0, ecx = 0, edx = 0;
  addr_t vm_cr_low = 0, vm_cr_high = 0;

  v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

  if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

  v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);


  // this part is clearly wrong, since the np bit is in
  // edx, not ecx
  if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 1) {
    PrintDebug("Nested Paging not supported\n");
  } else {
    PrintDebug("Nested Paging supported\n");
  }

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    PrintDebug("SVM is disabled.\n");
    return 1;
  }

  v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

  if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;

#endif

}

static int has_svm_nested_paging() {
  addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

  v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

  //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);

  if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("SVM Nested Paging not supported\n");
    return 0;
  } else {
    PrintDebug("SVM Nested Paging supported\n");
    return 1;
  }
}



void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
  host_state = V3_AllocPages(4);


  /* 64-BIT-ISSUE */
  //  msr.e_reg.high = 0;
  //msr.e_reg.low = (uint_t)host_state;
  msr.r_reg = (addr_t)host_state;

  PrintDebug("Host State being saved at %x\n", (addr_t)host_state);
  v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;
  vmm_ops->has_nested_paging = &has_svm_nested_paging;

  return;
}



/*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  //ctrl_area->instrs.instrs.CR0 = 1;
  ctrl_area->cr_reads.cr0 = 1;
  ctrl_area->cr_writes.cr0 = 1;

  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  // guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  guest_state->cs.selector = 0x0000;
  guest_state->cs.limit = ~0u;
  guest_state->cs.base = guest_state->cs.selector << 4;
  guest_state->cs.attrib.raw = 0xf3;


  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0x0000;
    seg->base = seg->selector << 4;
    seg->attrib.raw = 0xf3;
    seg->limit = ~0u;
  }

  if (vm_info.io_map.num_ports > 0) {
    struct vmm_io_hook * iter;
    addr_t io_port_bitmap;

    io_port_bitmap = (addr_t)V3_AllocPages(3);
    memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);

    ctrl_area->IOPM_BASE_PA = io_port_bitmap;

    //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);

    FOREACH_IO_HOOK(vm_info.io_map, iter) {
      ushort_t port = iter->port;
      uchar_t * bitmap = (uchar_t *)io_port_bitmap;

      bitmap += (port / 8);
      PrintDebug("Setting Bit in block %x\n", bitmap);
      *bitmap |= 1 << (port % 8);
    }


    //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);

    ctrl_area->instrs.IOIO_PROT = 1;
  }

  ctrl_area->instrs.INTR = 1;



  if (vm_info.page_mode == SHADOW_PAGING) {
    PrintDebug("Creating initial shadow page table\n");
    vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pde32_pts(&vm_info) & ~0xfff);
    PrintDebug("Created\n");

    guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;

    ctrl_area->cr_reads.cr3 = 1;
    ctrl_area->cr_writes.cr3 = 1;


    ctrl_area->instrs.INVLPG = 1;
    ctrl_area->instrs.INVLPGA = 1;

    guest_state->g_pat = 0x7040600070406ULL;

    guest_state->cr0 |= 0x80000000;
  } else if (vm_info.page_mode == NESTED_PAGING) {
    // Flush the TLB on entries/exits
    //ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    //ctrl_area->NP_ENABLE = 1;

    //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
    // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);

    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    //    guest_state->g_pat = 0x7040600070406ULL;
  }



}
*/


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif