Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


fixed crash issue due to improperly saving host state
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_profiler.h>
40
41 #include <palacios/vmm_direct_paging.h>
42
43 #include <palacios/vmm_ctrl_regs.h>
44 #include <palacios/vmm_config.h>
45 #include <palacios/svm_io.h>
46
47
48
// Per-host state save area handed to VMRUN (via v3_svm_launch) on every
// VM entry.  Allocated once in v3_init_SVM().
// NOTE(review): V3_AllocPages appears to return a physical address here
// (it is written into the VM_HSAVE_PA MSR unconverted) — confirm.
static void * host_vmcb = NULL;

// Low-level assembly stubs: set / clear the global interrupt flag (GIF).
extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
// Assembly entry point that performs the actual VMRUN with the guest GPRs.
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
56
57
58 static vmcb_t * Allocate_VMCB() {
59     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
60
61     memset(vmcb_page, 0, 4096);
62
63     return vmcb_page;
64 }
65
66
67
/*
 * Initialize a VMCB so the guest begins execution at the x86 reset
 * vector (CS base 0xf0000, RIP 0xfff0), i.e. inside the BIOS.
 *
 * Configures instruction/exception intercepts, the initial segment and
 * control-register state, the IO and MSR permission maps, and either
 * shadow or nested paging depending on vm_info->shdw_pg_mode.
 *
 * NOTE(review): on v3_init_passthrough_pts() failure this returns void
 * with the VMCB half-initialized — callers cannot detect the error.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Reset-vector entry state
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // SVME must be set in the guest's EFER for VMRUN to succeed
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM-specific instruction: the guest must not be
    // able to issue its own virtualization operations.
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept external machine events as well
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // NOTE(review): presumably the post-reset EDX (family/model/stepping
    // reported to the BIOS) — confirm the intended CPU signature.
    vm_info->vm_regs.rdx = 0x00000f00;

    // CR0 after reset: ET/NE-style bits set, paging and protection off
    guest_state->cr0 = 0x60000010;


    // Real-mode CS pointing at the top of the BIOS region
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // Initialize the remaining data segments identically
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    // Descriptor tables: zero-based, 64KB limit
    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug status/control registers
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // IO permission map: VMEXIT on ports hooked via the IO map
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    // MSR permission map: VMEXIT on hooked MSR accesses
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        // TLB_CONTROL=1 flushes the whole TLB on every entry/exit
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        // The guest *believes* its CR0 is the reset value (PG off)...
        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        // ...while the real CR3 points at the passthrough page tables
        guest_state->cr3 = vm_info->direct_map_pt;

        // Intercept CR accesses so the shadow-paging code can track them
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // EFER must be virtualized so the guest's long-mode/SVME view
        // stays decoupled from the real register
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow page-table updates
        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        // Hardware paging is ON even though the guest thinks it is off
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}
251
252
253 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
254
255
256     v3_pre_config_guest(info, config_ptr);
257
258     PrintDebug("Allocating VMCB\n");
259     info->vmm_data = (void*)Allocate_VMCB();
260
261     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
262     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
263
264     v3_post_config_guest(info, config_ptr);
265
266     return 0;
267 }
268
269
270
// can we start a kernel thread here...
/*
 * Main VM execution loop: repeatedly enter the guest with VMRUN and
 * dispatch each resulting VMEXIT to v3_handle_svm_exit().  Returns 0
 * after an unrecoverable exit-handler error breaks the loop.
 */
static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    
    while (1) {
        ullong_t tmp_tsc;
        

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        // Snapshot the host TSC just before entry so guest time can be
        // credited for the interval spent inside the guest
        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        // Enter the guest.  Note the VMCB is passed by *physical*
        // address, as VMRUN requires; host state is saved in host_vmcb.
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
        
        rdtscll(tmp_tsc);

        
        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();

        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;
        
        // Periodic progress/profiling output
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);

            if (info->enable_profiler) {
                v3_print_profile(info);
            }
        }

     
        // A non-zero return means the exit handler hit a fatal error:
        // dump diagnostic state and stop the VM.
        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            // NOTE(review): host_addr is never initialized; if neither
            // translation branch below fires (or it fails), the debug
            // prints dump a garbage address — confirm intended.
            addr_t host_addr;
            // NOTE(review): linear_addr stays 0, so this translates and
            // dumps guest address 0 rather than the faulting RIP, despite
            // the "Host Address of rip" message — looks like a leftover.
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            // exit_info fields are printed as two 32-bit halves
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }


            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            PrintTraceMemDump((uchar_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}
360
361
362
363
364
365 /* Checks machine SVM capability */
366 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
367 int v3_is_svm_capable() {
368     // Dinda
369     uint_t vm_cr_low = 0, vm_cr_high = 0;
370     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
371
372     v3_cpuid(CPUID_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
373   
374     PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
375
376     if ((ecx & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
377       PrintDebug("SVM Not Available\n");
378       return 0;
379     }  else {
380         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
381         
382         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
383         
384         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
385             PrintDebug("SVM is available but is disabled.\n");
386             
387             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
388             
389             PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
390             
391             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
392                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
393             } else {
394                 PrintDebug("SVM is locked with a key\n");
395             }
396             return 0;
397
398         } else {
399             PrintDebug("SVM is available and  enabled.\n");
400
401             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
402             PrintDebug("CPUID_FEATURE_IDS_eax=%p\n", (void *)eax);
403             PrintDebug("CPUID_FEATURE_IDS_ebx=%p\n", (void *)ebx);
404             PrintDebug("CPUID_FEATURE_IDS_ecx=%p\n", (void *)ecx);
405             PrintDebug("CPUID_FEATURE_IDS_edx=%p\n", (void *)edx);
406
407
408             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
409                 PrintDebug("SVM Nested Paging not supported\n");
410             } else {
411                 PrintDebug("SVM Nested Paging supported\n");
412             }
413
414             return 1;
415         }
416     }
417 }
418
419 static int has_svm_nested_paging() {
420     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
421
422     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
423
424     //PrintDebug("CPUID_FEATURE_IDS_edx=0x%x\n", edx);
425
426     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
427         PrintDebug("SVM Nested Paging not supported\n");
428         return 0;
429     } else {
430         PrintDebug("SVM Nested Paging supported\n");
431         return 1;
432     }
433 }
434
435
436
437 void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
438     reg_ex_t msr;
439     extern v3_cpu_arch_t v3_cpu_type;
440
441     // Enable SVM on the CPU
442     v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
443     msr.e_reg.low |= EFER_MSR_svm_enable;
444     v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
445
446     PrintDebug("SVM Enabled\n");
447
448
449     // Setup the host state save area
450     host_vmcb = V3_AllocPages(4);
451
452
453     /* 64-BIT-ISSUE */
454     //  msr.e_reg.high = 0;
455     //msr.e_reg.low = (uint_t)host_vmcb;
456     msr.r_reg = (addr_t)host_vmcb;
457
458     PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
459     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
460
461     if (has_svm_nested_paging() == 1) {
462         v3_cpu_type = V3_SVM_REV3_CPU;
463     } else {
464         v3_cpu_type = V3_SVM_CPU;
465     }
466
467     // Setup the SVM specific vmm operations
468     vmm_ops->init_guest = &init_svm_guest;
469     vmm_ops->start_guest = &start_svm_guest;
470     vmm_ops->has_nested_paging = &has_svm_nested_paging;
471
472     return;
473 }
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526 /*static void Init_VMCB(vmcb_t * vmcb, struct guest_info vm_info) {
527   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
528   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
529   uint_t i;
530
531
532   guest_state->rsp = vm_info.vm_regs.rsp;
533   guest_state->rip = vm_info.rip;
534
535
536   //ctrl_area->instrs.instrs.CR0 = 1;
537   ctrl_area->cr_reads.cr0 = 1;
538   ctrl_area->cr_writes.cr0 = 1;
539
540   guest_state->efer |= EFER_MSR_svm_enable;
541   guest_state->rflags = 0x00000002; // The reserved bit is always 1
542   ctrl_area->svm_instrs.VMRUN = 1;
543   // guest_state->cr0 = 0x00000001;    // PE 
544   ctrl_area->guest_ASID = 1;
545
546
547   ctrl_area->exceptions.de = 1;
548   ctrl_area->exceptions.df = 1;
549   ctrl_area->exceptions.pf = 1;
550   ctrl_area->exceptions.ts = 1;
551   ctrl_area->exceptions.ss = 1;
552   ctrl_area->exceptions.ac = 1;
553   ctrl_area->exceptions.mc = 1;
554   ctrl_area->exceptions.gp = 1;
555   ctrl_area->exceptions.ud = 1;
556   ctrl_area->exceptions.np = 1;
557   ctrl_area->exceptions.of = 1;
558   ctrl_area->exceptions.nmi = 1;
559
560   guest_state->cs.selector = 0x0000;
561   guest_state->cs.limit=~0u;
562   guest_state->cs.base = guest_state->cs.selector<<4;
563   guest_state->cs.attrib.raw = 0xf3;
564
565   
566   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
567   for ( i = 0; segregs[i] != NULL; i++) {
568     struct vmcb_selector * seg = segregs[i];
569     
570     seg->selector = 0x0000;
571     seg->base = seg->selector << 4;
572     seg->attrib.raw = 0xf3;
573     seg->limit = ~0u;
574   }
575   
576   if (vm_info.io_map.num_ports > 0) {
577     struct vmm_io_hook * iter;
578     addr_t io_port_bitmap;
579     
580     io_port_bitmap = (addr_t)V3_AllocPages(3);
581     memset((uchar_t*)io_port_bitmap, 0, PAGE_SIZE * 3);
582     
583     ctrl_area->IOPM_BASE_PA = io_port_bitmap;
584
585     //PrintDebug("Setting up IO Map at 0x%x\n", io_port_bitmap);
586
587     FOREACH_IO_HOOK(vm_info.io_map, iter) {
588       ushort_t port = iter->port;
589       uchar_t * bitmap = (uchar_t *)io_port_bitmap;
590
591       bitmap += (port / 8);
592       PrintDebug("Setting Bit in block %x\n", bitmap);
593       *bitmap |= 1 << (port % 8);
594     }
595
596
597     //PrintDebugMemDump((uchar_t*)io_port_bitmap, PAGE_SIZE *2);
598
599     ctrl_area->instrs.IOIO_PROT = 1;
600   }
601
602   ctrl_area->instrs.INTR = 1;
603
604
605
606   if (vm_info.page_mode == SHADOW_PAGING) {
607     PrintDebug("Creating initial shadow page table\n");
608     vm_info.shdw_pg_state.shadow_cr3 |= ((addr_t)create_passthrough_pts_32(&vm_info) & ~0xfff);
609     PrintDebug("Created\n");
610
611     guest_state->cr3 = vm_info.shdw_pg_state.shadow_cr3;
612
613     ctrl_area->cr_reads.cr3 = 1;
614     ctrl_area->cr_writes.cr3 = 1;
615
616
617     ctrl_area->instrs.INVLPG = 1;
618     ctrl_area->instrs.INVLPGA = 1;
619
620     guest_state->g_pat = 0x7040600070406ULL;
621
622     guest_state->cr0 |= 0x80000000;
623   } else if (vm_info.page_mode == NESTED_PAGING) {
624     // Flush the TLB on entries/exits
625     //ctrl_area->TLB_CONTROL = 1;
626
627     // Enable Nested Paging
628     //ctrl_area->NP_ENABLE = 1;
629
630     //PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
631
632         // Set the Nested Page Table pointer
633     //    ctrl_area->N_CR3 = ((addr_t)vm_info.page_tables);
634     // ctrl_area->N_CR3 = (addr_t)(vm_info.page_tables);
635
636     //   ctrl_area->N_CR3 = Get_CR3();
637     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
638
639     //    guest_state->g_pat = 0x7040600070406ULL;
640   }
641
642
643
644 }
645 */
646
647
648
649
650
651
652
653 #if 0
654 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
655   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
656   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
657   uint_t i = 0;
658
659
660   guest_state->rsp = vm_info.vm_regs.rsp;
661   guest_state->rip = vm_info.rip;
662
663
664   /* I pretty much just gutted this from TVMM */
665   /* Note: That means its probably wrong */
666
667   // set the segment registers to mirror ours
668   guest_state->cs.selector = 1<<3;
669   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
670   guest_state->cs.attrib.fields.S = 1;
671   guest_state->cs.attrib.fields.P = 1;
672   guest_state->cs.attrib.fields.db = 1;
673   guest_state->cs.attrib.fields.G = 1;
674   guest_state->cs.limit = 0xfffff;
675   guest_state->cs.base = 0;
676   
677   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
678   for ( i = 0; segregs[i] != NULL; i++) {
679     struct vmcb_selector * seg = segregs[i];
680     
681     seg->selector = 2<<3;
682     seg->attrib.fields.type = 0x2; // Data Segment+read/write
683     seg->attrib.fields.S = 1;
684     seg->attrib.fields.P = 1;
685     seg->attrib.fields.db = 1;
686     seg->attrib.fields.G = 1;
687     seg->limit = 0xfffff;
688     seg->base = 0;
689   }
690
691
692   {
693     /* JRL THIS HAS TO GO */
694     
695     //    guest_state->tr.selector = GetTR_Selector();
696     guest_state->tr.attrib.fields.type = 0x9; 
697     guest_state->tr.attrib.fields.P = 1;
698     // guest_state->tr.limit = GetTR_Limit();
699     //guest_state->tr.base = GetTR_Base();// - 0x2000;
700     /* ** */
701   }
702
703
704   /* ** */
705
706
707   guest_state->efer |= EFER_MSR_svm_enable;
708   guest_state->rflags = 0x00000002; // The reserved bit is always 1
709   ctrl_area->svm_instrs.VMRUN = 1;
710   guest_state->cr0 = 0x00000001;    // PE 
711   ctrl_area->guest_ASID = 1;
712
713
714   //  guest_state->cpl = 0;
715
716
717
718   // Setup exits
719
720   ctrl_area->cr_writes.cr4 = 1;
721   
722   ctrl_area->exceptions.de = 1;
723   ctrl_area->exceptions.df = 1;
724   ctrl_area->exceptions.pf = 1;
725   ctrl_area->exceptions.ts = 1;
726   ctrl_area->exceptions.ss = 1;
727   ctrl_area->exceptions.ac = 1;
728   ctrl_area->exceptions.mc = 1;
729   ctrl_area->exceptions.gp = 1;
730   ctrl_area->exceptions.ud = 1;
731   ctrl_area->exceptions.np = 1;
732   ctrl_area->exceptions.of = 1;
733   ctrl_area->exceptions.nmi = 1;
734
735   
736
737   ctrl_area->instrs.IOIO_PROT = 1;
738   ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
739   
740   {
741     reg_ex_t tmp_reg;
742     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
743     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
744   }
745
746   ctrl_area->instrs.INTR = 1;
747
748   
749   {
750     char gdt_buf[6];
751     char idt_buf[6];
752
753     memset(gdt_buf, 0, 6);
754     memset(idt_buf, 0, 6);
755
756
757     uint_t gdt_base, idt_base;
758     ushort_t gdt_limit, idt_limit;
759     
760     GetGDTR(gdt_buf);
761     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
762     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
763     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
764
765     GetIDTR(idt_buf);
766     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
767     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
768     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
769
770
771     // gdt_base -= 0x2000;
772     //idt_base -= 0x2000;
773
774     guest_state->gdtr.base = gdt_base;
775     guest_state->gdtr.limit = gdt_limit;
776     guest_state->idtr.base = idt_base;
777     guest_state->idtr.limit = idt_limit;
778
779
780   }
781   
782   
783   // also determine if CPU supports nested paging
784   /*
785   if (vm_info.page_tables) {
786     //   if (0) {
787     // Flush the TLB on entries/exits
788     ctrl_area->TLB_CONTROL = 1;
789
790     // Enable Nested Paging
791     ctrl_area->NP_ENABLE = 1;
792
793     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
794
795         // Set the Nested Page Table pointer
796     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
797
798
799     //   ctrl_area->N_CR3 = Get_CR3();
800     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
801
802     guest_state->g_pat = 0x7040600070406ULL;
803
804     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
805     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
806     // Enable Paging
807     //    guest_state->cr0 |= 0x80000000;
808   }
809   */
810
811 }
812
813
814
815
816
817 #endif
818
819