Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


build fix
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_direct_paging.h>
40
41 #include <palacios/vmm_ctrl_regs.h>
42 #include <palacios/vmm_config.h>
43 #include <palacios/svm_io.h>
44
45
46
47 // This is a global pointer to the host's VMCB
48 static addr_t host_vmcbs[CONFIG_MAX_CPUS] = {0};
49
50
51
52 extern void v3_stgi();
53 extern void v3_clgi();
54 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
55 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
56
57
58 static vmcb_t * Allocate_VMCB() {
59     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
60
61     memset(vmcb_page, 0, 4096);
62
63     return vmcb_page;
64 }
65
66
67
/* 
 * Initialize a zeroed VMCB with the guest state needed to boot a BIOS
 * from the x86 reset vector (real mode, CS:IP = F000:FFF0), and
 * configure the control area's intercept bitmaps.
 *
 * vmcb    - VMCB page to populate (must be zeroed, e.g. from Allocate_VMCB)
 * vm_info - guest whose I/O map, MSR map, and paging mode drive the setup
 *
 * No return value; on passthrough page-table init failure this prints an
 * error and returns with the VMCB only partially initialized.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Start at the reset vector: offset 0xfff0 within the CS base set below.
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // NOTE(review): EFER.SVME is set in the *guest's* EFER image here --
    // presumably required for VMRUN legality checks; confirm against the
    // EFER MSR hook installed below for shadow paging.
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM-specific instruction so the guest cannot use
    // virtualization or power/debug instructions directly.
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;

    // Intercept instructions the VMM must emulate or observe.
    ctrl_area->instrs.INVLPGA = 1;
    ctrl_area->instrs.CPUID = 1;

    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept external events that must not reach the guest directly.
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // NOTE(review): 0xf00 in EDX at reset presumably encodes a CPU
    // family/model for the BIOS -- confirm against the BIOS expectations.
    vm_info->vm_regs.rdx = 0x00000f00;


    // CD|NW set (caches effectively off), WP set, ET set; PE/PG clear
    // so the guest starts in real mode.
    guest_state->cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode


    // Reset-style CS: selector 0xf000 with base 0xf0000, so CS:IP points
    // at physical 0xffff0 (the conventional reset entry).
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // All other segments get a flat, zero-based real-mode setup.
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug status/control registers.
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Install the per-guest I/O permission bitmap and intercept port I/O.
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;


    // Install the per-guest MSR permission bitmap and intercept MSR access.
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        // The guest believes CR0 is at its reset value (ET only);
        // the shadow state tracks that separately from the real CR0 above.
        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        // Trap control-register traffic so the shadow pager stays in sync.
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow paging machinery.
        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        // Paging must really be on (PG) for the shadow tables to be used.
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}
250
251
252 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
253
254
255     v3_pre_config_guest(info, config_ptr);
256
257     PrintDebug("Allocating VMCB\n");
258     info->vmm_data = (void*)Allocate_VMCB();
259
260     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
261     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
262
263     v3_post_config_guest(info, config_ptr);
264
265     return 0;
266 }
267
268 static int start_svm_guest(struct guest_info *info) {
269     //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
270     //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
271     uint_t num_exits = 0;
272
273
274
275     PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
276     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
277     
278     info->run_state = VM_RUNNING;
279     rdtscll(info->yield_start_cycle);
280
281
282     while (1) {
283         ullong_t tmp_tsc;
284         
285         // Conditionally yield the CPU if the timeslice has expired
286         v3_yield_cond(info);
287
288         /*
289           PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
290           (void *)(addr_t)info->segments.cs.base, 
291           (void *)(addr_t)info->rip);
292         */
293
294         // disable global interrupts for vm state transition
295         v3_clgi();
296
297
298
299         rdtscll(info->time_state.cached_host_tsc);
300         //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
301         
302         v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
303         
304         rdtscll(tmp_tsc);
305
306         
307         //PrintDebug("SVM Returned\n");
308
309         // reenable global interrupts after vm exit
310         v3_stgi();
311
312
313         // Conditionally yield the CPU if the timeslice has expired
314         v3_yield_cond(info);
315
316
317         v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
318         num_exits++;
319         
320         if ((num_exits % 5000) == 0) {
321             PrintDebug("SVM Exit number %d\n", num_exits);
322         }
323
324         if (v3_handle_svm_exit(info) != 0) {
325             vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
326             addr_t host_addr;
327             addr_t linear_addr = 0;
328             
329             info->run_state = VM_ERROR;
330             
331             PrintDebug("SVM ERROR!!\n"); 
332       
333             v3_print_guest_state(info);
334
335             PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
336       
337             PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
338             PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
339       
340             PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
341             PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
342       
343             linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
344
345             if (info->mem_mode == PHYSICAL_MEM) {
346                 guest_pa_to_host_va(info, linear_addr, &host_addr);
347             } else if (info->mem_mode == VIRTUAL_MEM) {
348                 guest_va_to_host_va(info, linear_addr, &host_addr);
349             }
350
351             PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);
352
353             PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
354             v3_dump_mem((uint8_t *)host_addr, 15);
355
356             break;
357         }
358     }
359     return 0;
360 }
361
362
363
364
365
366 /* Checks machine SVM capability */
367 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
368 int v3_is_svm_capable() {
369     // Dinda
370     uint_t vm_cr_low = 0, vm_cr_high = 0;
371     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
372
373     v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
374   
375     PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);
376
377     if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
378       PrintDebug("SVM Not Available\n");
379       return 0;
380     }  else {
381         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
382         
383         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
384         
385         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
386             PrintDebug("SVM is available but is disabled.\n");
387             
388             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
389             
390             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
391             
392             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
393                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
394             } else {
395                 PrintDebug("SVM is locked with a key\n");
396             }
397             return 0;
398
399         } else {
400             PrintDebug("SVM is available and  enabled.\n");
401
402             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
403             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
404             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
405             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
406             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
407
408
409             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
410                 PrintDebug("SVM Nested Paging not supported\n");
411             } else {
412                 PrintDebug("SVM Nested Paging supported\n");
413             }
414
415             return 1;
416         }
417     }
418 }
419
420 static int has_svm_nested_paging() {
421     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
422
423     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
424
425     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
426
427     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
428         PrintDebug("SVM Nested Paging not supported\n");
429         return 0;
430     } else {
431         PrintDebug("SVM Nested Paging supported\n");
432         return 1;
433     }
434 }
435
436
/* 
 * Per-core SVM bring-up: set EFER.SVME to enable SVM on this CPU,
 * allocate the host state save area, and point the VM_HSAVE_PA MSR
 * at it.  Records the detected CPU capability (with/without nested
 * paging) in the global v3_cpu_types table.
 *
 * cpu_id - index of the core being initialized; selects the slot in
 *          host_vmcbs[] and v3_cpu_types[].
 */
void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    // NOTE(review): the high half of EFER is written as 0 here rather than
    // the value just read back -- presumably the high bits are reserved;
    // confirm this is intentional.
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");

    // Setup the host state save area
    // NOTE(review): 4 pages are allocated although HSAVE needs one;
    // the allocation result is not checked for failure.
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    // r_reg aliases both 32-bit halves, so the full 64-bit physical
    // address ends up split across e_reg.high/e_reg.low below.
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


    // Record whether this core supports SVM with nested paging.
    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}
466
467
468 void v3_init_svm_hooks(struct v3_ctrl_ops * vmm_ops) {
469
470     // Setup the SVM specific vmm operations
471     vmm_ops->init_guest = &init_svm_guest;
472     vmm_ops->start_guest = &start_svm_guest;
473     vmm_ops->has_nested_paging = &has_svm_nested_paging;
474
475     return;
476 }
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529 #if 0
530 /* 
531  * Test VMSAVE/VMLOAD Latency 
532  */
533 #define vmsave ".byte 0x0F,0x01,0xDB ; "
534 #define vmload ".byte 0x0F,0x01,0xDA ; "
535 {
536     uint32_t start_lo, start_hi;
537     uint32_t end_lo, end_hi;
538     uint64_t start, end;
539     
540     __asm__ __volatile__ (
541                           "rdtsc ; "
542                           "movl %%eax, %%esi ; "
543                           "movl %%edx, %%edi ; "
544                           "movq  %%rcx, %%rax ; "
545                           vmsave
546                           "rdtsc ; "
547                           : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
548                           : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
549                           );
550     
551     start = start_hi;
552     start <<= 32;
553     start += start_lo;
554     
555     end = end_hi;
556     end <<= 32;
557     end += end_lo;
558     
559     PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
560     
561     __asm__ __volatile__ (
562                           "rdtsc ; "
563                           "movl %%eax, %%esi ; "
564                           "movl %%edx, %%edi ; "
565                           "movq  %%rcx, %%rax ; "
566                           vmload
567                           "rdtsc ; "
568                           : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
569                               : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
570                               );
571         
572         start = start_hi;
573         start <<= 32;
574         start += start_lo;
575
576         end = end_hi;
577         end <<= 32;
578         end += end_lo;
579
580
581         PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
582     }
583     /* End Latency Test */
584
585 #endif
586
587
588
589
590
591
592
593 #if 0
594 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
595   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
596   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
597   uint_t i = 0;
598
599
600   guest_state->rsp = vm_info.vm_regs.rsp;
601   guest_state->rip = vm_info.rip;
602
603
604   /* I pretty much just gutted this from TVMM */
605   /* Note: That means its probably wrong */
606
607   // set the segment registers to mirror ours
608   guest_state->cs.selector = 1<<3;
609   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
610   guest_state->cs.attrib.fields.S = 1;
611   guest_state->cs.attrib.fields.P = 1;
612   guest_state->cs.attrib.fields.db = 1;
613   guest_state->cs.attrib.fields.G = 1;
614   guest_state->cs.limit = 0xfffff;
615   guest_state->cs.base = 0;
616   
617   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
618   for ( i = 0; segregs[i] != NULL; i++) {
619     struct vmcb_selector * seg = segregs[i];
620     
621     seg->selector = 2<<3;
622     seg->attrib.fields.type = 0x2; // Data Segment+read/write
623     seg->attrib.fields.S = 1;
624     seg->attrib.fields.P = 1;
625     seg->attrib.fields.db = 1;
626     seg->attrib.fields.G = 1;
627     seg->limit = 0xfffff;
628     seg->base = 0;
629   }
630
631
632   {
633     /* JRL THIS HAS TO GO */
634     
635     //    guest_state->tr.selector = GetTR_Selector();
636     guest_state->tr.attrib.fields.type = 0x9; 
637     guest_state->tr.attrib.fields.P = 1;
638     // guest_state->tr.limit = GetTR_Limit();
639     //guest_state->tr.base = GetTR_Base();// - 0x2000;
640     /* ** */
641   }
642
643
644   /* ** */
645
646
647   guest_state->efer |= EFER_MSR_svm_enable;
648   guest_state->rflags = 0x00000002; // The reserved bit is always 1
649   ctrl_area->svm_instrs.VMRUN = 1;
650   guest_state->cr0 = 0x00000001;    // PE 
651   ctrl_area->guest_ASID = 1;
652
653
654   //  guest_state->cpl = 0;
655
656
657
658   // Setup exits
659
660   ctrl_area->cr_writes.cr4 = 1;
661   
662   ctrl_area->exceptions.de = 1;
663   ctrl_area->exceptions.df = 1;
664   ctrl_area->exceptions.pf = 1;
665   ctrl_area->exceptions.ts = 1;
666   ctrl_area->exceptions.ss = 1;
667   ctrl_area->exceptions.ac = 1;
668   ctrl_area->exceptions.mc = 1;
669   ctrl_area->exceptions.gp = 1;
670   ctrl_area->exceptions.ud = 1;
671   ctrl_area->exceptions.np = 1;
672   ctrl_area->exceptions.of = 1;
673   ctrl_area->exceptions.nmi = 1;
674
675   
676
677   ctrl_area->instrs.IOIO_PROT = 1;
678   ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
679   
680   {
681     reg_ex_t tmp_reg;
682     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
683     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
684   }
685
686   ctrl_area->instrs.INTR = 1;
687
688   
689   {
690     char gdt_buf[6];
691     char idt_buf[6];
692
693     memset(gdt_buf, 0, 6);
694     memset(idt_buf, 0, 6);
695
696
697     uint_t gdt_base, idt_base;
698     ushort_t gdt_limit, idt_limit;
699     
700     GetGDTR(gdt_buf);
701     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
702     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
703     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
704
705     GetIDTR(idt_buf);
706     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
707     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
708     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
709
710
711     // gdt_base -= 0x2000;
712     //idt_base -= 0x2000;
713
714     guest_state->gdtr.base = gdt_base;
715     guest_state->gdtr.limit = gdt_limit;
716     guest_state->idtr.base = idt_base;
717     guest_state->idtr.limit = idt_limit;
718
719
720   }
721   
722   
723   // also determine if CPU supports nested paging
724   /*
725   if (vm_info.page_tables) {
726     //   if (0) {
727     // Flush the TLB on entries/exits
728     ctrl_area->TLB_CONTROL = 1;
729
730     // Enable Nested Paging
731     ctrl_area->NP_ENABLE = 1;
732
733     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
734
735         // Set the Nested Page Table pointer
736     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
737
738
739     //   ctrl_area->N_CR3 = Get_CR3();
740     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
741
742     guest_state->g_pat = 0x7040600070406ULL;
743
744     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
745     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
746     // Enable Paging
747     //    guest_state->cr0 |= 0x80000000;
748   }
749   */
750
751 }
752
753
754
755
756
757 #endif
758
759