Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


3c3329292bad022c6376ca7989420238e1c2b4a0
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_direct_paging.h>
40
41 #include <palacios/vmm_ctrl_regs.h>
42 #include <palacios/vmm_config.h>
43 #include <palacios/svm_io.h>
44
45
46
47 // This is a global pointer to the host's VMCB
48 static addr_t host_vmcbs[CONFIG_MAX_CPUS] = {0};
49
50
51
52 extern void v3_stgi();
53 extern void v3_clgi();
54 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
55 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
56
57
58 static vmcb_t * Allocate_VMCB() {
59     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
60
61     memset(vmcb_page, 0, 4096);
62
63     return vmcb_page;
64 }
65
66
67
/*
 * Populate a freshly zeroed VMCB so the guest boots like a physical
 * machine: real mode, executing at the BIOS reset vector (CS:IP =
 * f000:fff0).  Installs the intercept set (all SVM instructions, HLT,
 * NMI/SMI/INIT, IO, MSR, physical interrupts) and configures either
 * shadow or nested paging based on vm_info->shdw_pg_mode.
 *
 * vmcb    - zeroed VMCB page to fill in
 * vm_info - guest whose initial register state is also set here (rdx)
 *
 * Returns nothing; if passthrough page table setup fails it logs an
 * error and returns with the VMCB only partially initialized.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // x86 reset state: IP = 0xfff0; the CS base set below supplies 0xf0000.
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // Advertise SVM as enabled in the guest's view of EFER.
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM instruction -- the guest may not run them natively.
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    // Exit on HLT so the VMM can schedule around an idle guest.
    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept asynchronous events and PAUSE (spin-loop hint).
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // Initial EDX -- presumably the CPU signature reported at reset;
    // TODO(review): confirm against the BIOS's expectations.
    vm_info->vm_regs.rdx = 0x00000f00;


    guest_state->cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode


    // Real-mode style CS pointing at the BIOS image at 0xf0000.
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // All remaining data segments get identical flat real-mode attributes.
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug registers.
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Route port IO through the IO permission bitmap so hooked ports exit.
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    // Likewise for MSR accesses via the MSR permission map.
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        // TLB_CONTROL = 1 flushes the TLB on every VMRUN.
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        // Intercept control register accesses so the shadow pager can
        // track the guest's paging state.
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // EFER must be virtualized too (long-mode / paging bits).
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow paging machinery.
        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        // Turn on paging (CR0.PG) so the shadow tables are in effect.
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}
252
253
254 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
255
256
257     v3_pre_config_guest(info, config_ptr);
258
259     PrintDebug("Allocating VMCB\n");
260     info->vmm_data = (void*)Allocate_VMCB();
261
262     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
263     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
264
265     v3_post_config_guest(info, config_ptr);
266
267     return 0;
268 }
269
/*
 * Main execution loop for an SVM guest.  Repeatedly enters the guest
 * with v3_svm_launch() and hands each #VMEXIT to v3_handle_svm_exit().
 * The loop only terminates when exit handling fails, after dumping
 * diagnostic state; it then returns 0.
 */
static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    rdtscll(info->yield_start_cycle);


    while (1) {
        ullong_t tmp_tsc;
        
        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        // Snapshot the host TSC immediately before entry so guest time
        // can be advanced by the cycles spent inside the guest.
        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        // VMRUN: the VMCB is passed by *physical* address; the per-CPU
        // host VMCB receives the host state saved across the entry.
        v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
        
        rdtscll(tmp_tsc);

        
        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();


        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);


        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;
        
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);
        }

        if (v3_handle_svm_exit(info) != 0) {
            // Exit handling failed: dump as much state as possible and stop.
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            // NOTE(review): host_addr is read below even if neither
            // mem_mode branch assigns it -- verify mem_mode is always
            // PHYSICAL_MEM or VIRTUAL_MEM here.
            addr_t host_addr;
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            // Translate the faulting RIP to a host pointer so the raw
            // instruction bytes can be dumped.
            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }

            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}
363
364
365
366
367
368 /* Checks machine SVM capability */
369 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
370 int v3_is_svm_capable() {
371     // Dinda
372     uint_t vm_cr_low = 0, vm_cr_high = 0;
373     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
374
375     v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
376   
377     PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=%p\n", (void *)ecx);
378
379     if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
380       PrintDebug("SVM Not Available\n");
381       return 0;
382     }  else {
383         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
384         
385         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
386         
387         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
388             PrintDebug("SVM is available but is disabled.\n");
389             
390             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
391             
392             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
393             
394             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
395                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
396             } else {
397                 PrintDebug("SVM is locked with a key\n");
398             }
399             return 0;
400
401         } else {
402             PrintDebug("SVM is available and  enabled.\n");
403
404             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
405             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
406             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
407             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
408             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
409
410
411             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
412                 PrintDebug("SVM Nested Paging not supported\n");
413             } else {
414                 PrintDebug("SVM Nested Paging supported\n");
415             }
416
417             return 1;
418         }
419     }
420 }
421
422 static int has_svm_nested_paging() {
423     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
424
425     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
426
427     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
428
429     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
430         PrintDebug("SVM Nested Paging not supported\n");
431         return 0;
432     } else {
433         PrintDebug("SVM Nested Paging supported\n");
434         return 1;
435     }
436 }
437
438
/*
 * Per-CPU SVM initialization: set EFER.SVME to enable SVM, allocate
 * and register this CPU's host state save area (VM_HSAVE_PA MSR), and
 * record the CPU's SVM capability level in v3_cpu_types[].
 */
void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    // NOTE(review): the high half read above is written back as 0 --
    // assumes the upper 32 bits of EFER are zero; verify.
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");

    // Setup the host state save area
    // NOTE(review): 4 pages are allocated here although the hardware
    // save area is presumably one page -- confirm intent.
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    // Store the full 64-bit physical address via the union, then write
    // it out as the two 32-bit MSR halves.
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


    // REV3 marks CPUs that additionally support nested paging.
    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}
468
469
470 void v3_init_svm_hooks(struct v3_ctrl_ops * vmm_ops) {
471
472     // Setup the SVM specific vmm operations
473     vmm_ops->init_guest = &init_svm_guest;
474     vmm_ops->start_guest = &start_svm_guest;
475     vmm_ops->has_nested_paging = &has_svm_nested_paging;
476
477     return;
478 }
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
#if 0
/* 
 * Test VMSAVE/VMLOAD Latency 
 *
 * NOTE(review): dead code, excluded by the preprocessor.  If ever
 * re-enabled it will not compile as-is: it references host_vmcb[cpu_id]
 * but the live global array is named host_vmcbs, and the bare compound
 * statement needs an enclosing function providing cpu_id.
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;
    
    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmsave
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                          : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );
    
    start = start_hi;
    start <<= 32;
    start += start_lo;
    
    end = end_hi;
    end <<= 32;
    end += end_lo;
    
    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
    
    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmload
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
    }
    /* End Latency Test */

#endif
588
589
590
591
592
593
594
#if 0
/*
 * NOTE(review): dead code, excluded by the preprocessor.  Legacy
 * protected-mode VMCB setup that mirrors the host's segment and
 * descriptor-table state into the guest.  If re-enabled it would need
 * updating: it takes guest_info by value, calls GetGDTR/GetIDTR and
 * GetTR_* helpers not declared here, and assumes 32-bit addresses.
 */
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
760
761