Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Added missing string delimiter.
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_profiler.h>
40
41 #include <palacios/vmm_direct_paging.h>
42
43 #include <palacios/vmm_ctrl_regs.h>
44 #include <palacios/vmm_config.h>
45 #include <palacios/svm_io.h>
46
47
48
49 // This is a global pointer to the host's VMCB
50 static void * host_vmcb = NULL;
51
52 extern void v3_stgi();
53 extern void v3_clgi();
54 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
55 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
56
57
58 static vmcb_t * Allocate_VMCB() {
59     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
60
61     memset(vmcb_page, 0, 4096);
62
63     return vmcb_page;
64 }
65
66
67
/*
 * Init_VMCB_BIOS: populate a zeroed VMCB so the guest starts like a
 * physical machine at reset: real mode, CS:IP = f000:fff0 (the BIOS
 * reset vector), with the instruction/IO/MSR intercepts Palacios needs.
 *
 * Also configures paging virtualization -- shadow paging or nested
 * paging -- selected by vm_info->shdw_pg_mode.
 *
 * NOTE(review): on passthrough page-table init failure this returns
 * silently; the caller gets no error indication -- confirm intended.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Reset-vector entry point: IP 0xfff0 (CS base is set to 0xf0000 below)
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // The guest's EFER must have SVME set while it runs under SVM
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM/virtualization instruction the guest might issue
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept external events so the host keeps control of the CPU
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // NOTE(review): at reset, EDX normally holds the CPU family/model/
    // stepping id -- presumably 0x00000f00 emulates that; confirm.
    vm_info->vm_regs.rdx = 0x00000f00;

    // CR0 reset value (CD | NW | ET); paging and protection disabled
    guest_state->cr0 = 0x60000010;


    // Real-mode CS with base 0xf0000, so rip=0xfff0 executes at 0xffff0
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // All other segments: selector 0, base 0, same raw attributes as CS
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    // Descriptor tables and task register: zero base, 64KB limits
    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values of the debug status/control registers
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Install the I/O permission bitmap and intercept port I/O
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    // Install the MSR permission bitmap and intercept RDMSR/WRMSR
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;   // flush the whole TLB on every VMRUN
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        // The CR0 value the guest believes it has (reset value; the
        // hardware CR0 gets PG set below)
        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        // Intercept CR accesses so the shadow state stays in sync
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // Virtualize guest reads/writes of EFER
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow-paging machinery
        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        // Enable paging (PG) in the hardware CR0; the guest still sees
        // its own CR0 through the shadow state above
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}
251
252
253 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
254
255
256     v3_pre_config_guest(info, config_ptr);
257
258     PrintDebug("Allocating VMCB\n");
259     info->vmm_data = (void*)Allocate_VMCB();
260
261     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
262     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
263
264     v3_post_config_guest(info, config_ptr);
265
266     return 0;
267 }
268
269 static int start_svm_guest(struct guest_info *info) {
270     //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
271     //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
272     uint_t num_exits = 0;
273
274
275
276     PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
277     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
278     
279     info->run_state = VM_RUNNING;
280     rdtscll(info->yield_start_cycle);
281
282
283     while (1) {
284         ullong_t tmp_tsc;
285         
286         // Conditionally yield the CPU if the timeslice has expired
287         v3_yield_cond(info);
288
289         /*
290           PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
291           (void *)(addr_t)info->segments.cs.base, 
292           (void *)(addr_t)info->rip);
293         */
294
295         // disable global interrupts for vm state transition
296         v3_clgi();
297
298
299
300         rdtscll(info->time_state.cached_host_tsc);
301         //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
302         
303         v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
304         
305         rdtscll(tmp_tsc);
306
307         
308         //PrintDebug("SVM Returned\n");
309
310         // reenable global interrupts after vm exit
311         v3_stgi();
312
313
314         // Conditionally yield the CPU if the timeslice has expired
315         v3_yield_cond(info);
316
317
318         v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
319         num_exits++;
320         
321         if ((num_exits % 5000) == 0) {
322             PrintDebug("SVM Exit number %d\n", num_exits);
323
324 #ifdef CONFIG_PROFILE_VMM
325             if (info->enable_profiler) {
326                 v3_print_profile(info);
327             }
328 #endif
329         }
330
331         if (v3_handle_svm_exit(info) != 0) {
332             vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
333             addr_t host_addr;
334             addr_t linear_addr = 0;
335             
336             info->run_state = VM_ERROR;
337             
338             PrintDebug("SVM ERROR!!\n"); 
339       
340             v3_print_guest_state(info);
341
342             PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
343       
344             PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
345             PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
346       
347             PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
348             PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
349       
350             linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
351
352             if (info->mem_mode == PHYSICAL_MEM) {
353                 guest_pa_to_host_va(info, linear_addr, &host_addr);
354             } else if (info->mem_mode == VIRTUAL_MEM) {
355                 guest_va_to_host_va(info, linear_addr, &host_addr);
356             }
357
358             PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);
359
360             PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
361             PrintTraceMemDump((uchar_t *)host_addr, 15);
362
363             break;
364         }
365     }
366     return 0;
367 }
368
369
370
371
372
373 /* Checks machine SVM capability */
374 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
375 int v3_is_svm_capable() {
376     // Dinda
377     uint_t vm_cr_low = 0, vm_cr_high = 0;
378     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
379
380     v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
381   
382     PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=%p\n", (void *)ecx);
383
384     if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
385       PrintDebug("SVM Not Available\n");
386       return 0;
387     }  else {
388         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
389         
390         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
391         
392         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
393             PrintDebug("SVM is available but is disabled.\n");
394             
395             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
396             
397             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
398             
399             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
400                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
401             } else {
402                 PrintDebug("SVM is locked with a key\n");
403             }
404             return 0;
405
406         } else {
407             PrintDebug("SVM is available and  enabled.\n");
408
409             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
410             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
411             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
412             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
413             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
414
415
416             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
417                 PrintDebug("SVM Nested Paging not supported\n");
418             } else {
419                 PrintDebug("SVM Nested Paging supported\n");
420             }
421
422             return 1;
423         }
424     }
425 }
426
427 static int has_svm_nested_paging() {
428     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
429
430     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
431
432     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
433
434     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
435         PrintDebug("SVM Nested Paging not supported\n");
436         return 0;
437     } else {
438         PrintDebug("SVM Nested Paging supported\n");
439         return 1;
440     }
441 }
442
443
444
445 void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
446     reg_ex_t msr;
447     extern v3_cpu_arch_t v3_cpu_type;
448
449     // Enable SVM on the CPU
450     v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
451     msr.e_reg.low |= EFER_MSR_svm_enable;
452     v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
453
454     PrintDebug("SVM Enabled\n");
455
456     // Setup the host state save area
457     host_vmcb = V3_AllocPages(4);
458
459     /* 64-BIT-ISSUE */
460     //  msr.e_reg.high = 0;
461     //msr.e_reg.low = (uint_t)host_vmcb;
462     msr.r_reg = (addr_t)host_vmcb;
463
464     PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
465     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
466
467
468
469
470     if (has_svm_nested_paging() == 1) {
471         v3_cpu_type = V3_SVM_REV3_CPU;
472     } else {
473         v3_cpu_type = V3_SVM_CPU;
474     }
475
476     // Setup the SVM specific vmm operations
477     vmm_ops->init_guest = &init_svm_guest;
478     vmm_ops->start_guest = &start_svm_guest;
479     vmm_ops->has_nested_paging = &has_svm_nested_paging;
480
481     return;
482 }
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
#if 0
/*
 * NOTE(review): Dead code -- compiled out by the #if 0 above.  This is
 * a bare brace block, not a function, so it cannot compile at file
 * scope if re-enabled; presumably it was pasted here from inside a
 * function for reference.  It measures the cycle latency of the VMSAVE
 * and VMLOAD instructions with RDTSC, using host_vmcb as the target.
 */
/* 
 * Test VMSAVE/VMLOAD Latency 
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;
    
    // RDTSC before: EDX:EAX saved into EDI:ESI; RCX (host_vmcb) moved
    // to RAX for VMSAVE; RDTSC after leaves the end stamp in EDX:EAX.
    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmsave
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                          : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                          );
    
    // Reassemble the 32-bit halves into 64-bit timestamps
    start = start_hi;
    start <<= 32;
    start += start_lo;
    
    end = end_hi;
    end <<= 32;
    end += end_lo;
    
    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
    
    // Same measurement, this time around VMLOAD
    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmload
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
    }
    /* End Latency Test */

#endif
592
593
594
595
596
597
598
#if 0
/*
 * NOTE(review): Dead code -- disabled by the #if 0 above and kept only
 * for reference.  This older initializer started the guest directly in
 * protected mode, mirroring the host's segment registers and GDT/IDT
 * ("gutted from TVMM" per the comment below).  Init_VMCB_BIOS is the
 * live initializer.  Note it takes guest_info by VALUE, unlike the
 * live code.
 */
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    // NOTE(review): memset's fill argument is converted to unsigned
    // char, so 0xffffffff truncates to 0xff -- presumably the intent
    // (intercept all ports), but confirm.
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    // Copy the host's descriptor-table registers into the guest state
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
764
765