Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


added cpuid hooking
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_direct_paging.h>
40
41 #include <palacios/vmm_ctrl_regs.h>
42 #include <palacios/vmm_config.h>
43 #include <palacios/svm_io.h>
44
45
46
47 // This is a global pointer to the host's VMCB
48 static addr_t host_vmcbs[CONFIG_MAX_CPUS] = {0};
49
50
51
52 extern void v3_stgi();
53 extern void v3_clgi();
54 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
55 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
56
57
58 static vmcb_t * Allocate_VMCB() {
59     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
60
61     memset(vmcb_page, 0, 4096);
62
63     return vmcb_page;
64 }
65
66
67
/* 
 * Initialize a freshly allocated (zeroed) VMCB so the guest starts execution
 * like a CPU coming out of reset: real mode, CS:IP = 0xf000:0xfff0 (the BIOS
 * entry point).  Also installs the instruction/exception intercepts, the I/O
 * and MSR permission maps, and the paging configuration (shadow vs. nested,
 * selected by vm_info->shdw_pg_mode).
 *
 * vmcb    - the VMCB to fill in (control area + saved guest state)
 * vm_info - the guest this VMCB belongs to; its io_map/msr_map are
 *           initialized here and its registers/paging state are seeded
 *
 * Returns nothing; on passthrough page-table init failure it logs an error
 * and returns early, leaving the VMCB only partially initialized.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Start at the conventional x86 reset vector (CS:IP = f000:fff0)
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // EFER.SVME must be set in the guest copy of EFER for VMRUN to succeed
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM/virtualization instruction so the guest can never
    // execute them natively
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.CPUID = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Exit on external events (NMI/SMI/INIT) and spin-wait hints
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // NOTE(review): presumably the reset-time EDX value advertising CPU
    // family/model to the BIOS -- confirm against the BIOS expectations
    vm_info->vm_regs.rdx = 0x00000f00;


    guest_state->cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode


    // BIOS code segment: selector f000 with base f0000, real-mode style
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;  // NOTE(review): raw attribute encoding -- verify against vmcb_selector layout


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // Give the remaining data segments a flat real-mode-compatible setup
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
					&(guest_state->es), &(guest_state->fs), 
					&(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
	struct vmcb_selector * seg = segregs[i];
	
	seg->selector = 0x0000;
	//    seg->base = seg->selector << 4;
	seg->base = 0x00000000;
	seg->attrib.raw = 0xf3;
	seg->limit = ~0u;
    }

    // Empty descriptor tables and task register, as at reset
    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug registers
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Build the I/O permission map and turn on port-I/O interception
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;


    // Build the MSR permission map and turn on MSR interception
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
	PrintDebug("Creating initial shadow page table\n");
	
	/* JRL: This is a performance killer, and a simplistic solution */
	/* We need to fix this */
	// Flush the whole TLB on every entry rather than tracking invalidations
	ctrl_area->TLB_CONTROL = 1;
	ctrl_area->guest_ASID = 1;  // NOTE(review): ASID 0 is presumably reserved for the host -- confirm
	
	
	if (v3_init_passthrough_pts(vm_info) == -1) {
	    PrintError("Could not initialize passthrough page tables\n");
	    return ;
	}


	// The guest believes paging is off; only the bare PE-less CR0 is visible
	vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
	PrintDebug("Created\n");
	
	guest_state->cr3 = vm_info->direct_map_pt;

	// Intercept control-register accesses so the shadow state stays in sync
	ctrl_area->cr_reads.cr0 = 1;
	ctrl_area->cr_writes.cr0 = 1;
	//ctrl_area->cr_reads.cr4 = 1;
	ctrl_area->cr_writes.cr4 = 1;
	ctrl_area->cr_reads.cr3 = 1;
	ctrl_area->cr_writes.cr3 = 1;

	// Virtualize EFER so guest paging-mode switches are observed
	v3_hook_msr(vm_info, EFER_MSR, 
		    &v3_handle_efer_read,
		    &v3_handle_efer_write, 
		    vm_info);

	ctrl_area->instrs.INVLPG = 1;

	// Page faults drive the shadow page table updates
	ctrl_area->exceptions.pf = 1;

	guest_state->g_pat = 0x7040600070406ULL;  // NOTE(review): presumably the power-on default PAT -- confirm

	// Enable hardware paging (CR0.PG) even though the guest sees it off
	guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
	// Flush the TLB on entries/exits
	ctrl_area->TLB_CONTROL = 1;
	ctrl_area->guest_ASID = 1;

	// Enable Nested Paging
	ctrl_area->NP_ENABLE = 1;

	PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

	// Set the Nested Page Table pointer
	if (v3_init_passthrough_pts(vm_info) == -1) {
	    PrintError("Could not initialize Nested page tables\n");
	    return ;
	}

	ctrl_area->N_CR3 = vm_info->direct_map_pt;

	guest_state->g_pat = 0x7040600070406ULL;  // NOTE(review): presumably the power-on default PAT -- confirm
    }
}
251
252
253 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
254
255
256     v3_pre_config_guest(info, config_ptr);
257
258     PrintDebug("Allocating VMCB\n");
259     info->vmm_data = (void*)Allocate_VMCB();
260
261     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
262     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
263
264     v3_post_config_guest(info, config_ptr);
265
266     return 0;
267 }
268
269 static int start_svm_guest(struct guest_info *info) {
270     //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
271     //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
272     uint_t num_exits = 0;
273
274
275
276     PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
277     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
278     
279     info->run_state = VM_RUNNING;
280     rdtscll(info->yield_start_cycle);
281
282
283     while (1) {
284         ullong_t tmp_tsc;
285         
286         // Conditionally yield the CPU if the timeslice has expired
287         v3_yield_cond(info);
288
289         /*
290           PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
291           (void *)(addr_t)info->segments.cs.base, 
292           (void *)(addr_t)info->rip);
293         */
294
295         // disable global interrupts for vm state transition
296         v3_clgi();
297
298
299
300         rdtscll(info->time_state.cached_host_tsc);
301         //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
302         
303         v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
304         
305         rdtscll(tmp_tsc);
306
307         
308         //PrintDebug("SVM Returned\n");
309
310         // reenable global interrupts after vm exit
311         v3_stgi();
312
313
314         // Conditionally yield the CPU if the timeslice has expired
315         v3_yield_cond(info);
316
317
318         v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
319         num_exits++;
320         
321         if ((num_exits % 5000) == 0) {
322             PrintDebug("SVM Exit number %d\n", num_exits);
323         }
324
325         if (v3_handle_svm_exit(info) != 0) {
326             vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
327             addr_t host_addr;
328             addr_t linear_addr = 0;
329             
330             info->run_state = VM_ERROR;
331             
332             PrintDebug("SVM ERROR!!\n"); 
333       
334             v3_print_guest_state(info);
335
336             PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
337       
338             PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
339             PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
340       
341             PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
342             PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
343       
344             linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
345
346             if (info->mem_mode == PHYSICAL_MEM) {
347                 guest_pa_to_host_va(info, linear_addr, &host_addr);
348             } else if (info->mem_mode == VIRTUAL_MEM) {
349                 guest_va_to_host_va(info, linear_addr, &host_addr);
350             }
351
352             PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);
353
354             PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
355             v3_dump_mem((uint8_t *)host_addr, 15);
356
357             break;
358         }
359     }
360     return 0;
361 }
362
363
364
365
366
367 /* Checks machine SVM capability */
368 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
369 int v3_is_svm_capable() {
370     // Dinda
371     uint_t vm_cr_low = 0, vm_cr_high = 0;
372     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
373
374     v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
375   
376     PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);
377
378     if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
379       PrintDebug("SVM Not Available\n");
380       return 0;
381     }  else {
382         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
383         
384         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
385         
386         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
387             PrintDebug("SVM is available but is disabled.\n");
388             
389             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
390             
391             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
392             
393             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
394                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
395             } else {
396                 PrintDebug("SVM is locked with a key\n");
397             }
398             return 0;
399
400         } else {
401             PrintDebug("SVM is available and  enabled.\n");
402
403             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
404             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
405             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
406             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
407             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
408
409
410             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
411                 PrintDebug("SVM Nested Paging not supported\n");
412             } else {
413                 PrintDebug("SVM Nested Paging supported\n");
414             }
415
416             return 1;
417         }
418     }
419 }
420
421 static int has_svm_nested_paging() {
422     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
423
424     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
425
426     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
427
428     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
429         PrintDebug("SVM Nested Paging not supported\n");
430         return 0;
431     } else {
432         PrintDebug("SVM Nested Paging supported\n");
433         return 1;
434     }
435 }
436
437
438 void v3_init_svm_cpu(int cpu_id) {
439     reg_ex_t msr;
440     extern v3_cpu_arch_t v3_cpu_types[];
441
442     // Enable SVM on the CPU
443     v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
444     msr.e_reg.low |= EFER_MSR_svm_enable;
445     v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
446
447     PrintDebug("SVM Enabled\n");
448
449     // Setup the host state save area
450     host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);
451
452     /* 64-BIT-ISSUE */
453     //  msr.e_reg.high = 0;
454     //msr.e_reg.low = (uint_t)host_vmcb;
455     msr.r_reg = host_vmcbs[cpu_id];
456
457     PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
458     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
459
460
461     if (has_svm_nested_paging() == 1) {
462         v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
463     } else {
464         v3_cpu_types[cpu_id] = V3_SVM_CPU;
465     }
466 }
467
468
469 void v3_init_svm_hooks(struct v3_ctrl_ops * vmm_ops) {
470
471     // Setup the SVM specific vmm operations
472     vmm_ops->init_guest = &init_svm_guest;
473     vmm_ops->start_guest = &start_svm_guest;
474     vmm_ops->has_nested_paging = &has_svm_nested_paging;
475
476     return;
477 }
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
#if 0
/* 
 * Test VMSAVE/VMLOAD Latency 
 *
 * NOTE(review): dead code, compiled out with #if 0.  This is a bare brace
 * block (not a function), and it references host_vmcb[cpu_id] while the
 * live global is host_vmcbs[] -- it would need fixing before it could ever
 * be re-enabled.  Kept only as a measurement recipe.
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;
    
    // rdtsc before, VMSAVE, rdtsc after; start ends up in EDI:ESI, end in EDX:EAX
    __asm__ __volatile__ (
			  "rdtsc ; "
			  "movl %%eax, %%esi ; "
			  "movl %%edx, %%edi ; "
			  "movq  %%rcx, %%rax ; "
			  vmsave
			  "rdtsc ; "
			  : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
			  : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
			  );
    
    start = start_hi;
    start <<= 32;
    start += start_lo;
    
    end = end_hi;
    end <<= 32;
    end += end_lo;
    
    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
    
    // Same measurement for VMLOAD
    __asm__ __volatile__ (
			  "rdtsc ; "
			  "movl %%eax, %%esi ; "
			  "movl %%edx, %%edi ; "
			  "movq  %%rcx, %%rax ; "
			  vmload
			  "rdtsc ; "
			  : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
			      : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
			      );
	
	start = start_hi;
	start <<= 32;
	start += start_lo;

	end = end_hi;
	end <<= 32;
	end += end_lo;


	PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
    }
    /* End Latency Test */

#endif
587
588
589
590
591
592
593
#if 0
/*
 * NOTE(review): dead code, compiled out with #if 0.  Legacy protected-mode
 * VMCB initializer that mirrored the host's segment/GDT/IDT state into the
 * guest ("gutted from TVMM" per the comment below).  Superseded by
 * Init_VMCB_BIOS above; kept only for reference.
 */
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    // Copy the host's descriptor tables into the guest state
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

	// Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
759
760