Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way.
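
For example, to track a release branch instead (the branch name below is purely illustrative; run "git branch -r" inside the clone to see which branches actually exist on the server):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2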


Commit: added symbiotic interface
File:   palacios/src/palacios/svm.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/svm_io.h>

#include <palacios/vmm_sprintf.h>


// Per-CPU physical addresses of the host state save areas (host VMCBs)
static addr_t host_vmcbs[CONFIG_MAX_CPUS] = {0};



extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);


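// Allocate and zero one page to hold a guest VMCB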
static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}



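// Set up a fresh VMCB so the guest begins execution in the BIOS at
// f000:fff0, with the exit intercepts, IO/MSR bitmaps, and paging mode
// (shadow or nested) that Palacios expects.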
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    //
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;
    ctrl_area->instrs.CPUID = 1;

    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    vm_info->vm_regs.rdx = 0x00000f00;


    guest_state->cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds),
                                        &(guest_state->es), &(guest_state->fs),
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;


    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        guest_state->cr3 = vm_info->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        v3_hook_msr(vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}


static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {


    v3_pre_config_guest(info, config_ptr);

    PrintDebug("Allocating VMCB\n");
    info->vmm_data = (void*)Allocate_VMCB();

    PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
    Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);

    v3_post_config_guest(info, config_ptr);

    return 0;
}

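// Guest run loop: disable global interrupts, enter the guest via VMRUN
// (v3_svm_launch), re-enable interrupts after the #VMEXIT, update guest
// time, and hand the exit to v3_handle_svm_exit().  An unhandled exit
// dumps the guest state and breaks out of the loop.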
static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    info->run_state = VM_RUNNING;
    rdtscll(info->yield_start_cycle);


    while (1) {
        ullong_t tmp_tsc;

        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n",
          (void *)(addr_t)info->segments.cs.base,
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;

        v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);

        rdtscll(tmp_tsc);


        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();


        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);


        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;

        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);
        }

        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr = 0;
            addr_t linear_addr = 0;

            info->run_state = VM_ERROR;

            PrintDebug("SVM ERROR!!\n");

            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code);

            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }

            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);


            v3_print_stack(info);


            break;
        }
    }
    return 0;
}




/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        PrintDebug("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            PrintDebug("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                PrintDebug("SVM BIOS Disabled, not unlockable\n");
            } else {
                PrintDebug("SVM is locked with a key\n");
            }
            return 0;

        } else {
            PrintDebug("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);


            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
                PrintDebug("SVM Nested Paging not supported\n");
            } else {
                PrintDebug("SVM Nested Paging supported\n");
            }

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        PrintDebug("SVM Nested Paging not supported\n");
        return 0;
    } else {
        PrintDebug("SVM Nested Paging supported\n");
        return 1;
    }
}


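// Per-core SVM setup: turn on EFER.SVME, allocate the host state save
// area, and program its physical address into the VM_HSAVE_PA MSR.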
void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");

    // Setup the host state save area
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}


void v3_init_svm_hooks(struct v3_ctrl_ops * vmm_ops) {

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}


#if 0
/*
 * Test VMSAVE/VMLOAD Latency
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmsave
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                          : "c"(host_vmcbs[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmload
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                          : "c"(host_vmcbs[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;


    PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
}
/* End Latency Test */

#endif


#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}


#endif