Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


ported the profiler over to the telemetry interface
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_direct_paging.h>
40
41 #include <palacios/vmm_ctrl_regs.h>
42 #include <palacios/vmm_config.h>
43 #include <palacios/svm_io.h>
44
45
46
// Host state save area for VMRUN/#VMEXIT world switches.  NOTE(review):
// despite the old "host's VMCB" wording, v3_init_SVM() allocates this with
// V3_AllocPages() (a physical address) and programs it into the
// SVM_VM_HSAVE_PA MSR — confirm it is never dereferenced as a host-virtual
// VMCB pointer.
static void * host_vmcb = NULL;

// Assembly stubs: STGI/CLGI toggle global interrupt delivery around VMRUN
extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
// Assembly world switch: enters the guest described by vmcb, returns on #VMEXIT
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
54
55
56 static vmcb_t * Allocate_VMCB() {
57     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
58
59     memset(vmcb_page, 0, 4096);
60
61     return vmcb_page;
62 }
63
64
65
/*
 * Init_VMCB_BIOS - program a freshly zeroed VMCB so the guest starts at the
 * x86 reset vector (CS:IP = f000:fff0) and runs its BIOS.
 *
 * Sets up: initial guest register/segment state, the intercept bitmaps
 * (privileged SVM instructions, HLT, NMI/SMI/INIT, I/O ports, MSRs,
 * external interrupts), and paging state for either shadow paging or
 * nested paging depending on vm_info->shdw_pg_mode.
 *
 * NOTE(review): returns void, so a v3_init_passthrough_pts() failure leaves
 * a partially initialized VMCB that the caller cannot detect — verify
 * callers tolerate this.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Reset vector: rip=0xfff0 combines with cs.base=0xf0000 (below) => 0xffff0
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM instruction -- the guest must never run these itself
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept events the host must own: NMI/SMI/INIT, PAUSE, and shutdowns
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // EDX at reset conventionally carries the CPU stepping identification
    vm_info->vm_regs.rdx = 0x00000f00;

    // CR0 reset value: ET + CD + NW set, paging and protection off
    guest_state->cr0 = 0x60000010;


    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // Remaining data segments all get the same flat real-mode-style setup
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug status/control registers
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // I/O port intercept bitmap: trap guest IN/OUT through the IOPM
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;



    // MSR intercept bitmap: trap guest RDMSR/WRMSR through the MSRPM
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }


        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        // Shadow paging must see all guest CR0/CR3/CR4 accesses and INVLPGs
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // EFER accesses are virtualized so the guest never sees/changes SVME
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow page table updates
        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;

        // Turn on CR0.PG so the guest actually runs through the shadow tables
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }
}
249
250
251 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
252
253
254     v3_pre_config_guest(info, config_ptr);
255
256     PrintDebug("Allocating VMCB\n");
257     info->vmm_data = (void*)Allocate_VMCB();
258
259     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
260     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
261
262     v3_post_config_guest(info, config_ptr);
263
264     return 0;
265 }
266
/*
 * start_svm_guest - main VMRUN loop for a guest.
 *
 * Repeatedly: yield if the timeslice expired, disable global interrupts
 * (CLGI), sample the TSC, enter the guest via v3_svm_launch(), re-enable
 * interrupts (STGI), account guest time, and dispatch the exit to
 * v3_handle_svm_exit().  Only returns (0) after an unrecoverable exit-handler
 * error, after dumping diagnostic state.
 *
 * The CLGI/launch/STGI ordering is deliberate: the host must not take
 * interrupts during the world switch.
 */
static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    rdtscll(info->yield_start_cycle);


    while (1) {
        ullong_t tmp_tsc;
        
        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        // TSC snapshot immediately before entry; paired with tmp_tsc after
        // exit to charge the elapsed cycles to the guest
        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        // VMRUN takes the VMCB's *physical* address; hence the V3_PAddr here
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
        
        rdtscll(tmp_tsc);

        
        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();


        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);


        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;
        
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);
        }

        // Non-zero means the exit handler hit an unrecoverable error:
        // dump guest state and the faulting instruction, then bail out
        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            // NOTE(review): host_addr is read below without initialization if
            // mem_mode is neither PHYSICAL_MEM nor VIRTUAL_MEM, and the
            // translation calls' return values are ignored — presumably one
            // of the two modes always holds; confirm.
            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }

            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}
360
361
362
363
364
365 /* Checks machine SVM capability */
366 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
367 int v3_is_svm_capable() {
368     // Dinda
369     uint_t vm_cr_low = 0, vm_cr_high = 0;
370     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
371
372     v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
373   
374     PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=%p\n", (void *)ecx);
375
376     if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
377       PrintDebug("SVM Not Available\n");
378       return 0;
379     }  else {
380         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
381         
382         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
383         
384         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
385             PrintDebug("SVM is available but is disabled.\n");
386             
387             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
388             
389             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
390             
391             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
392                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
393             } else {
394                 PrintDebug("SVM is locked with a key\n");
395             }
396             return 0;
397
398         } else {
399             PrintDebug("SVM is available and  enabled.\n");
400
401             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
402             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
403             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
404             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
405             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
406
407
408             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
409                 PrintDebug("SVM Nested Paging not supported\n");
410             } else {
411                 PrintDebug("SVM Nested Paging supported\n");
412             }
413
414             return 1;
415         }
416     }
417 }
418
419 static int has_svm_nested_paging() {
420     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
421
422     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
423
424     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
425
426     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
427         PrintDebug("SVM Nested Paging not supported\n");
428         return 0;
429     } else {
430         PrintDebug("SVM Nested Paging supported\n");
431         return 1;
432     }
433 }
434
435
436
437 void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
438     reg_ex_t msr;
439     extern v3_cpu_arch_t v3_cpu_type;
440
441     // Enable SVM on the CPU
442     v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
443     msr.e_reg.low |= EFER_MSR_svm_enable;
444     v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
445
446     PrintDebug("SVM Enabled\n");
447
448     // Setup the host state save area
449     host_vmcb = V3_AllocPages(4);
450
451     /* 64-BIT-ISSUE */
452     //  msr.e_reg.high = 0;
453     //msr.e_reg.low = (uint_t)host_vmcb;
454     msr.r_reg = (addr_t)host_vmcb;
455
456     PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
457     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
458
459
460
461
462     if (has_svm_nested_paging() == 1) {
463         v3_cpu_type = V3_SVM_REV3_CPU;
464     } else {
465         v3_cpu_type = V3_SVM_CPU;
466     }
467
468     // Setup the SVM specific vmm operations
469     vmm_ops->init_guest = &init_svm_guest;
470     vmm_ops->start_guest = &start_svm_guest;
471     vmm_ops->has_nested_paging = &has_svm_nested_paging;
472
473     return;
474 }
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
/*
 * NOTE(review): dead code — this entire region is compiled out by "#if 0".
 * It is a scratch micro-benchmark for VMSAVE/VMLOAD latency and is not even
 * a complete function (bare brace block).  Candidate for deletion.
 */
#if 0
/* 
 * Test VMSAVE/VMLOAD Latency 
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;
    
    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmsave
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                          : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                          );
    
    start = start_hi;
    start <<= 32;
    start += start_lo;
    
    end = end_hi;
    end <<= 32;
    end += end_lo;
    
    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
    
    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmload
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
                              : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
                              );
        
        start = start_hi;
        start <<= 32;
        start += start_lo;

        end = end_hi;
        end <<= 32;
        end += end_lo;


        PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
    }
    /* End Latency Test */

#endif
584
585
586
587
588
589
590
/*
 * NOTE(review): dead code — this whole function is compiled out by "#if 0".
 * It is a legacy protected-mode VMCB initializer ("gutted from TVMM" per its
 * own comments), superseded by Init_VMCB_BIOS above.  Candidate for deletion.
 */
#if 0
void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means its probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;
  
  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];
    
    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */
    
    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9; 
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE 
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;
  
  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;

  

  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
  
  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;

  
  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;
    
    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;


  }
  
  
  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif
756
757