Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Added the write-protect flag to the real-mode CR0 value so that memory hooks work
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_direct_paging.h>
40
41 #include <palacios/vmm_ctrl_regs.h>
42 #include <palacios/vmm_config.h>
43 #include <palacios/svm_io.h>
44
45
46
47 // This is a global pointer to the host's VMCB
48 static void * host_vmcb = NULL;
49
50 extern void v3_stgi();
51 extern void v3_clgi();
52 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
53 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
54
55
56 static vmcb_t * Allocate_VMCB() {
57     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
58
59     memset(vmcb_page, 0, 4096);
60
61     return vmcb_page;
62 }
63
64
65
/* Initialize a VMCB so the guest begins execution like an x86 CPU coming
 * out of reset (i.e. jumps into the BIOS at f000:fff0).
 *
 * vmcb    - VMCB page to fill in (control area + save state area).
 * vm_info - the guest this VMCB belongs to; supplies the IO/MSR bitmap
 *           addresses and selects shadow vs. nested paging setup.
 *
 * Returns nothing; on passthrough-page-table init failure it logs an
 * error and returns with the VMCB only partially initialized.
 */
static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info *vm_info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Reset vector: cs.base (0xf0000, set below) + rip (0xfff0) = 0xffff0
    guest_state->rsp = 0x00;
    guest_state->rip = 0xfff0;


    guest_state->cpl = 0;

    // NOTE(review): this sets SVME in the *guest's* EFER image; presumably
    // needed to satisfy VMRUN's EFER.SVME consistency check -- confirm
    // against the AMD manual.
    guest_state->efer |= EFER_MSR_svm_enable;


    guest_state->rflags = 0x00000002; // The reserved bit is always 1

    // Intercept every SVM-specific instruction the guest might execute
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.RDTSCP = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;


    ctrl_area->instrs.HLT = 1;
    // guest_state->cr0 = 0x00000001;    // PE 
  
    /* Exception intercepts deliberately left disabled:
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;
      
      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;
      
      ctrl_area->exceptions.nmi = 1;
    */
    

    // Intercept external machine events so the VMM sees them
    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 1;
    ctrl_area->instrs.INIT = 1;
    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;

    // NOTE(review): looks like the post-reset DX processor-signature value
    // (family 0xf) -- confirm
    vm_info->vm_regs.rdx = 0x00000f00;


    // 0x60010010 = CD(bit 30) | NW(bit 29) | WP(bit 16) | ET(bit 4)
    guest_state->cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode


    // Real-mode BIOS entry segment: f000:fff0
    guest_state->cs.selector = 0xf000;
    guest_state->cs.limit = 0xffff;
    guest_state->cs.base = 0x0000000f0000LL;
    // NOTE(review): 0xf3 = P=1, DPL=3, S=1, type=3 (accessed r/w data);
    // unusual attribs for CS -- confirm this is intentional for real mode
    guest_state->cs.attrib.raw = 0xf3;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    // All other segments start as flat, zero-based real-mode segments
    struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), 
                                        &(guest_state->es), &(guest_state->fs), 
                                        &(guest_state->gs), NULL};

    for ( i = 0; segregs[i] != NULL; i++) {
        struct vmcb_selector * seg = segregs[i];
        
        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->attrib.raw = 0xf3;
        seg->limit = ~0u;
    }

    // Descriptor tables: base 0, 64K limit
    guest_state->gdtr.limit = 0x0000ffff;
    guest_state->gdtr.base = 0x0000000000000000LL;
    guest_state->idtr.limit = 0x0000ffff;
    guest_state->idtr.base = 0x0000000000000000LL;

    guest_state->ldtr.selector = 0x0000;
    guest_state->ldtr.limit = 0x0000ffff;
    guest_state->ldtr.base = 0x0000000000000000LL;
    guest_state->tr.selector = 0x0000;
    guest_state->tr.limit = 0x0000ffff;
    guest_state->tr.base = 0x0000000000000000LL;


    // Architectural reset values for the debug status/control registers
    guest_state->dr6 = 0x00000000ffff0ff0LL;
    guest_state->dr7 = 0x0000000000000400LL;


    // Hook guest port IO through the IO permission bitmap
    v3_init_svm_io_map(vm_info);
    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;


    // Hook guest MSR accesses through the MSR permission bitmap
    v3_init_svm_msr_map(vm_info);
    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;



    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");
        
        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;
        
        
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return ;
        }

        // The guest's *view* of CR0 starts as just ET; the hardware CR0
        // (set above / below) differs because shadow paging needs WP+PG on.
        vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");
        
        guest_state->cr3 = vm_info->direct_map_pt;

        // Intercept CR accesses so shadow state stays in sync
        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;

        // Virtualize EFER so guest paging-mode changes are observed
        v3_hook_msr(vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    vm_info);

        ctrl_area->instrs.INVLPG = 1;

        // Page faults drive the shadow page table updates
        ctrl_area->exceptions.pf = 1;

        // NOTE(review): presumably the power-on default PAT value -- confirm
        guest_state->g_pat = 0x7040600070406ULL;

        // Turn on PG (bit 31) in the hardware CR0 so shadow paging is active
        guest_state->cr0 |= 0x80000000;

    } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(vm_info) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return ;
        }

        ctrl_area->N_CR3 = vm_info->direct_map_pt;

        // NOTE(review): presumably the power-on default PAT value -- confirm
        guest_state->g_pat = 0x7040600070406ULL;
    }
}
250
251
252 static int init_svm_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
253
254
255     v3_pre_config_guest(info, config_ptr);
256
257     PrintDebug("Allocating VMCB\n");
258     info->vmm_data = (void*)Allocate_VMCB();
259
260     PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
261     Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
262
263     v3_post_config_guest(info, config_ptr);
264
265     return 0;
266 }
267
/* Main VM execution loop: repeatedly enter the guest with VMRUN and
 * dispatch each exit to v3_handle_svm_exit().  Only returns after an
 * unhandled exit puts the guest into VM_ERROR (returns 0 either way).
 */
static int start_svm_guest(struct guest_info *info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    uint_t num_exits = 0;   // total VM exits, for periodic progress output



    PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
    
    info->run_state = VM_RUNNING;
    rdtscll(info->yield_start_cycle);


    while (1) {
        ullong_t tmp_tsc;   // host TSC immediately after the VM exit
        
        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);

        /*
          PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
          (void *)(addr_t)info->segments.cs.base, 
          (void *)(addr_t)info->rip);
        */

        // disable global interrupts for vm state transition
        v3_clgi();



        // Snapshot the host TSC just before entry so guest time can be
        // advanced by the entry-to-exit delta after VMRUN returns
        rdtscll(info->time_state.cached_host_tsc);
        //    guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
        
        // VMRUN takes the *physical* address of the VMCB; host state is
        // saved/restored via the global host_vmcb area
        v3_svm_launch((vmcb_t*)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcb);
        
        rdtscll(tmp_tsc);

        
        //PrintDebug("SVM Returned\n");

        // reenable global interrupts after vm exit
        v3_stgi();


        // Conditionally yield the CPU if the timeslice has expired
        v3_yield_cond(info);


        v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
        num_exits++;
        
        if ((num_exits % 5000) == 0) {
            PrintDebug("SVM Exit number %d\n", num_exits);
        }

        // Non-zero means the exit could not be handled: dump diagnostics
        // (exit code/info, guest state, the faulting instruction bytes)
        // and stop running this guest
        if (v3_handle_svm_exit(info) != 0) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;
            
            info->run_state = VM_ERROR;
            
            PrintDebug("SVM ERROR!!\n"); 
      
            v3_print_guest_state(info);

            PrintDebug("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
      
            // exit_info fields are printed as two 32-bit halves
            PrintDebug("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
            PrintDebug("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
      
            PrintDebug("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
            PrintDebug("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
      
            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            // NOTE(review): host_addr is used uninitialized if mem_mode is
            // neither PHYSICAL_MEM nor VIRTUAL_MEM, and the translation
            // calls' return values are ignored -- debug-path only, but
            // worth hardening
            if (info->mem_mode == PHYSICAL_MEM) {
                guest_pa_to_host_va(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                guest_va_to_host_va(info, linear_addr, &host_addr);
            }

            PrintDebug("Host Address of rip = 0x%p\n", (void *)host_addr);

            PrintDebug("Instr (15 bytes) at %p:\n", (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            break;
        }
    }
    return 0;
}
361
362
363
364
365
366 /* Checks machine SVM capability */
367 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
368 int v3_is_svm_capable() {
369     // Dinda
370     uint_t vm_cr_low = 0, vm_cr_high = 0;
371     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
372
373     v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
374   
375     PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=%p\n", (void *)ecx);
376
377     if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
378       PrintDebug("SVM Not Available\n");
379       return 0;
380     }  else {
381         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
382         
383         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
384         
385         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
386             PrintDebug("SVM is available but is disabled.\n");
387             
388             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
389             
390             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
391             
392             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
393                 PrintDebug("SVM BIOS Disabled, not unlockable\n");
394             } else {
395                 PrintDebug("SVM is locked with a key\n");
396             }
397             return 0;
398
399         } else {
400             PrintDebug("SVM is available and  enabled.\n");
401
402             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
403             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=%p\n", (void *)eax);
404             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=%p\n", (void *)ebx);
405             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=%p\n", (void *)ecx);
406             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=%p\n", (void *)edx);
407
408
409             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
410                 PrintDebug("SVM Nested Paging not supported\n");
411             } else {
412                 PrintDebug("SVM Nested Paging supported\n");
413             }
414
415             return 1;
416         }
417     }
418 }
419
420 static int has_svm_nested_paging() {
421     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
422
423     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
424
425     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
426
427     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
428         PrintDebug("SVM Nested Paging not supported\n");
429         return 0;
430     } else {
431         PrintDebug("SVM Nested Paging supported\n");
432         return 1;
433     }
434 }
435
436
437
/* Enable SVM on this CPU and install the SVM implementations of the
 * generic VMM control operations.
 *
 * vmm_ops - operation table to populate (init/start/has_nested_paging).
 *
 * Side effects: sets EFER.SVME, allocates the global host save area and
 * programs its address into the VM_HSAVE_PA MSR, and sets v3_cpu_type.
 */
void v3_init_SVM(struct v3_ctrl_ops * vmm_ops) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_type;

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    // NOTE(review): the high half read above is written back as 0 here,
    // discarding any upper EFER bits -- verify this is intended
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    PrintDebug("SVM Enabled\n");

    // Setup the host state save area
    // NOTE(review): allocation result is not checked for failure
    host_vmcb = V3_AllocPages(4);

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    // Writing r_reg fills both e_reg halves via the union
    msr.r_reg = (addr_t)host_vmcb;

    PrintDebug("Host State being saved at %p\n", (void *)(addr_t)host_vmcb);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);




    // Record the CPU flavor: REV3 implies nested paging support
    if (has_svm_nested_paging() == 1) {
        v3_cpu_type = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_type = V3_SVM_CPU;
    }

    // Setup the SVM specific vmm operations
    vmm_ops->init_guest = &init_svm_guest;
    vmm_ops->start_guest = &start_svm_guest;
    vmm_ops->has_nested_paging = &has_svm_nested_paging;

    return;
}
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528 #if 0
529 /* 
530  * Test VMSAVE/VMLOAD Latency 
531  */
532 #define vmsave ".byte 0x0F,0x01,0xDB ; "
533 #define vmload ".byte 0x0F,0x01,0xDA ; "
534 {
535     uint32_t start_lo, start_hi;
536     uint32_t end_lo, end_hi;
537     uint64_t start, end;
538     
539     __asm__ __volatile__ (
540                           "rdtsc ; "
541                           "movl %%eax, %%esi ; "
542                           "movl %%edx, %%edi ; "
543                           "movq  %%rcx, %%rax ; "
544                           vmsave
545                           "rdtsc ; "
546                           : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
547                           : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
548                           );
549     
550     start = start_hi;
551     start <<= 32;
552     start += start_lo;
553     
554     end = end_hi;
555     end <<= 32;
556     end += end_lo;
557     
558     PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
559     
560     __asm__ __volatile__ (
561                           "rdtsc ; "
562                           "movl %%eax, %%esi ; "
563                           "movl %%edx, %%edi ; "
564                           "movq  %%rcx, %%rax ; "
565                           vmload
566                           "rdtsc ; "
567                           : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
568                               : "c"(host_vmcb), "0"(0), "1"(0), "2"(0), "3"(0)
569                               );
570         
571         start = start_hi;
572         start <<= 32;
573         start += start_lo;
574
575         end = end_hi;
576         end <<= 32;
577         end += end_lo;
578
579
580         PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
581     }
582     /* End Latency Test */
583
584 #endif
585
586
587
588
589
590
591
592 #if 0
593 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
594   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
595   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
596   uint_t i = 0;
597
598
599   guest_state->rsp = vm_info.vm_regs.rsp;
600   guest_state->rip = vm_info.rip;
601
602
603   /* I pretty much just gutted this from TVMM */
604   /* Note: That means its probably wrong */
605
606   // set the segment registers to mirror ours
607   guest_state->cs.selector = 1<<3;
608   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
609   guest_state->cs.attrib.fields.S = 1;
610   guest_state->cs.attrib.fields.P = 1;
611   guest_state->cs.attrib.fields.db = 1;
612   guest_state->cs.attrib.fields.G = 1;
613   guest_state->cs.limit = 0xfffff;
614   guest_state->cs.base = 0;
615   
616   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
617   for ( i = 0; segregs[i] != NULL; i++) {
618     struct vmcb_selector * seg = segregs[i];
619     
620     seg->selector = 2<<3;
621     seg->attrib.fields.type = 0x2; // Data Segment+read/write
622     seg->attrib.fields.S = 1;
623     seg->attrib.fields.P = 1;
624     seg->attrib.fields.db = 1;
625     seg->attrib.fields.G = 1;
626     seg->limit = 0xfffff;
627     seg->base = 0;
628   }
629
630
631   {
632     /* JRL THIS HAS TO GO */
633     
634     //    guest_state->tr.selector = GetTR_Selector();
635     guest_state->tr.attrib.fields.type = 0x9; 
636     guest_state->tr.attrib.fields.P = 1;
637     // guest_state->tr.limit = GetTR_Limit();
638     //guest_state->tr.base = GetTR_Base();// - 0x2000;
639     /* ** */
640   }
641
642
643   /* ** */
644
645
646   guest_state->efer |= EFER_MSR_svm_enable;
647   guest_state->rflags = 0x00000002; // The reserved bit is always 1
648   ctrl_area->svm_instrs.VMRUN = 1;
649   guest_state->cr0 = 0x00000001;    // PE 
650   ctrl_area->guest_ASID = 1;
651
652
653   //  guest_state->cpl = 0;
654
655
656
657   // Setup exits
658
659   ctrl_area->cr_writes.cr4 = 1;
660   
661   ctrl_area->exceptions.de = 1;
662   ctrl_area->exceptions.df = 1;
663   ctrl_area->exceptions.pf = 1;
664   ctrl_area->exceptions.ts = 1;
665   ctrl_area->exceptions.ss = 1;
666   ctrl_area->exceptions.ac = 1;
667   ctrl_area->exceptions.mc = 1;
668   ctrl_area->exceptions.gp = 1;
669   ctrl_area->exceptions.ud = 1;
670   ctrl_area->exceptions.np = 1;
671   ctrl_area->exceptions.of = 1;
672   ctrl_area->exceptions.nmi = 1;
673
674   
675
676   ctrl_area->instrs.IOIO_PROT = 1;
677   ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
678   
679   {
680     reg_ex_t tmp_reg;
681     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
682     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
683   }
684
685   ctrl_area->instrs.INTR = 1;
686
687   
688   {
689     char gdt_buf[6];
690     char idt_buf[6];
691
692     memset(gdt_buf, 0, 6);
693     memset(idt_buf, 0, 6);
694
695
696     uint_t gdt_base, idt_base;
697     ushort_t gdt_limit, idt_limit;
698     
699     GetGDTR(gdt_buf);
700     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
701     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
702     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
703
704     GetIDTR(idt_buf);
705     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
706     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
707     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
708
709
710     // gdt_base -= 0x2000;
711     //idt_base -= 0x2000;
712
713     guest_state->gdtr.base = gdt_base;
714     guest_state->gdtr.limit = gdt_limit;
715     guest_state->idtr.base = idt_base;
716     guest_state->idtr.limit = idt_limit;
717
718
719   }
720   
721   
722   // also determine if CPU supports nested paging
723   /*
724   if (vm_info.page_tables) {
725     //   if (0) {
726     // Flush the TLB on entries/exits
727     ctrl_area->TLB_CONTROL = 1;
728
729     // Enable Nested Paging
730     ctrl_area->NP_ENABLE = 1;
731
732     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
733
734         // Set the Nested Page Table pointer
735     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
736
737
738     //   ctrl_area->N_CR3 = Get_CR3();
739     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
740
741     guest_state->g_pat = 0x7040600070406ULL;
742
743     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
744     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
745     // Enable Paging
746     //    guest_state->cr0 |= 0x80000000;
747   }
748   */
749
750 }
751
752
753
754
755
756 #endif
757
758