Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are similar.
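For example, to work on a release branch instead (the branch name below is illustrative; run "git branch -r" first to list the branches that actually exist):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2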


APIC and CR8 changes for vector prioritization vs TPR
[palacios.git] / palacios / src / palacios / svm.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>
#include <palacios/vmm_barrier.h>

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/svm_io.h>

#include <palacios/vmm_sprintf.h>


#ifndef V3_CONFIG_DEBUG_SVM
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

uint32_t v3_last_exit;

// Per-physical-CPU host state save areas, used by the hardware across VMRUN
static addr_t host_vmcbs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};


extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);


static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = NULL;
    addr_t vmcb_pa = (addr_t)V3_AllocPages(1);

    if ((void *)vmcb_pa == NULL) {
        PrintError("Error allocating VMCB\n");
        return NULL;
    }

    vmcb_page = (vmcb_t *)V3_VAddr((void *)vmcb_pa);

    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}


static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
{
    int status;

    // Call arch-independent handler
    if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0) {
        return status;
    }

    // SVM-specific code
    {
        // Ensure that hardware visible EFER.SVME bit is set (SVM Enable)
        struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
        hw_efer->svme = 1;
    }

    return 0;
}


static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Intercept the privileged SVM instructions and other sensitive operations
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;
    ctrl_area->instrs.CPUID = 1;

    ctrl_area->instrs.HLT = 1;

    /* Set at VMM launch as needed */
    ctrl_area->instrs.RDTSC = 0;
    ctrl_area->svm_instrs.RDTSCP = 0;
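    // (These two intercepts are toggled per entry by
    //  v3_svm_config_tsc_virtualization() below.)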

    // guest_state->cr0 = 0x00000001;    // PE

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 0; // allow SMIs to run in guest
    ctrl_area->instrs.INIT = 1;
    //    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    /* Setup Guest Machine state */

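    // The values below approximate the x86 reset state: CS:IP = f000:fff0
    // with CS.base = 0xf0000, so execution begins at 0xffff0 (the
    // traditional real-mode BIOS entry point), and EDX holds a processor
    // identification value.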
    core->vm_regs.rsp = 0x00;
    core->rip = 0xfff0;

    core->vm_regs.rdx = 0x00000f00;


    core->cpl = 0;

    core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
    core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
    core->ctrl_regs.efer |= EFER_MSR_svm_enable;


    core->segments.cs.selector = 0xf000;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = 0x0000000f0000LL;

    // (raw attributes = 0xf3)
    core->segments.cs.type = 0x3;
    core->segments.cs.system = 0x1;
    core->segments.cs.dpl = 0x3;
    core->segments.cs.present = 1;


    struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                      &(core->segments.es), &(core->segments.fs),
                                      &(core->segments.gs), NULL};

    for (i = 0; segregs[i] != NULL; i++) {
        struct v3_segment * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->limit = ~0u;

        // (raw attributes = 0xf3)
        seg->type = 0x3;
        seg->system = 0x1;
        seg->dpl = 0x3;
        seg->present = 1;
    }

    core->segments.gdtr.limit = 0x0000ffff;
    core->segments.gdtr.base = 0x0000000000000000LL;
    core->segments.idtr.limit = 0x0000ffff;
    core->segments.idtr.base = 0x0000000000000000LL;

    core->segments.ldtr.selector = 0x0000;
    core->segments.ldtr.limit = 0x0000ffff;
    core->segments.ldtr.base = 0x0000000000000000LL;
    core->segments.tr.selector = 0x0000;
    core->segments.tr.limit = 0x0000ffff;
    core->segments.tr.base = 0x0000000000000000LL;


    core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
    core->dbg_regs.dr7 = 0x0000000000000400LL;


    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;

    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;
    // The above also ensures that TPR changes (CR8) are only virtual


    // However, we need to see TPR writes since they will
    // affect the virtual apic,
    // so we reflect cr8 out to ctrl_regs->apic_tpr
    ctrl_area->cr_reads.cr8 = 1;
    ctrl_area->cr_writes.cr8 = 1;
    // We will do all TPR comparisons in the virtual apic
    // We also do not want the V_TPR to be able to mask the PIC
    ctrl_area->guest_ctrl.V_IGN_TPR = 1;


    v3_hook_msr(core->vm_info, EFER_MSR,
                &v3_handle_efer_read,
                &v3_svm_handle_efer_write,
                core);

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return;
        }


        core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        core->ctrl_regs.cr0 |= 0x80000000;
        core->ctrl_regs.cr3 = core->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;


        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;


    } else if (core->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return;
        }

        ctrl_area->N_CR3 = core->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }

    /* tell the guest that we don't support SVM */
    v3_hook_msr(core->vm_info, SVM_VM_CR_MSR,
        &v3_handle_vm_cr_read,
        &v3_handle_vm_cr_write,
        core);


    {
#define INT_PENDING_AMD_MSR             0xc0010055

        v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);

        v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);


        v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);

        // Passthrough read operations are ok.
        v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, v3_msr_unhandled_write, NULL);
    }
}


int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class) {

    PrintDebug("Allocating VMCB\n");
    core->vmm_data = (void *)Allocate_VMCB();

    if (core->vmm_data == NULL) {
        PrintError("Could not allocate VMCB, Exiting...\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCB (addr=%p)\n", (void *)core->vmm_data);
        Init_VMCB_BIOS((vmcb_t*)(core->vmm_data), core);
    } else {
        PrintError("Invalid VM class\n");
        return -1;
    }

    core->core_run_state = CORE_STOPPED;

    return 0;
}


int v3_deinit_svm_vmcb(struct guest_info * core) {
    V3_FreePages(V3_PAddr(core->vmm_data), 1);
    return 0;
}


#ifdef V3_CONFIG_CHECKPOINT
int v3_svm_save_core(struct guest_info * core, void * ctx){

    if (v3_chkpt_save_8(ctx, "cpl", &(core->cpl)) == -1) {
        PrintError("Could not save SVM cpl\n");
        return -1;
    }

    if (v3_chkpt_save(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
        PrintError("Could not save SVM vmcb\n");
        return -1;
    }

    return 0;
}

int v3_svm_load_core(struct guest_info * core, void * ctx){

    if (v3_chkpt_load_8(ctx, "cpl", &(core->cpl)) == -1) {
        PrintError("Could not load SVM cpl\n");
        return -1;
    }

    if (v3_chkpt_load(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
        return -1;
    }

    return 0;
}
#endif

static int update_irq_exit_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    // Fix for QEMU bug using EVENTINJ as an internal cache
    guest_ctrl->EVENTINJ.valid = 0;

    if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
#endif

        info->intr_core_state.irq_started = 1;
        info->intr_core_state.irq_pending = 0;

        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
#endif

        // Interrupt was taken fully vectored
        info->intr_core_state.irq_started = 0;

    } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
#endif
    }

    return 0;
}


static int update_irq_entry_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

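    // Note: EVENTINJ forces an event into the guest on the next VMRUN,
    // whereas V_IRQ posts a virtual interrupt that is delivered only when
    // the guest's interrupt flag and priority state permit it.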
    if (info->intr_core_state.irq_pending == 0) {
        guest_ctrl->guest_ctrl.V_IRQ = 0;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = 0;
    }

    if (v3_excp_pending(info)) {
        uint_t excp = v3_get_excp_number(info);

        guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;

        if (info->excp_state.excp_error_code_valid) {
            guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
            guest_ctrl->EVENTINJ.ev = 1;
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
#endif
        }

        guest_ctrl->EVENTINJ.vector = excp;

        guest_ctrl->EVENTINJ.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n",
                   (int)info->num_exits,
                   guest_ctrl->EVENTINJ.vector,
                   (void *)(addr_t)info->ctrl_regs.cr2,
                   (void *)(addr_t)info->rip);
#endif

        v3_injecting_excp(info, excp);
    } else if (info->intr_core_state.irq_started == 1) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("IRQ pending from previous injection\n");
#endif
        guest_ctrl->guest_ctrl.V_IRQ = 1;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;

        // We ignore the virtual TPR on this injection
        // TPR/PPR tests have already been done in the APIC.
        guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
        guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4;

    } else {
        switch (v3_intr_pending(info)) {
            case V3_EXTERNAL_IRQ: {
                uint32_t irq = v3_get_intr(info);

                guest_ctrl->guest_ctrl.V_IRQ = 1;
                guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;

                // We ignore the virtual TPR on this injection
                // TPR/PPR tests have already been done in the APIC.
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
                guest_ctrl->guest_ctrl.V_INTR_PRIO = irq >> 4; // use the new vector; irq_vector is not updated until below

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
                           guest_ctrl->guest_ctrl.V_INTR_VECTOR,
                           (void *)(addr_t)info->rip);
#endif

                info->intr_core_state.irq_pending = 1;
                info->intr_core_state.irq_vector = irq;

                break;
            }
            case V3_NMI:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
                break;
            case V3_SOFTWARE_INTR:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug("Injecting software interrupt --  type: %d, vector: %d\n",
                           SVM_INJECTION_SOFT_INTR, info->intr_core_state.swintr_vector);
#endif
                guest_ctrl->EVENTINJ.vector = info->intr_core_state.swintr_vector;
                guest_ctrl->EVENTINJ.valid = 1;

                /* reset swintr state */
                info->intr_core_state.swintr_posted = 0;
                info->intr_core_state.swintr_vector = 0;

                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
                break;

            case V3_INVALID_INTR:
            default:
                break;
        }

    }

    return 0;
}

int
v3_svm_config_tsc_virtualization(struct guest_info * info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

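    // When RDTSC is not intercepted, the hardware returns
    // (host TSC + TSC_OFFSET) to the guest, so passthrough mode
    // simply zeroes the offset.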
    if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
        ctrl_area->instrs.RDTSC = 1;
        ctrl_area->svm_instrs.RDTSCP = 1;
    } else {
        ctrl_area->instrs.RDTSC = 0;
        ctrl_area->svm_instrs.RDTSCP = 0;

        if (info->time_state.flags & VM_TIME_TSC_PASSTHROUGH) {
            ctrl_area->TSC_OFFSET = 0;
        } else {
            ctrl_area->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
        }
    }
    return 0;
}

/*
 * CAUTION and DANGER!!!
 *
 * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi calls inside this function.
 * When executing a symbiotic call, the VMCB WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_svm_enter(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
    uint64_t guest_cycles = 0;

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info, -1);

    // Update timer devices after being in the VM before doing
    // IRQ updates, so that any interrupts they raise get seen
    // immediately.
    v3_advance_time(info, NULL);
    v3_update_timers(info);

    // disable global interrupts for vm state transition
    v3_clgi();

    // Synchronize the guest state to the VMCB
    guest_state->cr0 = info->ctrl_regs.cr0;
    guest_state->cr2 = info->ctrl_regs.cr2;
    guest_state->cr3 = info->ctrl_regs.cr3;
    guest_state->cr4 = info->ctrl_regs.cr4;
    guest_state->dr6 = info->dbg_regs.dr6;
    guest_state->dr7 = info->dbg_regs.dr7;

    // CR8 is now updated by reads/writes and it contains the APIC TPR;
    // the V_TPR should be just the class part of that.
    // This update is here just for completeness.  We currently
    // ignore V_TPR on all injections and do the priority logic
    // in the APIC.
    // guest_ctrl->guest_ctrl.V_TPR = ((info->ctrl_regs.apic_tpr) >> 4) & 0xf;

    //guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
    //

    guest_state->rflags = info->ctrl_regs.rflags;
    guest_state->efer = info->ctrl_regs.efer;

    /* Synchronize MSRs */
    guest_state->star = info->msrs.star;
    guest_state->lstar = info->msrs.lstar;
    guest_state->sfmask = info->msrs.sfmask;
    guest_state->KernelGsBase = info->msrs.kern_gs_base;

    guest_state->cpl = info->cpl;

    v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));

    guest_state->rax = info->vm_regs.rax;
    guest_state->rip = info->rip;
    guest_state->rsp = info->vm_regs.rsp;

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif


    /* ** */

    /*
      PrintDebug("SVM Entry to CS=%p  rip=%p...\n",
      (void *)(addr_t)info->segments.cs.base,
      (void *)(addr_t)info->rip);
    */

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 1) {
        if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
            V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
        }
    }
#endif

    v3_svm_config_tsc_virtualization(info);

    //V3_Print("Calling v3_svm_launch\n");
    {
        uint64_t entry_tsc = 0;
        uint64_t exit_tsc = 0;

        rdtscll(entry_tsc);

        v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);

        rdtscll(exit_tsc);

        guest_cycles = exit_tsc - entry_tsc;
    }


    //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);

    v3_last_exit = (uint32_t)(guest_ctrl->exit_code);

    v3_advance_time(info, &guest_cycles);

    info->num_exits++;

    // Save Guest state from VMCB
    info->rip = guest_state->rip;
    info->vm_regs.rsp = guest_state->rsp;
    info->vm_regs.rax = guest_state->rax;

    info->cpl = guest_state->cpl;

    info->ctrl_regs.cr0 = guest_state->cr0;
    info->ctrl_regs.cr2 = guest_state->cr2;
    info->ctrl_regs.cr3 = guest_state->cr3;
    info->ctrl_regs.cr4 = guest_state->cr4;
    info->dbg_regs.dr6 = guest_state->dr6;
    info->dbg_regs.dr7 = guest_state->dr7;
    //
    // We do not track this anymore
    // V_TPR is ignored and we do the logic in the APIC
    //info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
    //
    info->ctrl_regs.rflags = guest_state->rflags;
    info->ctrl_regs.efer = guest_state->efer;

    /* Synchronize MSRs */
    info->msrs.star = guest_state->star;
    info->msrs.lstar = guest_state->lstar;
    info->msrs.sfmask = guest_state->sfmask;
    info->msrs.kern_gs_base = guest_state->KernelGsBase;

    v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
    info->cpu_mode = v3_get_vm_cpu_mode(info);
    info->mem_mode = v3_get_vm_mem_mode(info);
    /* ** */

    // save exit info here
    exit_code = guest_ctrl->exit_code;
    exit_info1 = guest_ctrl->exit_info1;
    exit_info2 = guest_ctrl->exit_info2;

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    // reenable global interrupts after vm exit
    v3_stgi();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info, -1);

    // This timer update is for time-dependent handlers
    // if we're slaved to host time
    v3_advance_time(info, NULL);
    v3_update_timers(info);

    {
        int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);

        if (ret != 0) {
            PrintError("Error in SVM exit handler (ret=%d)\n", ret);
            PrintError("  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t)exit_code);
            return -1;
        }
    }

    if (info->timeouts.timeout_active) {
        /* Check to see if any timeouts have expired */
        v3_handle_timeouts(info, guest_cycles);
    }


    return 0;
}


int v3_start_svm_guest(struct guest_info * info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    PrintDebug("Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);

    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
    } else {
        PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);

        while (info->core_run_state == CORE_STOPPED) {

            if (info->vm_info->run_state == VM_STOPPED) {
                // The VM was stopped before this core was initialized.
                return 0;
            }

            v3_yield(info, -1);
            //PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
        }

        PrintDebug("SVM core %u (on %u) initialized\n", info->vcpu_id, info->pcpu_id);

        // We'll be paranoid about race conditions here
        v3_wait_at_barrier(info);
    }

    PrintDebug("SVM core %u (on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->vcpu_id, info->pcpu_id,
               info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));


    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n",
               info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    v3_start_time(info);

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if (v3_svm_enter(info) == -1) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print("SVM core %u: SVM ERROR!!\n", info->vcpu_id);

            v3_print_guest_state(info);

            V3_Print("SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code);

            V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
            V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
            V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                v3_gpa_to_hva(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }

            V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);

            break;
        }

        v3_wait_at_barrier(info);


        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }


/*
        if ((info->num_exits % 50000) == 0) {
            V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
            v3_print_guest_state(info);
        }
*/

    }

    // Need to take down the other cores on error...

    return 0;
}


int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {
    // init vmcb_bios

    // Write the RIP, CS, and descriptor
    // assume the rest is already good to go
    //
    // vector VV -> rip at 0
    //              CS = VV00
    //  This means we start executing at linear address VV000
    //
    // So the selector needs to be VV00
    // and the base needs to be VV000
    //
    core->rip = 0;
    core->segments.cs.selector = rip << 8;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = rip << 12;

    return 0;
}


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        V3_Print("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        // Test SVMDIS as a bit mask: comparing the masked value against 1
        // would never match unless the flag happened to be bit 0.
        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            V3_Print("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                V3_Print("SVM BIOS Disabled, not unlockable\n");
            } else {
                V3_Print("SVM is locked with a key\n");
            }
            return 0;

        } else {
            V3_Print("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        V3_Print("SVM Nested Paging not supported\n");
        return 0;
    } else {
        V3_Print("SVM Nested Paging supported\n");
        return 1;
    }
}



void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    V3_Print("SVM Enabled\n");

    // Setup the host state save area
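    // (The hardware requires a page-aligned physical region for
    //  VM_HSAVE_PA; four pages are allocated here.)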
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);

    if (!host_vmcbs[cpu_id]) {
        PrintError("Failed to allocate VMCB\n");
        return;
    }

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}



void v3_deinit_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // reset SVM_VM_HSAVE_PA_MSR
    // Does setting it to NULL disable??
    msr.r_reg = 0;
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    // Disable SVM?
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low &= ~EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    V3_FreePages((void *)host_vmcbs[cpu_id], 4);

    V3_Print("Host CPU %d host area freed, and SVM disabled\n", cpu_id);
    return;
}


#if 0
/*
 * Test VMSAVE/VMLOAD Latency
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmsave
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
                          : "c"(host_vmcbs[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq  %%rcx, %%rax ; "
                          vmload
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
                          : "c"(host_vmcbs[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
}
/* End Latency Test */

#endif



#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means it's probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector * segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;


  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  if (!ctrl_area->IOPM_BASE_PA) {
      PrintError("Cannot allocate IO bitmap\n");
      return;
  }

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}

#endif