Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch
or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

Checking out any other branch works the same way.
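For example, to track a release branch instead (the branch name
release-1.2 below is only illustrative; run "git branch -r" after
cloning to see which release branches actually exist):

  git checkout --track -b release-1.2 origin/release-1.2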


palacios/src/palacios/vmx.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/vmx.h>
#include <palacios/vmm.h>
#include <palacios/vmx_handler.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_barrier.h>
#include <palacios/vmm_timeout.h>

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmx_ept.h>
#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>

#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


/* These fields contain the hardware feature sets supported by the local CPU */
static struct vmx_hw_info hw_info;

extern v3_cpu_arch_t v3_cpu_types[];

static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};

extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);

static inline int check_vmcs_write(vmcs_field_t field, addr_t val) {
    int ret = 0;

    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}

static inline int check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = 0;

    ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}


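/* Note: every VMCS region (and the VMXON region) must begin with the
 * VMCS revision identifier reported by the IA32_VMX_BASIC MSR;
 * VMPTRLD rejects a region whose revision does not match the CPU's. */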
static addr_t allocate_vmcs() {
    struct vmcs_data * vmcs_page = NULL;

    PrintDebug("Allocating page\n");

    vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
    memset(vmcs_page, 0, 4096);

    vmcs_page->revision = hw_info.basic_info.revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}

/*

static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
    V3_Print("\n\nEFER READ\n");
    
    v3_print_guest_state(core);

    src->value = efer->value;
    return 0;
}

static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
    V3_Print("\n\nEFER WRITE\n");
    
    v3_print_guest_state(core);

    efer->value = src.value;

    {
        struct vmx_data * vmx_state = core->vmm_data;

        V3_Print("Trapping page faults and GPFs\n");
        vmx_state->excp_bmap.pf = 1;
        vmx_state->excp_bmap.gp = 1;
        
        check_vmcs_write(VMCS_EXCP_BITMAP, vmx_state->excp_bmap.value);
    }

    return 0;
}
*/


static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
    int vmx_ret = 0;

    /* Get available features */
    struct vmx_pin_ctrls avail_pin_ctrls;
    avail_pin_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.pin_ctrls));
    /* ** */


    // disable global interrupts for vm state initialization
    v3_disable_ints();

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
    vmx_state->state = VMX_UNLAUNCHED;

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }


    /*** Setup default state from HW ***/

    vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
    vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
    vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
    vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;
    vmx_state->sec_proc_ctrls.value = hw_info.sec_proc_ctrls.def_val;

    /* Print Control MSRs */
    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);


    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */


    /********** Setup VMX Control Fields ***********/

    /* Add external interrupts, NMI exiting, and virtual NMI */
    vmx_state->pin_ctrls.nmi_exit = 1;
    vmx_state->pin_ctrls.ext_int_exit = 1;


    /* We enable the preemption timer by default to measure accurate guest time */
    if (avail_pin_ctrls.active_preempt_timer) {
        V3_Print("VMX Preemption Timer is available\n");
        vmx_state->pin_ctrls.active_preempt_timer = 1;
        vmx_state->exit_ctrls.save_preempt_timer = 1;
    }

    vmx_state->pri_proc_ctrls.hlt_exit = 1;


    vmx_state->pri_proc_ctrls.pause_exit = 0;
    vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif

    /* Setup IO map */
    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(core->vm_info->io_map.arch_data));
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR, 
            (addr_t)V3_PAddr(core->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);


    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data));



#ifdef __V3_64BIT__
    // Ensure host runs in 64-bit mode at each VM EXIT
    vmx_state->exit_ctrls.host_64_on = 1;
#endif



    // Restore host's EFER register on each VM EXIT
    vmx_state->exit_ctrls.ld_efer = 1;

    // Save/restore guest's EFER register to/from VMCS on VM EXIT/ENTRY
    vmx_state->exit_ctrls.save_efer = 1;
    vmx_state->entry_ctrls.ld_efer  = 1;

    vmx_state->exit_ctrls.save_pat = 1;
    vmx_state->exit_ctrls.ld_pat = 1;
    vmx_state->entry_ctrls.ld_pat = 1;

    /* Temporary GPF trap */
    //    vmx_state->excp_bmap.gp = 1;

    // Setup guest's initial PAT field
    vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);

    /* Setup paging */
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return -1;
        }
        
#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));
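        /* (Guest/host mask semantics: for each CR0 bit set in the mask,
         * the guest reads that bit from the read shadow, and a guest
         * write to it causes a CR-access VM exit instead of taking
         * effect directly.) */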

        // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        core->ctrl_regs.cr3 = core->direct_map_pt;

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        /* Add CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;

        vmx_state->pri_proc_ctrls.invlpg_exit = 1;

        /* Add page fault exits */
        vmx_state->excp_bmap.pf = 1;

        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);

        // Hook all accesses to EFER register
        v3_hook_msr(core->vm_info, EFER_MSR, 
                    &v3_handle_efer_read,
                    &v3_handle_efer_write, 
                    core);

    } else if ((core->shdw_pg_mode == NESTED_PAGING) && 
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        /* Disable CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        /* Add page fault exits */
        //      vmx_state->excp_bmap.pf = 1; // This should never happen..., enabled to catch bugs

        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging


        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to EFER register
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);

    } else if ((core->shdw_pg_mode == NESTED_PAGING) && 
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        int i = 0;
        // For now we will assume that unrestricted guest mode is assured w/ EPT

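        /* (Architectural note: this is the classic x86 reset state as
         * mapped below 1 MB -- CS.base 0xf0000 plus RIP 0xfff0 puts the
         * first guest fetch at 0xffff0, the real-mode reset vector.) */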
        core->vm_regs.rsp = 0x00;
        core->rip = 0xfff0;
        core->vm_regs.rdx = 0x00000f00;
        core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
        core->ctrl_regs.cr0 = 0x00000030; 
        core->ctrl_regs.cr4 = 0x00002010; // Enable VMX and PSE flag


        core->segments.cs.selector = 0xf000;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = 0x0000000f0000LL;

        // (raw attributes = 0xf3)
        core->segments.cs.type = 0xb;
        core->segments.cs.system = 0x1;
        core->segments.cs.dpl = 0x0;
        core->segments.cs.present = 1;


        struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds), 
                                          &(core->segments.es), &(core->segments.fs), 
                                          &(core->segments.gs), NULL};

        for (i = 0; segregs[i] != NULL; i++) {
            struct v3_segment * seg = segregs[i];

            seg->selector = 0x0000;
            //    seg->base = seg->selector << 4;
            seg->base = 0x00000000;
            seg->limit = 0xffff;

            seg->type = 0x3;
            seg->system = 0x1;
            seg->dpl = 0x0;
            seg->present = 1;
            //    seg->granularity = 1;
        }


        core->segments.gdtr.limit = 0x0000ffff;
        core->segments.gdtr.base = 0x0000000000000000LL;

        core->segments.idtr.limit = 0x0000ffff;
        core->segments.idtr.base = 0x0000000000000000LL;

        core->segments.ldtr.selector = 0x0000;
        core->segments.ldtr.limit = 0x0000ffff;
        core->segments.ldtr.base = 0x0000000000000000LL;
        core->segments.ldtr.type = 0x2;
        core->segments.ldtr.present = 1;

        core->segments.tr.selector = 0x0000;
        core->segments.tr.limit = 0x0000ffff;
        core->segments.tr.base = 0x0000000000000000LL;
        core->segments.tr.type = 0xb;
        core->segments.tr.present = 1;

        //      core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
        core->dbg_regs.dr7 = 0x0000000000000400LL;

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
        vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation


        /* Disable shadow paging stuff */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;


        // Cause VM_EXIT whenever the CR4.VMXE bit is set
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);


        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to EFER register
        //v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
    } else {
        PrintError("Invalid Virtual paging mode\n");
        return -1;
    }


    // hook vmx msrs

    // Setup SYSCALL/SYSENTER MSRs in load/store area

    // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
    {

        struct vmcs_msr_save_area * msr_entries = NULL;
        int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;
        int msr_ret = 0;

        V3_Print("Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);

        if (max_msrs < 4) {
            PrintError("Max MSR cache size is too small (%d)\n", max_msrs);
            return -1;
        }

        vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1);

        if (vmx_state->msr_area_paddr == (addr_t)NULL) {
            PrintError("could not allocate msr load/store area\n");
            return -1;
        }

        msr_entries = (struct vmcs_msr_save_area *)V3_VAddr((void *)(vmx_state->msr_area_paddr));
        vmx_state->msr_area = msr_entries; // cache in vmx_info

        memset(msr_entries, 0, PAGE_SIZE);

        msr_entries->guest_star.index = IA32_STAR_MSR;
        msr_entries->guest_lstar.index = IA32_LSTAR_MSR;
        msr_entries->guest_fmask.index = IA32_FMASK_MSR;
        msr_entries->guest_kern_gs.index = IA32_KERN_GS_BASE_MSR;

        msr_entries->host_star.index = IA32_STAR_MSR;
        msr_entries->host_lstar.index = IA32_LSTAR_MSR;
        msr_entries->host_fmask.index = IA32_FMASK_MSR;
        msr_entries->host_kern_gs.index = IA32_KERN_GS_BASE_MSR;

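        /* (MSR-area format, per the VMX spec: each entry is a 16-byte
         * {MSR index, reserved, 64-bit data} record; the counts written
         * below tell the CPU how many entries to process on each
         * VM exit/entry.) */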
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
        msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);

        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
        msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->host_msrs));


        msr_ret |= v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);


        // IMPORTANT: These MSRs appear to be cached by the hardware....
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);

        msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);

        msr_ret |= v3_hook_msr(core->vm_info, IA32_PAT_MSR, NULL, NULL, NULL);

        // Not sure what to do about this... Does not appear to be an explicit hardware cache version...
        msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);

        if (msr_ret != 0) {
            PrintError("Error configuring MSR save/restore area\n");
            return -1;
        }

    }

    /* Sanity check ctrl/reg fields against hw_defaults */


    /*** Write all the info to the VMCS ***/

    /*
    {
        // IS THIS NECESSARY???
#define DEBUGCTL_MSR 0x1d9
        struct v3_msr tmp_msr;
        v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
        vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
        core->dbg_regs.dr7 = 0x400;
    }
    */

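    /* (The VMCS link pointer must be all-ones unless VMCS shadowing is
     * in use; a VM entry with any other value fails its validity checks.) */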
#ifdef __V3_64BIT__
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
#else
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif


    if (v3_update_vmcs_ctrl_fields(core)) {
        PrintError("Could not write control fields!\n");
        return -1;
    }

    /*
    if (v3_update_vmcs_host_state(core)) {
        PrintError("Could not write host state\n");
        return -1;
    }
    */

    // Reenable global interrupts now that the VM state is initialized.
    // If another VM kicks us off this core, it will update our vmx state
    // so that we know to reload ourselves.
    v3_enable_ints();

    return 0;
}

int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
    struct vmx_data * vmx_state = NULL;
    int vmx_ret = 0;

    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
    memset(vmx_state, 0, sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);

    PrintDebug("Allocating VMCS\n");
    vmx_state->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));

    core->vmm_data = vmx_state;
    vmx_state->state = VMX_UNLAUNCHED;

    PrintDebug("Initializing VMCS (addr=%p)\n", core->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit

    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1; 
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCS\n");
        if (init_vmcs_bios(core, vmx_state) == -1) {
            PrintError("Error initializing VMCS to BIOS state\n");
            return -1;
        }
    } else {
        PrintError("Invalid VM Class\n");
        return -1;
    }

    PrintDebug("Serializing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    return 0;
}


int v3_deinit_vmx_vmcs(struct guest_info * core) {
    struct vmx_data * vmx_state = core->vmm_data;

    V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
    V3_FreePages(V3_PAddr(vmx_state->msr_area), 1);

    V3_Free(vmx_state);

    return 0;
}



#ifdef V3_CONFIG_CHECKPOINT
/* 
 * JRL: This is broken
 */
int v3_vmx_save_core(struct guest_info * core, void * ctx){
    uint64_t vmcs_ptr = vmcs_store();

    v3_chkpt_save(ctx, "vmcs_data", PAGE_SIZE, (void *)vmcs_ptr);

    return 0;
}

int v3_vmx_load_core(struct guest_info * core, void * ctx){
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    struct cr0_32 * shadow_cr0;
    char vmcs[PAGE_SIZE_4KB];

    v3_chkpt_load(ctx, "vmcs_data", PAGE_SIZE_4KB, vmcs);

    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmcs_load((addr_t)vmcs);

    v3_vmx_save_vmcs(core);

    shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);


    /* Get the CPU mode to set the guest_ia32e entry ctrl */

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(core) == -1) {
                PrintError("Failed to activate shadow page tables\n");
                return -1;
            }
        } else {
            if (v3_activate_passthrough_pt(core) == -1) {
                PrintError("Failed to activate passthrough page tables\n");
                return -1;
            }
        }
    }

    return 0;
}
#endif


void v3_flush_vmx_vm_core(struct guest_info * core) {
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmx_info->state = VMX_UNLAUNCHED;
}



static int update_irq_exit_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));

    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Calling v3_injecting_intr\n");
#endif
        info->intr_core_state.irq_started = 0;
        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    return 0;
}

static int update_irq_entry_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;
    struct vmcs_interrupt_state intr_core_state;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));

    /* Check for pending exceptions to inject */
    if (v3_excp_pending(info)) {
        struct vmx_entry_int_info int_info;
        int_info.value = 0;

        // In VMX, almost every exception is hardware
        // Software exceptions are pretty much only for breakpoint or overflow
        int_info.type = 3;
        int_info.vector = v3_get_excp_number(info);

        if (info->excp_state.excp_error_code_valid) {
            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
            int_info.error_code = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("Injecting exception %d with error code %x\n", 
                    int_info.vector, info->excp_state.excp_error_code);
#endif
        }

        int_info.valid = 1;
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);

        v3_injecting_excp(info, int_info.vector);

    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) && 
               (intr_core_state.val == 0)) {

        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("IRQ pending from previous injection\n");
#endif

            // Copy the IDT vectoring info over to reinject the old interrupt
            if (idt_vec_info.error_code == 1) {
                uint32_t err_code = 0;

                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
            }

            idt_vec_info.undef = 0;
            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);

        } else {
            struct vmx_entry_int_info ent_int;
            ent_int.value = 0;

            switch (v3_intr_pending(info)) {
                case V3_EXTERNAL_IRQ: {
                    info->intr_core_state.irq_vector = v3_get_intr(info); 
                    ent_int.vector = info->intr_core_state.irq_vector;
                    ent_int.type = 0;
                    ent_int.error_code = 0;
                    ent_int.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                    V3_Print("Injecting Interrupt %d at exit %u(EIP=%p)\n", 
                               info->intr_core_state.irq_vector, 
                               (uint32_t)info->num_exits, 
                               (void *)(addr_t)info->rip);
#endif

                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    info->intr_core_state.irq_started = 1;

                    break;
                }
                case V3_NMI:
                    PrintDebug("Injecting NMI\n");

                    ent_int.type = 2;
                    ent_int.vector = 2;
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_SOFTWARE_INTR:
                    PrintDebug("Injecting software interrupt\n");
                    ent_int.type = 4;

                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_VIRTUAL_IRQ:
                    // Not sure what to do here, Intel doesn't have virtual IRQs
                    // May be the same as external interrupts/IRQs

                    break;
                case V3_INVALID_INTR:
                default:
                    break;
            }
        }
    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
        // Enable INTR window exiting so we know when IF=1
        uint32_t instr_len;

        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif

        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
    }


    return 0;
}



static struct vmx_exit_info exit_log[10];
static uint64_t rip_log[10];
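/* (exit_log/rip_log form a 10-entry ring buffer indexed by
 * num_exits % 10; print_exit_log() below walks it backwards from the
 * most recent exit.) */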


static void print_exit_log(struct guest_info * info) {
    int cnt = info->num_exits % 10;
    int i = 0;


    V3_Print("\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);

    for (i = 0; i < 10; i++) {
        struct vmx_exit_info * tmp = &exit_log[cnt];

        V3_Print("%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
        V3_Print("\texit_qual = %p\n", (void *)tmp->exit_qual);
        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);
        V3_Print("\tguest_linear_addr = %p\n", (void *)(addr_t)tmp->guest_linear_addr);
        V3_Print("\tRIP = %p\n", (void *)rip_log[cnt]);

        cnt--;

        if (cnt == -1) {
            cnt = 9;
        }
    }
}


/* 
 * CAUTION and DANGER!!! 
 * 
 * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function.
 * When executing a symbiotic call, the VMCS WILL be overwritten, so any dependencies 
 * on its contents will cause things to break. The contents at the time of the exit WILL 
 * change before the exit handler is executed.
 */
int v3_vmx_enter(struct guest_info * info) {
    int ret = 0;
    sint64_t tsc_offset;
    uint32_t tsc_offset_low, tsc_offset_high;
    struct vmx_exit_info exit_info;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
    uint64_t guest_cycles = 0;

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    // disable global interrupts for vm state transition
    v3_disable_ints();

    // Update timer devices late after being in the VM so that as much 
    // of the time in the VM is accounted for as possible. Also do it before
    // updating IRQ entry state so that any interrupts the timers raise get 
    // handled on the next VM entry. Must be done with interrupts disabled.
    v3_advance_time(info);
    v3_update_timers(info);

    if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
        vmcs_clear(vmx_info->vmcs_ptr_phys);
        vmcs_load(vmx_info->vmcs_ptr_phys);
        vmx_info->state = VMX_UNLAUNCHED;
    }

    v3_vmx_restore_vmcs(info);


#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else 
    update_irq_entry_state(info);
#endif

    {
        addr_t guest_cr3;
        vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
        vmcs_write(VMCS_GUEST_CR3, guest_cr3);
    }


    // Perform last-minute time bookkeeping prior to entering the VM
    v3_time_enter_vm(info);

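    /* (VMCS_TSC_OFFSET is a 64-bit field; it is written here as two
     * 32-bit halves so the same code works from a 32-bit host.) */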
    tsc_offset = v3_tsc_host_offset(&info->time_state);
    tsc_offset_high = (uint32_t)((tsc_offset >> 32) & 0xffffffff);
    tsc_offset_low = (uint32_t)(tsc_offset & 0xffffffff);

    check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
    check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);


    if (v3_update_vmcs_host_state(info)) {
        v3_enable_ints();
        PrintError("Could not write host state\n");
        return -1;
    }

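    /* (The preemption timer counts down while the guest runs and forces
     * a VM exit when it reaches zero; programming it with the next
     * pending timeout bounds how long the guest can run unobserved.) */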
    if (vmx_info->pin_ctrls.active_preempt_timer) {
        /* Preemption timer is active */
        uint32_t preempt_window = 0xffffffff;

        if (info->timeouts.timeout_active) {
            preempt_window = info->timeouts.next_timeout;
        }

        check_vmcs_write(VMCS_PREEMPT_TIMER, preempt_window);
    }

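    /* (VMLAUNCH is required for the first entry after VMCLEAR puts the
     * VMCS in the "clear" state; subsequent entries must use VMRESUME,
     * which is why the launched/unlaunched state is tracked here.) */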
    {   
        uint64_t entry_tsc = 0;
        uint64_t exit_tsc = 0;

        if (vmx_info->state == VMX_UNLAUNCHED) {
            vmx_info->state = VMX_LAUNCHED;
            rdtscll(entry_tsc);
            ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
            rdtscll(exit_tsc);

        } else {
            V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
            rdtscll(entry_tsc);
            ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
            rdtscll(exit_tsc);
        }

        guest_cycles = exit_tsc - entry_tsc;    
    }

    //  PrintDebug("VMX Exit: ret=%d\n", ret);

    if (ret != VMX_SUCCESS) {
        uint32_t error = 0;
        vmcs_read(VMCS_INSTR_ERR, &error);

        v3_enable_ints();

        PrintError("VMENTRY Error: %d (launch_ret = %d)\n", error, ret);
        return -1;
    }


    info->num_exits++;

    /* If we have the preemption timer, then use it to get a more accurate guest time */
    if (vmx_info->pin_ctrls.active_preempt_timer) {
        uint32_t cycles_left = 0;
        check_vmcs_read(VMCS_PREEMPT_TIMER, &(cycles_left));

        if (info->timeouts.timeout_active) {
            guest_cycles = info->timeouts.next_timeout - cycles_left;
        } else {
            guest_cycles = 0xffffffff - cycles_left;
        }
    }

    // Immediate exit from VM time bookkeeping
    v3_time_exit_vm(info, &guest_cycles);


    /* Update guest state */
    v3_vmx_save_vmcs(info);

    // info->cpl = info->segments.cs.selector & 0x3;

    info->mem_mode = v3_get_vm_mem_mode(info);
    info->cpu_mode = v3_get_vm_cpu_mode(info);


    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));

    if (info->shdw_pg_mode == NESTED_PAGING) {
        check_vmcs_read(VMCS_GUEST_PHYS_ADDR, &(exit_info.ept_fault_addr));
    }

    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);

    exit_log[info->num_exits % 10] = exit_info;
    rip_log[info->num_exits % 10] = get_addr_linear(info, info->rip, &(info->segments.cs));

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    if (exit_info.exit_reason == VMEXIT_INTR_WINDOW) {
        // This is a special case whose only job is to inject an interrupt
        vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
        vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
        vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
       V3_Print("Interrupts available again! (RIP=%llx)\n", info->rip);
#endif
    }

    // reenable global interrupts after vm exit
    v3_enable_ints();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError("Error in VMX exit handler (Exit reason=%x)\n", exit_info.exit_reason);
        return -1;
    }

    if (info->timeouts.timeout_active) {
        /* Check to see if any timeouts have expired */
        v3_handle_timeouts(info, guest_cycles);
    }

    return 0;
}


int v3_start_vmx_guest(struct guest_info * info) {

    PrintDebug("Starting VMX core %u\n", info->vcpu_id);

    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
    } else {

        PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);

        while (info->core_run_state == CORE_STOPPED) {

            if (info->vm_info->run_state == VM_STOPPED) {
                // The VM was stopped before this core was initialized. 
                return 0;
            }

            v3_yield(info);
            //PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
        }

        PrintDebug("VMX core %u initialized\n", info->vcpu_id);

        // We'll be paranoid about race conditions here
        v3_wait_at_barrier(info);
    }


    PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->vcpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));


    PrintDebug("VMX core %u: Launching VMX VM on logical core %u\n", info->vcpu_id, info->pcpu_id);

    v3_start_time(info);

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if (v3_vmx_enter(info) == -1) {

            addr_t host_addr;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print("VMX core %u: VMX ERROR!!\n", info->vcpu_id); 

            v3_print_guest_state(info);

            V3_Print("VMX core %u\n", info->vcpu_id); 

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                v3_gpa_to_hva(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }

            V3_Print("VMX core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print("VMX core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);


            v3_print_vmcs();
            print_exit_log(info);
            return -1;
        }

        v3_wait_at_barrier(info);


        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }
/*
        if ((info->num_exits % 5000) == 0) {
            V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
*/

    }

    return 0;
}




#define VMX_FEATURE_CONTROL_MSR     0x0000003a
#define CPUID_VMX_FEATURES 0x00000005  /* LOCK and VMXON */
#define CPUID_1_ECX_VTXFLAG 0x00000020
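/* (CPUID.1:ECX bit 5 advertises VMX support; in IA32_FEATURE_CONTROL,
 * bit 0 is the lock bit and bit 2 enables VMXON outside SMX, so both
 * bits (0x5) must be set once the MSR has been locked by the BIOS.) */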

int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: 0x%x\n", ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & CPUID_VMX_FEATURES) != CPUID_VMX_FEATURES) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }

    } else {
        PrintDebug("VMX not supported on this cpu\n");
        return 0;
    }

    return 1;
}


int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip) {
    // init vmcs bios

    if ((core->shdw_pg_mode == NESTED_PAGING) && 
        (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        // easy 
        core->rip = 0;
        core->segments.cs.selector = rip << 8;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = rip << 12;
    } else {
        core->vm_regs.rdx = core->vcpu_id;
        core->vm_regs.rbx = rip;
    }

    return 0;
}



void v3_init_vmx_cpu(int cpu_id) {
    addr_t vmx_on_region = 0;
    extern v3_cpu_arch_t v3_mach_type;

    if (v3_mach_type == V3_INVALID_CPU) {
        if (v3_init_vmx_hw(&hw_info) == -1) {
            PrintError("Could not initialize VMX hardware features on cpu %d\n", cpu_id);
            return;
        }
    }

    enable_vmx();


    // Setup VMXON Region
    vmx_on_region = allocate_vmcs();
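    /* (A VMXON region has the same size and revision-id header layout
     * as a VMCS, which is why allocate_vmcs() is reused here.) */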

    if (vmx_on(vmx_on_region) == VMX_SUCCESS) {
        V3_Print("VMX Enabled\n");
        host_vmcs_ptrs[cpu_id] = vmx_on_region;
    } else {
        V3_Print("VMX already enabled\n");
        V3_FreePages((void *)vmx_on_region, 1);
    }

    PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);    

    {
        struct vmx_sec_proc_ctrls sec_proc_ctrls;
        sec_proc_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.sec_proc_ctrls));

        if (sec_proc_ctrls.enable_ept == 0) {
            V3_Print("VMX EPT (Nested) Paging not supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_CPU;
        } else if (sec_proc_ctrls.unrstrct_guest == 0) {
            V3_Print("VMX EPT (Nested) Paging supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
        } else {
            V3_Print("VMX EPT (Nested) Paging + Unrestricted guest supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_UG_CPU;
        }
    }

}


void v3_deinit_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    if (host_vmcs_ptrs[cpu_id] != 0) {
        V3_Print("Disabling VMX\n");

        if (vmx_off() != VMX_SUCCESS) {
            PrintError("Error executing VMXOFF\n");
        }

        V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);

        host_vmcs_ptrs[cpu_id] = 0;
    }
}