Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
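For example, to track a release branch (the branch name here is illustrative; run "git branch -r" to list the branches that actually exist):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2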


palacios/src/palacios/vmx.c:
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/vmx.h>
#include <palacios/vmm.h>
#include <palacios/vmx_handler.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_barrier.h>

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmx_ept.h>
#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>

#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


/* These fields contain the hardware feature sets supported by the local CPU */
static struct vmx_hw_info hw_info;

extern v3_cpu_arch_t v3_cpu_types[];

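/* Physical addresses of the per-CPU VMXON regions, indexed by logical CPU id.
 * A nonzero entry means v3_init_vmx_cpu() enabled VMX operation on that CPU. */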
static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};

extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);

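/* Thin wrappers around vmcs_write()/vmcs_read() that log the offending VMCS
 * field on failure. Callers accumulate the return values with |= and check
 * the result once after a batch of accesses. */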
static inline int check_vmcs_write(vmcs_field_t field, addr_t val) {
    int ret = 0;

    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}

static inline int check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = 0;

    ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}


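/* Allocate one 4 KB page for use as a VMCS (or VMXON) region. Per the Intel
 * SDM, the first 32 bits of the region must hold the VMCS revision identifier
 * reported by the IA32_VMX_BASIC MSR (cached here in hw_info) before the
 * region is handed to VMPTRLD or VMXON. Returns the region's physical
 * address. */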
static addr_t allocate_vmcs(void) {
    struct vmcs_data * vmcs_page = NULL;

    PrintDebug("Allocating page\n");

    vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
    memset(vmcs_page, 0, 4096);

    vmcs_page->revision = hw_info.basic_info.revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}

/*

static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
    V3_Print("\n\nEFER READ\n");

    v3_print_guest_state(core);

    src->value = efer->value;
    return 0;
}

static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
    V3_Print("\n\nEFER WRITE\n");

    v3_print_guest_state(core);

    efer->value = src.value;

    {
        struct vmx_data * vmx_state = core->vmm_data;

        V3_Print("Trapping page faults and GPFs\n");
        vmx_state->excp_bmap.pf = 1;
        vmx_state->excp_bmap.gp = 1;

        check_vmcs_write(VMCS_EXCP_BITMAP, vmx_state->excp_bmap.value);
    }

    return 0;
}
*/

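/* Initialize the VMCS of a core to a BIOS-bootable reset state. This
 * configures the pin-based, processor-based, entry, and exit controls from
 * the hardware defaults, installs the I/O and MSR bitmaps, selects the
 * paging implementation (shadow paging, EPT, or EPT with unrestricted
 * guest), and sets up the MSR load/store areas. */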
static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
    int vmx_ret = 0;

    // disable global interrupts for vm state initialization
    v3_disable_ints();

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
    vmx_state->state = VMX_UNLAUNCHED;

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }


    /*** Setup default state from HW ***/

    vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
    vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
    vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
    vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;
    vmx_state->sec_proc_ctrls.value = hw_info.sec_proc_ctrls.def_val;

    /* Print Control MSRs */
    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);



    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */


    /********** Setup VMX Control Fields ***********/

    /* Add external interrupts, NMI exiting, and virtual NMI */
    vmx_state->pin_ctrls.nmi_exit = 1;
    vmx_state->pin_ctrls.ext_int_exit = 1;


    vmx_state->pri_proc_ctrls.hlt_exit = 1;


    vmx_state->pri_proc_ctrls.pause_exit = 0;
    vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif

    /* Setup IO map */
    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(core->vm_info->io_map.arch_data));
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
            (addr_t)V3_PAddr(core->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);


    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data));



#ifdef __V3_64BIT__
    // Ensure host runs in 64-bit mode at each VM EXIT
    vmx_state->exit_ctrls.host_64_on = 1;
#endif



    // Restore host's EFER register on each VM EXIT
    vmx_state->exit_ctrls.ld_efer = 1;

    // Save/restore guest's EFER register to/from VMCS on VM EXIT/ENTRY
    vmx_state->exit_ctrls.save_efer = 1;
    vmx_state->entry_ctrls.ld_efer  = 1;

    vmx_state->exit_ctrls.save_pat = 1;
    vmx_state->exit_ctrls.ld_pat = 1;
    vmx_state->entry_ctrls.ld_pat = 1;

    /* Temporary GPF trap */
    vmx_state->excp_bmap.gp = 1;

    // Set up the guest's initial PAT field
    vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);

    /* Setup paging */
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return -1;
        }

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));


        // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        core->ctrl_regs.cr3 = core->direct_map_pt;

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        /* Add CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;

        vmx_state->pri_proc_ctrls.invlpg_exit = 1;

        /* Add page fault exits */
        vmx_state->excp_bmap.pf = 1;

        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);

        // Hook all accesses to EFER register
        v3_hook_msr(core->vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    core);

    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        // Cause VM_EXIT whenever CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        /* Disable CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        /* Add page fault exits */
        //      vmx_state->excp_bmap.pf = 1; // This should never happen..., enabled to catch bugs

        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging



        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to EFER register
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);

    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        int i = 0;
        // For now we assume that unrestricted guest mode is available with EPT


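        /* Place the core in the canonical x86 reset state: execution starts
         * at F000:FFF0 in 16-bit real mode, RFLAGS has only its always-one
         * reserved bit set, EDX holds a processor-signature value, and
         * CR0 (ET|NE) / CR4 (VMXE|PSE) leave the guest unpaged. */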
        core->vm_regs.rsp = 0x00;
        core->rip = 0xfff0;
        core->vm_regs.rdx = 0x00000f00;
        core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
        core->ctrl_regs.cr0 = 0x00000030;
        core->ctrl_regs.cr4 = 0x00002010; // Enable VMX and PSE flag


        core->segments.cs.selector = 0xf000;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = 0x0000000f0000LL;

        // (raw attributes = 0xf3)
        core->segments.cs.type = 0xb;
        core->segments.cs.system = 0x1;
        core->segments.cs.dpl = 0x0;
        core->segments.cs.present = 1;



        struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                          &(core->segments.es), &(core->segments.fs),
                                          &(core->segments.gs), NULL};

        for ( i = 0; segregs[i] != NULL; i++) {
            struct v3_segment * seg = segregs[i];

            seg->selector = 0x0000;
            //    seg->base = seg->selector << 4;
            seg->base = 0x00000000;
            seg->limit = 0xffff;


            seg->type = 0x3;
            seg->system = 0x1;
            seg->dpl = 0x0;
            seg->present = 1;
            //    seg->granularity = 1;

        }


        core->segments.gdtr.limit = 0x0000ffff;
        core->segments.gdtr.base = 0x0000000000000000LL;

        core->segments.idtr.limit = 0x0000ffff;
        core->segments.idtr.base = 0x0000000000000000LL;

        core->segments.ldtr.selector = 0x0000;
        core->segments.ldtr.limit = 0x0000ffff;
        core->segments.ldtr.base = 0x0000000000000000LL;
        core->segments.ldtr.type = 0x2;
        core->segments.ldtr.present = 1;

        core->segments.tr.selector = 0x0000;
        core->segments.tr.limit = 0x0000ffff;
        core->segments.tr.base = 0x0000000000000000LL;
        core->segments.tr.type = 0xb;
        core->segments.tr.present = 1;

        //      core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
        core->dbg_regs.dr7 = 0x0000000000000400LL;

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
        vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation


        /* Disable shadow paging stuff */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;


        // Cause VM_EXIT whenever the CR4.VMXE bit is set
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);


        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to EFER register
        //v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
    } else {
        PrintError("Invalid Virtual paging mode\n");
        return -1;
    }


    // hook vmx msrs

    // Setup SYSCALL/SYSENTER MSRs in load/store area

    // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
    {

        struct vmcs_msr_save_area * msr_entries = NULL;
        int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;
        int msr_ret = 0;

        V3_Print("Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);

        if (max_msrs < 4) {
            PrintError("Max MSR cache size is too small (%d)\n", max_msrs);
            return -1;
        }

        vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1);

        if (vmx_state->msr_area_paddr == (addr_t)NULL) {
            PrintError("could not allocate msr load/store area\n");
            return -1;
        }

        msr_entries = (struct vmcs_msr_save_area *)V3_VAddr((void *)(vmx_state->msr_area_paddr));
        vmx_state->msr_area = msr_entries; // cache in vmx_info

        memset(msr_entries, 0, PAGE_SIZE);

        msr_entries->guest_star.index = IA32_STAR_MSR;
        msr_entries->guest_lstar.index = IA32_LSTAR_MSR;
        msr_entries->guest_fmask.index = IA32_FMASK_MSR;
        msr_entries->guest_kern_gs.index = IA32_KERN_GS_BASE_MSR;

        msr_entries->host_star.index = IA32_STAR_MSR;
        msr_entries->host_lstar.index = IA32_LSTAR_MSR;
        msr_entries->host_fmask.index = IA32_FMASK_MSR;
        msr_entries->host_kern_gs.index = IA32_KERN_GS_BASE_MSR;

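        /* VMX transitions process these areas automatically: on VM exit the
         * guest values are stored to the exit-store area and the host values
         * are loaded from the exit-load area; on VM entry the guest values
         * are reloaded from the entry-load area. Four MSRs are swapped here:
         * STAR, LSTAR, FMASK, and KERNEL_GS_BASE. */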
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
        msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);

        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
        msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->host_msrs));


        msr_ret |= v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);


        // IMPORTANT: These MSRs appear to be cached by the hardware....
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);

        msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);

        msr_ret |= v3_hook_msr(core->vm_info, IA32_PAT_MSR, NULL, NULL, NULL);

        // Not sure what to do about this... Does not appear to be an explicit hardware cached version...
        msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);

        if (msr_ret != 0) {
            PrintError("Error configuring MSR save/restore area\n");
            return -1;
        }


    }

    /* Sanity check ctrl/reg fields against hw_defaults */




    /*** Write all the info to the VMCS ***/

    /*
    {
        // IS THIS NECESSARY???
#define DEBUGCTL_MSR 0x1d9
        struct v3_msr tmp_msr;
        v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
        vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
        core->dbg_regs.dr7 = 0x400;
    }
    */

#ifdef __V3_64BIT__
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
#else
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif


    if (v3_update_vmcs_ctrl_fields(core)) {
        PrintError("Could not write control fields!\n");
        return -1;
    }

    /*
    if (v3_update_vmcs_host_state(core)) {
        PrintError("Could not write host state\n");
        return -1;
    }
    */

    // Re-enable global interrupts now that VM state initialization is done.
    // If another VM kicks us off this CPU, it will update our VMX state so
    // that we know to reload ourselves.
    v3_enable_ints();

    return 0;
}

int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
    struct vmx_data * vmx_state = NULL;
    int vmx_ret = 0;

    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
    memset(vmx_state, 0, sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);

    PrintDebug("Allocating VMCS\n");
    vmx_state->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));

    core->vmm_data = vmx_state;
    vmx_state->state = VMX_UNLAUNCHED;

    PrintDebug("Initializing VMCS (addr=%p)\n", core->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit

    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCS\n");
        if (init_vmcs_bios(core, vmx_state) == -1) {
            PrintError("Error initializing VMCS to BIOS state\n");
            return -1;
        }
    } else {
        PrintError("Invalid VM Class\n");
        return -1;
    }

    PrintDebug("Serializing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    return 0;
}


int v3_deinit_vmx_vmcs(struct guest_info * core) {
    struct vmx_data * vmx_state = core->vmm_data;

    V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
    V3_FreePages(V3_PAddr(vmx_state->msr_area), 1);

    V3_Free(vmx_state);

    return 0;
}



#ifdef V3_CONFIG_CHECKPOINT
/*
 * JRL: This is broken
 */
int v3_vmx_save_core(struct guest_info * core, void * ctx){
    uint64_t vmcs_ptr = vmcs_store();

    v3_chkpt_save(ctx, "vmcs_data", PAGE_SIZE, (void *)vmcs_ptr);

    return 0;
}

int v3_vmx_load_core(struct guest_info * core, void * ctx){
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    struct cr0_32 * shadow_cr0;
    char vmcs[PAGE_SIZE_4KB];

    v3_chkpt_load(ctx, "vmcs_data", PAGE_SIZE_4KB, vmcs);

    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmcs_load((addr_t)vmcs);

    v3_vmx_save_vmcs(core);

    shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);


    /* Get the CPU mode to set the guest_ia32e entry ctrl */

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(core) == -1) {
                PrintError("Failed to activate shadow page tables\n");
                return -1;
            }
        } else {
            if (v3_activate_passthrough_pt(core) == -1) {
                PrintError("Failed to activate passthrough page tables\n");
                return -1;
            }
        }
    }

    return 0;
}
#endif


void v3_flush_vmx_vm_core(struct guest_info * core) {
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmx_info->state = VMX_UNLAUNCHED;
}



static int update_irq_exit_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));

    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Calling v3_injecting_intr\n");
#endif
        info->intr_core_state.irq_started = 0;
        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    return 0;
}

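/* Decide what event, if any, to inject on the next VM entry. Pending
 * exceptions take priority; otherwise, if the guest is interruptible
 * (RFLAGS.IF set and no interrupt shadow), a pending IRQ, NMI, or software
 * interrupt is written into the VM-entry interruption-information field.
 * The event "type" values below follow the VMCS encoding: 0 = external
 * interrupt, 2 = NMI, 3 = hardware exception, 4 = software interrupt. */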
static int update_irq_entry_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;
    struct vmcs_interrupt_state intr_core_state;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));

    /* Check for pending exceptions to inject */
    if (v3_excp_pending(info)) {
        struct vmx_entry_int_info int_info;
        int_info.value = 0;

        // In VMX, almost every exception is hardware
        // Software exceptions are pretty much only for breakpoint or overflow
        int_info.type = 3;
        int_info.vector = v3_get_excp_number(info);

        if (info->excp_state.excp_error_code_valid) {
            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
            int_info.error_code = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("Injecting exception %d with error code %x\n",
                    int_info.vector, info->excp_state.excp_error_code);
#endif
        }

        int_info.valid = 1;
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);

        v3_injecting_excp(info, int_info.vector);

    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) &&
               (intr_core_state.val == 0)) {

        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("IRQ pending from previous injection\n");
#endif

            // Copy the IDT vectoring info over to reinject the old interrupt
            if (idt_vec_info.error_code == 1) {
                uint32_t err_code = 0;

                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
            }

            idt_vec_info.undef = 0;
            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);

        } else {
            struct vmx_entry_int_info ent_int;
            ent_int.value = 0;

            switch (v3_intr_pending(info)) {
                case V3_EXTERNAL_IRQ: {
                    info->intr_core_state.irq_vector = v3_get_intr(info);
                    ent_int.vector = info->intr_core_state.irq_vector;
                    ent_int.type = 0;
                    ent_int.error_code = 0;
                    ent_int.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                    V3_Print("Injecting Interrupt %d at exit %u(EIP=%p)\n",
                               info->intr_core_state.irq_vector,
                               (uint32_t)info->num_exits,
                               (void *)(addr_t)info->rip);
#endif

                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    info->intr_core_state.irq_started = 1;

                    break;
                }
                case V3_NMI:
                    PrintDebug("Injecting NMI\n");

                    ent_int.type = 2;
                    ent_int.vector = 2;
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_SOFTWARE_INTR:
                    PrintDebug("Injecting software interrupt\n");
                    ent_int.type = 4;

                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_VIRTUAL_IRQ:
                    // Not sure what to do here, Intel doesn't have virtual IRQs
                    // May be the same as external interrupts/IRQs

                    break;
                case V3_INVALID_INTR:
                default:
                    break;
            }
        }
    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
        // Enable INTR window exiting so we know when IF=1
        uint32_t instr_len;

        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif

        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
    }


    return 0;
}


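/* Ring buffer holding the last 10 exits and the guest RIPs at which they
 * occurred, dumped by print_exit_log() when a VM entry fails. */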
static struct vmx_exit_info exit_log[10];
static uint64_t rip_log[10];



static void print_exit_log(struct guest_info * info) {
    int cnt = info->num_exits % 10;
    int i = 0;


    V3_Print("\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);

    for (i = 0; i < 10; i++) {
        struct vmx_exit_info * tmp = &exit_log[cnt];

        V3_Print("%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
        V3_Print("\texit_qual = %p\n", (void *)tmp->exit_qual);
        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);
        V3_Print("\tguest_linear_addr= %p\n", (void *)(addr_t)tmp->guest_linear_addr);
        V3_Print("\tRIP = %p\n", (void *)rip_log[cnt]);


        cnt--;

        if (cnt == -1) {
            cnt = 9;
        }

    }

}

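/* Program the VMX-preemption timer so the guest exits at its next scheduled
 * timeout. The timer counts down at the TSC rate divided by 2^tsc_multiple
 * (a shift value reported in the IA32_VMX_MISC MSR) and forces a VM exit
 * when it reaches zero. */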
int
v3_vmx_schedule_timeout(struct guest_info * info)
{
    struct vmx_data * vmx_state = (struct vmx_data *)(info->vmm_data);
    sint64_t cycles;
    uint32_t timeout;

    /* Check if the hardware supports an active timeout */
#define VMX_ACTIVE_PREEMPT_TIMER_PIN 0x40
    if (hw_info.pin_ctrls.req_mask & VMX_ACTIVE_PREEMPT_TIMER_PIN) {
        /* The hardware doesn't support us modifying this pin control */
        return 0;
    }

    /* Check if we have one to schedule and schedule it if we do */
    cycles = (sint64_t)info->time_state.next_timeout - (sint64_t)v3_get_guest_time(&info->time_state);
    if (info->time_state.next_timeout == (ullong_t) -1)  {
        timeout = 0;
        vmx_state->pin_ctrls.active_preempt_timer = 0;
    } else if (cycles < 0) {
        /* Set the timeout to 0 to force an immediate re-exit, since the timeout
         * expired between when we checked it and now. IF SOMEONE CONTINUALLY SETS
         * A SHORT TIMEOUT, THIS CAN LOCK US OUT OF THE GUEST! */
        timeout = 0;
        vmx_state->pin_ctrls.active_preempt_timer = 1;
    } else {
        /* The hardware supports scheduling a timeout, and we have one to
         * schedule */
        timeout = (uint32_t)cycles >> hw_info.misc_info.tsc_multiple;
        vmx_state->pin_ctrls.active_preempt_timer = 1;
    }

    /* Actually program the timer based on the settings above. */
    check_vmcs_write(VMCS_PREEMPT_TIMER, timeout);
    check_vmcs_write(VMCS_PIN_CTRLS, vmx_state->pin_ctrls.value);
    return 0;
}

/*
 * CAUTION and DANGER!!!
 *
 * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function.
 * When executing a symbiotic call, the VMCS WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_vmx_enter(struct guest_info * info) {
    int ret = 0;
    sint64_t tsc_offset;
    uint32_t tsc_offset_low, tsc_offset_high;
    struct vmx_exit_info exit_info;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    // Perform any additional yielding needed for time adjustment
    v3_adjust_time(info);

    // Check for timeout - since this calls generic hooks in devices
    // that may do things like pause the VM, it cannot be done with
    // interrupts disabled.
    v3_check_timeout(info);

    // disable global interrupts for vm state transition
    v3_disable_ints();

    // Update timer devices late after being in the VM so that as much
    // of the time in the VM is accounted for as possible. Also do it before
    // updating IRQ entry state so that any interrupts the timers raise get
    // handled on the next VM entry. Must be done with interrupts disabled.
    v3_update_timers(info);

    if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
        vmcs_clear(vmx_info->vmcs_ptr_phys);
        vmcs_load(vmx_info->vmcs_ptr_phys);
        vmx_info->state = VMX_UNLAUNCHED;
    }

    v3_vmx_restore_vmcs(info);


#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

    {
        addr_t guest_cr3;
        vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
        vmcs_write(VMCS_GUEST_CR3, guest_cr3);
    }

    // Update vmx active preemption timer to exit at the next timeout if
    // the hardware supports it.
    v3_vmx_schedule_timeout(info);

    // Perform last-minute time bookkeeping prior to entering the VM
    v3_time_enter_vm(info);

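    /* With the TSC-offsetting control enabled, the hardware adds this signed
     * offset to the host TSC whenever the guest reads the TSC. The 64-bit
     * offset is written as two 32-bit VMCS field halves. */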
    tsc_offset = v3_tsc_host_offset(&info->time_state);
    tsc_offset_high = (uint32_t)((tsc_offset >> 32) & 0xffffffff);
    tsc_offset_low = (uint32_t)(tsc_offset & 0xffffffff);

    check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
    check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);

    if (v3_update_vmcs_host_state(info)) {
        v3_enable_ints();
        PrintError("Could not write host state\n");
        return -1;
    }


    if (vmx_info->state == VMX_UNLAUNCHED) {
        vmx_info->state = VMX_LAUNCHED;
        ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
    } else {
        V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
        ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
    }


    //  PrintDebug("VMX Exit: ret=%d\n", ret);

    if (ret != VMX_SUCCESS) {
        uint32_t error = 0;
        vmcs_read(VMCS_INSTR_ERR, &error);

        v3_enable_ints();

        PrintError("VMENTRY Error: %d (launch_ret = %d)\n", error, ret);
        return -1;
    }


    // Immediate exit from VM time bookkeeping
    v3_time_exit_vm(info);

    info->num_exits++;

    /* Update guest state */
    v3_vmx_save_vmcs(info);

    // info->cpl = info->segments.cs.selector & 0x3;

    info->mem_mode = v3_get_vm_mem_mode(info);
    info->cpu_mode = v3_get_vm_cpu_mode(info);


    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));

    if (info->shdw_pg_mode == NESTED_PAGING) {
        check_vmcs_read(VMCS_GUEST_PHYS_ADDR, &(exit_info.ept_fault_addr));
    }

    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);

    exit_log[info->num_exits % 10] = exit_info;
    rip_log[info->num_exits % 10] = get_addr_linear(info, info->rip, &(info->segments.cs));

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    if (exit_info.exit_reason == VMEXIT_INTR_WINDOW) {
        // This is a special case whose only job is to inject an interrupt
        vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
        vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
        vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Interrupts available again! (RIP=%llx)\n", info->rip);
#endif
    }

    // reenable global interrupts after vm exit
    v3_enable_ints();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError("Error in VMX exit handler (Exit reason=%x)\n", exit_info.exit_reason);
        return -1;
    }

    return 0;
}


int v3_start_vmx_guest(struct guest_info * info) {

    PrintDebug("Starting VMX core %u\n", info->vcpu_id);

    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
    } else {

        PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);

        while (info->core_run_state == CORE_STOPPED) {

            if (info->vm_info->run_state == VM_STOPPED) {
                // The VM was stopped before this core was initialized.
                return 0;
            }

            v3_yield(info);
            //PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
        }

        PrintDebug("VMX core %u initialized\n", info->vcpu_id);

        // We'll be paranoid about race conditions here
        v3_wait_at_barrier(info);
    }


    PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x),  RIP=0x%p\n",
               info->vcpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));


    PrintDebug("VMX core %u: Launching VMX VM on logical core %u\n", info->vcpu_id, info->pcpu_id);

    v3_start_time(info);

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if (v3_vmx_enter(info) == -1) {

            addr_t host_addr;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print("VMX core %u: VMX ERROR!!\n", info->vcpu_id);

            v3_print_guest_state(info);

            V3_Print("VMX core %u\n", info->vcpu_id);

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                v3_gpa_to_hva(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }

            V3_Print("VMX core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print("VMX core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);


            v3_print_vmcs();
            print_exit_log(info);
            return -1;
        }

        v3_wait_at_barrier(info);


        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }
/*
        if ((info->num_exits % 5000) == 0) {
            V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
*/

    }

    return 0;
}




#define VMX_FEATURE_CONTROL_MSR     0x0000003a
#define CPUID_VMX_FEATURES 0x00000005  /* LOCK and VMXON */
#define CPUID_1_ECX_VTXFLAG 0x00000020

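/* Check whether this CPU can run VMX guests: CPUID.1:ECX bit 5 reports VMX
 * support, and the IA32_FEATURE_CONTROL MSR must have both its lock bit and
 * its VMXON-enable bit set (normally done by the BIOS). */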
int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: 0x%x\n", ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & CPUID_VMX_FEATURES) != CPUID_VMX_FEATURES) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }

    } else {
        PrintDebug("VMX not supported on this cpu\n");
        return 0;
    }

    return 1;
}

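/* Reset a core for an INIT/SIPI-style restart. With EPT + unrestricted
 * guest, the core is placed directly in the SIPI start state: for a start
 * vector V (passed in the rip argument), execution begins at (V << 8):0000,
 * i.e. physical address V << 12. Without unrestricted guest, the vector is
 * instead handed to the guest boot path in RBX, with the core id in RDX. */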
int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip) {
    // init vmcs bios

    if ((core->shdw_pg_mode == NESTED_PAGING) &&
        (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        // easy
        core->rip = 0;
        core->segments.cs.selector = rip << 8;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = rip << 12;
    } else {
        core->vm_regs.rdx = core->vcpu_id;
        core->vm_regs.rbx = rip;
    }

    return 0;
}


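/* Per-CPU VMX bring-up: detect hardware features once, set CR4.VMXE via
 * enable_vmx(), and execute VMXON. The VMXON region has the same size and
 * revision-id layout as a VMCS, which is why allocate_vmcs() is reused
 * for it. */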
void v3_init_vmx_cpu(int cpu_id) {
    addr_t vmx_on_region = 0;
    extern v3_cpu_arch_t v3_mach_type;

    if (v3_mach_type == V3_INVALID_CPU) {
        if (v3_init_vmx_hw(&hw_info) == -1) {
            PrintError("Could not initialize VMX hardware features on cpu %d\n", cpu_id);
            return;
        }
    }

    enable_vmx();


    // Setup VMXON Region
    vmx_on_region = allocate_vmcs();


    if (vmx_on(vmx_on_region) == VMX_SUCCESS) {
        V3_Print("VMX Enabled\n");
        host_vmcs_ptrs[cpu_id] = vmx_on_region;
    } else {
        V3_Print("VMX already enabled\n");
        V3_FreePages((void *)vmx_on_region, 1);
    }

    PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);

    {
        struct vmx_sec_proc_ctrls sec_proc_ctrls;
        sec_proc_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.sec_proc_ctrls));

        if (sec_proc_ctrls.enable_ept == 0) {
            V3_Print("VMX EPT (Nested) Paging not supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_CPU;
        } else if (sec_proc_ctrls.unrstrct_guest == 0) {
            V3_Print("VMX EPT (Nested) Paging supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
        } else {
            V3_Print("VMX EPT (Nested) Paging + Unrestricted guest supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_UG_CPU;
        }
    }

}


void v3_deinit_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    if (host_vmcs_ptrs[cpu_id] != 0) {
        V3_Print("Disabling VMX\n");

        if (vmx_off() != VMX_SUCCESS) {
            PrintError("Error executing VMXOFF\n");
        }

        V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);

        host_vmcs_ptrs[cpu_id] = 0;
    }
}