Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
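For example, to track a release branch instead (the branch name below is only illustrative; run "git branch -r" inside the clone to list the branches that actually exist):

  cd palacios
  git checkout --track -b Release-1.2 origin/Release-1.2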


Commit: bug fixes for VMX
File: palacios/src/palacios/vmx.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx.h>
#include <palacios/vmm.h>
#include <palacios/vmx_handler.h>
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_time.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_barrier.h>

#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmx_ept.h>
#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>

#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

/* These fields contain the hardware feature sets supported by the local CPU */
static struct vmx_hw_info hw_info;

extern v3_cpu_arch_t v3_cpu_types[];

static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0 };

extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
static inline int check_vmcs_write(vmcs_field_t field, addr_t val) {
    int ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}

static inline int check_vmcs_read(vmcs_field_t field, void * val) {
    int ret = vmcs_read(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
    }

    return ret;
}
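
/* Both wrappers return nonzero on failure, which lets callers batch a series
 * of VMCS accesses by accumulating results with |= and checking the combined
 * value once (see the vmx_ret and msr_ret patterns in init_vmcs_bios below). */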

static addr_t allocate_vmcs(void) {
    struct vmcs_data * vmcs_page = NULL;

    PrintDebug("Allocating page\n");

    vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
    memset(vmcs_page, 0, 4096);

    vmcs_page->revision = hw_info.basic_info.revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}
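
/* Per the Intel SDM, a VMCS region must be page aligned and its first 32 bits
 * must hold the VMCS revision identifier reported by the IA32_VMX_BASIC MSR;
 * the allocation above satisfies both by using a full zeroed page and stamping
 * the revision cached in hw_info. The same layout is reused for the VMXON
 * region in v3_init_vmx_cpu. */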

/*
static int debug_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
    V3_Print("\n\nEFER READ\n");

    v3_print_guest_state(core);

    src->value = efer->value;
    return 0;
}

static int debug_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
    struct v3_msr * efer = (struct v3_msr *)&(core->ctrl_regs.efer);
    V3_Print("\n\nEFER WRITE\n");

    v3_print_guest_state(core);

    efer->value = src.value;

    {
        struct vmx_data * vmx_state = core->vmm_data;

        V3_Print("Trapping page faults and GPFs\n");
        vmx_state->excp_bmap.pf = 1;
        vmx_state->excp_bmap.gp = 1;

        check_vmcs_write(VMCS_EXCP_BITMAP, vmx_state->excp_bmap.value);
    }

    return 0;
}
*/

static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
    int vmx_ret = 0;

    // Disable global interrupts for VM state initialization
    v3_disable_ints();

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
    vmx_state->state = VMX_UNLAUNCHED;

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }

    /*** Setup default state from HW ***/

    vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
    vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
    vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
    vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;
    vmx_state->sec_proc_ctrls.value = hw_info.sec_proc_ctrls.def_val;

    /* Print Control MSRs */
    PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
    PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);

    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */

    /********** Setup VMX Control Fields ***********/

    /* Exit on external interrupts and NMIs */
    vmx_state->pin_ctrls.nmi_exit = 1;
    vmx_state->pin_ctrls.ext_int_exit = 1;

    vmx_state->pri_proc_ctrls.hlt_exit = 1;

    vmx_state->pri_proc_ctrls.pause_exit = 0;
    vmx_state->pri_proc_ctrls.tsc_offset = 1;
#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
    vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif

    /* Setup IO map */
    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(core->vm_info->io_map.arch_data));
    vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
            (addr_t)V3_PAddr(core->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);
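
    /* The two 4KB I/O bitmaps together cover the full 16-bit port space:
     * bitmap A covers ports 0x0000-0x7fff and bitmap B covers 0x8000-0xffff,
     * which is why bitmap B sits exactly one 4KB page above bitmap A. */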

    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
    vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data));

#ifdef __V3_64BIT__
    // Ensure the host runs in 64-bit mode at each VM EXIT
    vmx_state->exit_ctrls.host_64_on = 1;
#endif

    // Restore the host's EFER register on each VM EXIT
    vmx_state->exit_ctrls.ld_efer = 1;

    // Save/restore the guest's EFER register to/from the VMCS on VM EXIT/ENTRY
    vmx_state->exit_ctrls.save_efer = 1;
    vmx_state->entry_ctrls.ld_efer  = 1;

    vmx_state->exit_ctrls.save_pat = 1;
    vmx_state->exit_ctrls.ld_pat = 1;
    vmx_state->entry_ctrls.ld_pat = 1;

    /* Temporary GPF trap */
    vmx_state->excp_bmap.gp = 1;

    // Set up the guest's initial PAT field
    vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
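
    /* 0x0007040600070406 is the architectural power-on default for IA32_PAT:
     * WB, WT, UC-, UC in entries 0-3, repeated for entries 4-7. */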

    /* Setup paging */
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return -1;
        }

#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
#define CR0_WP 0x00010000 // To ensure mem hooks work
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));

        // Cause a VM_EXIT whenever the CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        core->ctrl_regs.cr3 = core->direct_map_pt;

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        /* Add CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;

        vmx_state->pri_proc_ctrls.invlpg_exit = 1;

        /* Add page fault exits */
        vmx_state->excp_bmap.pf = 1;

        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);

        // Hook all accesses to the EFER register
        v3_hook_msr(core->vm_info, EFER_MSR,
                    &v3_handle_efer_read,
                    &v3_handle_efer_write,
                    core);
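
        /* CR0/CR4 guest/host mask semantics (Intel SDM): a 1 bit in
         * VMCS_CR0_MASK or VMCS_CR4_MASK makes that control-register bit
         * host-owned -- guest reads of it return the read shadow, and guest
         * writes that would change it from the shadow value cause a VM exit.
         * That is what lets the shadow-paging code intercept PE/PG/WP and
         * VMXE/PAE changes above. */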

    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {

        // CR0_PE, CR0_PG, and CR0_WP are defined in the shadow-paging branch above
        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));

        // vmx_state->pinbased_ctrls |= NMI_EXIT;

        // Cause a VM_EXIT whenever the CR4.VMXE or CR4.PAE bits are written
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE | CR4_PAE);

        /* Disable CR exits */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        /* Add page fault exits */
        //      vmx_state->excp_bmap.pf = 1; // This should never happen..., enabled to catch bugs

        // Setup VMX Assist
        v3_vmxassist_init(core, vmx_state);

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // Enable EPT paging

        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to the EFER register
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);

    } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
               (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        int i = 0;
        // For now we assume that unrestricted guest mode is always paired with EPT

        core->vm_regs.rsp = 0x00;
        core->rip = 0xfff0;
        core->vm_regs.rdx = 0x00000f00;
        core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
        core->ctrl_regs.cr0 = 0x00000030;
        core->ctrl_regs.cr4 = 0x00002010; // Enable the VMX and PSE flags

        core->segments.cs.selector = 0xf000;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = 0x0000000f0000LL;

        // (raw attributes = 0xf3)
        core->segments.cs.type = 0xb;
        core->segments.cs.system = 0x1;
        core->segments.cs.dpl = 0x0;
        core->segments.cs.present = 1;
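
        /* This register/segment state approximates the x86 power-on state
         * (RIP 0xfff0 with CS selector 0xf000, here based at 0xf0000 so
         * execution starts at the reset vector of the BIOS copy below 1MB),
         * letting an unmodified BIOS boot directly under unrestricted-guest
         * mode without VMX Assist trickery. */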

        struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                          &(core->segments.es), &(core->segments.fs),
                                          &(core->segments.gs), NULL};

        for (i = 0; segregs[i] != NULL; i++) {
            struct v3_segment * seg = segregs[i];

            seg->selector = 0x0000;
            //    seg->base = seg->selector << 4;
            seg->base = 0x00000000;
            seg->limit = 0xffff;

            seg->type = 0x3;
            seg->system = 0x1;
            seg->dpl = 0x0;
            seg->present = 1;
            //    seg->granularity = 1;
        }

        core->segments.gdtr.limit = 0x0000ffff;
        core->segments.gdtr.base = 0x0000000000000000LL;

        core->segments.idtr.limit = 0x0000ffff;
        core->segments.idtr.base = 0x0000000000000000LL;

        core->segments.ldtr.selector = 0x0000;
        core->segments.ldtr.limit = 0x0000ffff;
        core->segments.ldtr.base = 0x0000000000000000LL;
        core->segments.ldtr.type = 0x2;
        core->segments.ldtr.present = 1;

        core->segments.tr.selector = 0x0000;
        core->segments.tr.limit = 0x0000ffff;
        core->segments.tr.base = 0x0000000000000000LL;
        core->segments.tr.type = 0xb;
        core->segments.tr.present = 1;

        //      core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
        core->dbg_regs.dr7 = 0x0000000000000400LL;

        /* Enable EPT */
        vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
        vmx_state->sec_proc_ctrls.enable_ept = 1; // Enable EPT paging
        vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // Enable unrestricted guest operation

        /* Disable shadow paging stuff */
        vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
        vmx_state->pri_proc_ctrls.cr3_str_exit = 0;

        vmx_state->pri_proc_ctrls.invlpg_exit = 0;

        // Cause a VM_EXIT whenever the CR4.VMXE bit is set
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);

        if (v3_init_ept(core, &hw_info) == -1) {
            PrintError("Error initializing EPT\n");
            return -1;
        }

        // Hook all accesses to the EFER register
        //v3_hook_msr(core->vm_info, EFER_MSR, &debug_efer_read, &debug_efer_write, core);
        v3_hook_msr(core->vm_info, EFER_MSR, NULL, NULL, NULL);
    } else {
        PrintError("Invalid virtual paging mode\n");
        return -1;
    }

    // hook vmx msrs

    // Setup SYSCALL/SYSENTER MSRs in the load/store area

    // Save STAR, LSTAR, FMASK, and KERNEL_GS_BASE MSRs in the MSR load/store area
    {
        struct vmcs_msr_save_area * msr_entries = NULL;
        int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;
        int msr_ret = 0;

        V3_Print("Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);

        if (max_msrs < 4) {
            PrintError("Max MSR cache size is too small (%d)\n", max_msrs);
            return -1;
        }

        vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1);

        if (vmx_state->msr_area_paddr == (addr_t)NULL) {
            PrintError("Could not allocate the MSR load/store area\n");
            return -1;
        }

        msr_entries = (struct vmcs_msr_save_area *)V3_VAddr((void *)(vmx_state->msr_area_paddr));
        vmx_state->msr_area = msr_entries; // cache in vmx_info

        memset(msr_entries, 0, PAGE_SIZE);

        msr_entries->guest_star.index = IA32_STAR_MSR;
        msr_entries->guest_lstar.index = IA32_LSTAR_MSR;
        msr_entries->guest_fmask.index = IA32_FMASK_MSR;
        msr_entries->guest_kern_gs.index = IA32_KERN_GS_BASE_MSR;

        msr_entries->host_star.index = IA32_STAR_MSR;
        msr_entries->host_lstar.index = IA32_LSTAR_MSR;
        msr_entries->host_fmask.index = IA32_FMASK_MSR;
        msr_entries->host_kern_gs.index = IA32_KERN_GS_BASE_MSR;

        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
        msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);

        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
        msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->guest_msrs));
        msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(msr_entries->host_msrs));

        msr_ret |= v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);

        // IMPORTANT: These MSRs appear to be cached by the hardware....
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);

        msr_ret |= v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        msr_ret |= v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);

        msr_ret |= v3_hook_msr(core->vm_info, IA32_PAT_MSR, NULL, NULL, NULL);

        // Not sure what to do about this... Does not appear to be an explicit hardware cache version...
        msr_ret |= v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);

        if (msr_ret != 0) {
            PrintError("Error configuring MSR save/restore area\n");
            return -1;
        }
    }
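
    /* With the counts and addresses programmed above, hardware stores the
     * guest copies of the four listed MSRs into the exit-store area and loads
     * the host copies from the exit-load area on every VM exit, then reloads
     * the guest copies on VM entry -- so the syscall-related MSRs are swapped
     * automatically, with no explicit save/restore in the exit path. */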

    /* Sanity check ctrl/reg fields against hw_defaults */

    /*** Write all the info to the VMCS ***/

    /*
    {
        // IS THIS NECESSARY???
#define DEBUGCTL_MSR 0x1d9
        struct v3_msr tmp_msr;
        v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
        vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
        core->dbg_regs.dr7 = 0x400;
    }
    */

#ifdef __V3_64BIT__
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
#else
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffUL);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif
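
    /* The VMCS link pointer must be all 1s when VMCS shadowing is not in use;
     * any other value fails VM entry's consistency checks (Intel SDM). */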

    // Check the accumulated result of the VMCS writes above, mirroring the
    // msr_ret check in the MSR setup block; previously vmx_ret was collected
    // but never tested
    if (vmx_ret != 0) {
        PrintError("Error writing VMCS fields during initialization\n");
        return -1;
    }

    if (v3_update_vmcs_ctrl_fields(core)) {
        PrintError("Could not write control fields!\n");
        return -1;
    }

    /*
    if (v3_update_vmcs_host_state(core)) {
        PrintError("Could not write host state\n");
        return -1;
    }
    */

    // Re-enable global interrupts now that the VM state is initialized.
    // If another VM kicks us off this core, it will update our VMX state
    // so that we know to reload ourselves
    v3_enable_ints();

    return 0;
}

int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
    struct vmx_data * vmx_state = NULL;
    int vmx_ret = 0;

    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
    memset(vmx_state, 0, sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);

    PrintDebug("Allocating VMCS\n");
    vmx_state->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));

    core->vmm_data = vmx_state;
    vmx_state->state = VMX_UNLAUNCHED;

    PrintDebug("Initializing VMCS (addr=%p)\n", core->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit

    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }
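
    /* VMCLEAR flushes any processor-cached VMCS state to memory and marks the
     * VMCS "clear", which forces the first entry to use VMLAUNCH rather than
     * VMRESUME (see the launched/unlaunched tracking in v3_vmx_enter). */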

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCS\n");
        if (init_vmcs_bios(core, vmx_state) == -1) {
            PrintError("Error initializing VMCS to BIOS state\n");
            return -1;
        }
    } else {
        PrintError("Invalid VM Class\n");
        return -1;
    }

    PrintDebug("Serializing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);

    return 0;
}

int v3_deinit_vmx_vmcs(struct guest_info * core) {
    struct vmx_data * vmx_state = core->vmm_data;

    V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
    V3_FreePages(V3_PAddr(vmx_state->msr_area), 1);

    V3_Free(vmx_state);

    return 0;
}

#ifdef V3_CONFIG_CHECKPOINT
/*
 * JRL: This is broken
 */
int v3_vmx_save_core(struct guest_info * core, void * ctx){
    uint64_t vmcs_ptr = vmcs_store();

    v3_chkpt_save(ctx, "vmcs_data", PAGE_SIZE, (void *)vmcs_ptr);

    return 0;
}

int v3_vmx_load_core(struct guest_info * core, void * ctx){
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    struct cr0_32 * shadow_cr0;
    char vmcs[PAGE_SIZE_4KB];

    v3_chkpt_load(ctx, "vmcs_data", PAGE_SIZE_4KB, vmcs);

    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmcs_load((addr_t)vmcs);

    v3_vmx_save_vmcs(core);

    shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);

    /* Get the CPU mode to set the guest_ia32e entry ctrl */

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
            if (v3_activate_shadow_pt(core) == -1) {
                PrintError("Failed to activate shadow page tables\n");
                return -1;
            }
        } else {
            if (v3_activate_passthrough_pt(core) == -1) {
                PrintError("Failed to activate passthrough page tables\n");
                return -1;
            }
        }
    }

    return 0;
}
#endif

void v3_flush_vmx_vm_core(struct guest_info * core) {
    struct vmx_data * vmx_info = (struct vmx_data *)(core->vmm_data);
    vmcs_clear(vmx_info->vmcs_ptr_phys);
    vmx_info->state = VMX_UNLAUNCHED;
}

static int update_irq_exit_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));

    if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Calling v3_injecting_intr\n");
#endif
        info->intr_core_state.irq_started = 0;
        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    return 0;
}

static int update_irq_entry_state(struct guest_info * info) {
    struct vmx_exit_idt_vec_info idt_vec_info;
    struct vmcs_interrupt_state intr_core_state;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
    check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));

    /* Check for pending exceptions to inject */
    if (v3_excp_pending(info)) {
        struct vmx_entry_int_info int_info;
        int_info.value = 0;

        // In VMX, almost every exception is hardware;
        // software exceptions are pretty much only for breakpoint or overflow
        int_info.type = 3;
        int_info.vector = v3_get_excp_number(info);

        if (info->excp_state.excp_error_code_valid) {
            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
            int_info.error_code = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("Injecting exception %d with error code %x\n",
                    int_info.vector, info->excp_state.excp_error_code);
#endif
        }

        int_info.valid = 1;
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
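
        /* VM-entry interruption-information layout (Intel SDM): bits 7:0 are
         * the vector, bits 10:8 the type (0 = external interrupt, 2 = NMI,
         * 3 = hardware exception, 4 = software interrupt), bit 11 the
         * deliver-error-code flag, and bit 31 the valid bit -- matching the
         * int_info fields set above and the switch cases below. */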
        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);

        v3_injecting_excp(info, int_info.vector);

    } else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) &&
               (intr_core_state.val == 0)) {

        if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            V3_Print("IRQ pending from previous injection\n");
#endif

            // Copy the IDT vectoring info over to reinject the old interrupt
            if (idt_vec_info.error_code == 1) {
                uint32_t err_code = 0;

                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
            }

            idt_vec_info.undef = 0;
            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);

        } else {
            struct vmx_entry_int_info ent_int;
            ent_int.value = 0;

            switch (v3_intr_pending(info)) {
                case V3_EXTERNAL_IRQ: {
                    info->intr_core_state.irq_vector = v3_get_intr(info);
                    ent_int.vector = info->intr_core_state.irq_vector;
                    ent_int.type = 0;
                    ent_int.error_code = 0;
                    ent_int.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                    V3_Print("Injecting Interrupt %d at exit %u (EIP=%p)\n",
                               info->intr_core_state.irq_vector,
                               (uint32_t)info->num_exits,
                               (void *)(addr_t)info->rip);
#endif

                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
                    info->intr_core_state.irq_started = 1;

                    break;
                }
                case V3_NMI:
                    PrintDebug("Injecting NMI\n");

                    ent_int.type = 2;
                    ent_int.vector = 2;
                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_SOFTWARE_INTR:
                    PrintDebug("Injecting software interrupt\n");
                    ent_int.type = 4;

                    ent_int.valid = 1;
                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);

                    break;
                case V3_VIRTUAL_IRQ:
                    // Not sure what to do here; Intel doesn't have virtual IRQs.
                    // They may be the same as external interrupts/IRQs
                    break;
                case V3_INVALID_INTR:
                default:
                    break;
            }
        }
    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
        // Enable INTR window exiting so we know when IF=1
        uint32_t instr_len;

        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif

        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
    }

    return 0;
}

static struct vmx_exit_info exit_log[10];
static uint64_t rip_log[10];

static void print_exit_log(struct guest_info * info) {
    int cnt = info->num_exits % 10;
    int i = 0;

    V3_Print("\nExit Log (%d total exits):\n", (uint32_t)info->num_exits);

    for (i = 0; i < 10; i++) {
        struct vmx_exit_info * tmp = &exit_log[cnt];

        V3_Print("%d:\texit_reason = %p\n", i, (void *)(addr_t)tmp->exit_reason);
        V3_Print("\texit_qual = %p\n", (void *)tmp->exit_qual);
        V3_Print("\tint_info = %p\n", (void *)(addr_t)tmp->int_info);
        V3_Print("\tint_err = %p\n", (void *)(addr_t)tmp->int_err);
        V3_Print("\tinstr_info = %p\n", (void *)(addr_t)tmp->instr_info);
        V3_Print("\tguest_linear_addr = %p\n", (void *)(addr_t)tmp->guest_linear_addr);
        V3_Print("\tRIP = %p\n", (void *)rip_log[cnt]);

        cnt--;

        if (cnt == -1) {
            cnt = 9;
        }
    }
}

int
v3_vmx_schedule_timeout(struct guest_info * info)
{
    struct vmx_data * vmx_state = (struct vmx_data *)(info->vmm_data);
    sint64_t cycles;
    uint32_t timeout;

    /* Check if the hardware supports an active timeout */
#define VMX_ACTIVE_PREEMPT_TIMER_PIN 0x40
    if (hw_info.pin_ctrls.req_mask & VMX_ACTIVE_PREEMPT_TIMER_PIN) {
        /* The hardware doesn't support us modifying this pin control */
        return 0;
    }

    /* Check if we have a timeout to schedule, and schedule it if we do */
    cycles = (sint64_t)info->time_state.next_timeout - (sint64_t)v3_get_guest_time(&info->time_state);
    if (info->time_state.next_timeout == (ullong_t) -1) {
        timeout = 0;
        vmx_state->pin_ctrls.active_preempt_timer = 0;
    } else if (cycles < 0) {
        /* Set the timeout to 0 to force an immediate re-exit, since the timeout expired
         * between when we checked it and now. IF SOMEONE CONTINUALLY SETS A SHORT TIMEOUT,
         * THIS CAN LOCK US OUT OF THE GUEST! */
        timeout = 0;
        vmx_state->pin_ctrls.active_preempt_timer = 1;
    } else {
        /* The hardware supports scheduling a timeout, and we have one to
         * schedule */
        timeout = (uint32_t)cycles >> hw_info.misc_info.tsc_multiple;
        vmx_state->pin_ctrls.active_preempt_timer = 1;
    }

    /* Actually program the timer based on the settings above */
    check_vmcs_write(VMCS_PREEMPT_TIMER, timeout);
    check_vmcs_write(VMCS_PIN_CTRLS, vmx_state->pin_ctrls.value);
    return 0;
}
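
/* The VMX-preemption timer counts down at the TSC rate divided by 2^X, where
 * X is reported in bits 4:0 of the IA32_VMX_MISC MSR; the shift by
 * hw_info.misc_info.tsc_multiple above converts a TSC cycle count into
 * preemption-timer ticks accordingly. */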

/*
 * CAUTION and DANGER!!!
 *
 * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function.
 * When executing a symbiotic call, the VMCS WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_vmx_enter(struct guest_info * info) {
    int ret = 0;
    uint32_t tsc_offset_low, tsc_offset_high;
    struct vmx_exit_info exit_info;
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    // Perform any additional yielding needed for time adjustment
    v3_adjust_time(info);

    // Check for timeout - since this calls generic hooks in devices
    // that may do things like pause the VM, it cannot be done with
    // interrupts disabled.
    v3_check_timeout(info);

    // Disable global interrupts for the VM state transition
    v3_disable_ints();

    // Update timer devices late after being in the VM so that as much
    // of the time in the VM is accounted for as possible. Also do it before
    // updating IRQ entry state so that any interrupts the timers raise get
    // handled on the next VM entry. Must be done with interrupts disabled.
    v3_update_timers(info);

    if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
        vmcs_clear(vmx_info->vmcs_ptr_phys);
        vmcs_load(vmx_info->vmcs_ptr_phys);
        vmx_info->state = VMX_UNLAUNCHED;
    }
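
    /* vmcs_store() (VMPTRST) returns this CPU's current-VMCS pointer. If
     * another VM ran on this core since our last entry, the pointer has
     * changed, so the check above clears and reloads (VMPTRLD) our VMCS and
     * resets the launch state to force a fresh VMLAUNCH. */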

    v3_vmx_restore_vmcs(info);

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif

    {
        addr_t guest_cr3;
        vmcs_read(VMCS_GUEST_CR3, &guest_cr3);
        vmcs_write(VMCS_GUEST_CR3, guest_cr3);
    }

    // Update the VMX active preemption timer to exit at the next timeout if
    // the hardware supports it
    v3_vmx_schedule_timeout(info);

    // Perform last-minute time bookkeeping prior to entering the VM
    v3_time_enter_vm(info);

    tsc_offset_high = (uint32_t)((v3_tsc_host_offset(&info->time_state) >> 32) & 0xffffffff);
    tsc_offset_low = (uint32_t)(v3_tsc_host_offset(&info->time_state) & 0xffffffff);
    check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
    check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);

    if (v3_update_vmcs_host_state(info)) {
        v3_enable_ints();
        PrintError("Could not write host state\n");
        return -1;
    }

    if (vmx_info->state == VMX_UNLAUNCHED) {
        vmx_info->state = VMX_LAUNCHED;
        ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
    } else {
        V3_ASSERT(vmx_info->state != VMX_UNLAUNCHED);
        ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
    }
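
    /* Hardware requires VMLAUNCH for a VMCS in the "clear" state and VMRESUME
     * for one already launched on this CPU; using the wrong instruction fails
     * the entry, which is why the launched/unlaunched state is tracked. */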

    //  PrintDebug("VMX Exit: ret=%d\n", ret);

    if (ret != VMX_SUCCESS) {
        uint32_t error = 0;
        vmcs_read(VMCS_INSTR_ERR, &error);

        v3_enable_ints();

        PrintError("VMENTRY Error: %d (launch_ret = %d)\n", error, ret);
        return -1;
    }

    // Immediate exit from VM time bookkeeping
    v3_time_exit_vm(info);

    info->num_exits++;

    /* Update guest state */
    v3_vmx_save_vmcs(info);

    // info->cpl = info->segments.cs.selector & 0x3;

    info->mem_mode = v3_get_vm_mem_mode(info);
    info->cpu_mode = v3_get_vm_cpu_mode(info);

    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));

    if (info->shdw_pg_mode == NESTED_PAGING) {
        check_vmcs_read(VMCS_GUEST_PHYS_ADDR, &(exit_info.ept_fault_addr));
    }

    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);

    exit_log[info->num_exits % 10] = exit_info;
    rip_log[info->num_exits % 10] = get_addr_linear(info, info->rip, &(info->segments.cs));

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    if (exit_info.exit_reason == VMEXIT_INTR_WINDOW) {
        // This is a special case whose only job is to inject an interrupt
        vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
        vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
        vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        V3_Print("Interrupts available again! (RIP=%llx)\n", info->rip);
#endif
    }

    // Re-enable global interrupts after the VM exit
    v3_enable_ints();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info);

    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
        PrintError("Error in VMX exit handler (Exit reason=%x)\n", exit_info.exit_reason);
        return -1;
    }

    return 0;
}

int v3_start_vmx_guest(struct guest_info * info) {

    PrintDebug("Starting VMX core %u\n", info->vcpu_id);

    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
    } else {

        PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);

        while (info->core_run_state == CORE_STOPPED) {

            if (info->vm_info->run_state == VM_STOPPED) {
                // The VM was stopped before this core was initialized
                return 0;
            }

            v3_yield(info);
            //PrintDebug("VMX core %u: still waiting for INIT\n", info->vcpu_id);
        }

        PrintDebug("VMX core %u initialized\n", info->vcpu_id);

        // We'll be paranoid about race conditions here
        v3_wait_at_barrier(info);
    }

    PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->vcpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));

    PrintDebug("VMX core %u: Launching VMX VM on logical core %u\n", info->vcpu_id, info->pcpu_id);

    v3_start_time(info);

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if (v3_vmx_enter(info) == -1) {

            addr_t host_addr;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print("VMX core %u: VMX ERROR!!\n", info->vcpu_id);

            v3_print_guest_state(info);

            V3_Print("VMX core %u\n", info->vcpu_id);

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                v3_gpa_to_hva(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }

            V3_Print("VMX core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print("VMX core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);

            v3_print_vmcs();
            print_exit_log(info);
            return -1;
        }

        v3_wait_at_barrier(info);

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }
/*
        if ((info->num_exits % 5000) == 0) {
            V3_Print("VMX Exit number %d\n", (uint32_t)info->num_exits);
        }
*/
    }

    return 0;
}

#define VMX_FEATURE_CONTROL_MSR     0x0000003a
#define CPUID_VMX_FEATURES 0x00000005  /* LOCK and VMXON */
#define CPUID_1_ECX_VTXFLAG 0x00000020
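
/* CPUID.1:ECX bit 5 (0x20) reports VMX support. In IA32_FEATURE_CONTROL
 * (MSR 0x3a), bit 0 is the lock bit and bit 2 enables VMXON outside SMX
 * operation, so the 0x5 check below requires firmware to have set both. */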

int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: 0x%x\n", ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & CPUID_VMX_FEATURES) != CPUID_VMX_FEATURES) {
            PrintDebug("VMX is locked -- enable it in the BIOS\n");
            return 0;
        }

    } else {
        PrintDebug("VMX is not supported on this CPU\n");
        return 0;
    }

    return 1;
}

int v3_reset_vmx_vm_core(struct guest_info * core, addr_t rip) {
    // init vmcs bios

    if ((core->shdw_pg_mode == NESTED_PAGING) &&
        (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
        // easy
        core->rip = 0;
        core->segments.cs.selector = rip << 8;
        core->segments.cs.limit = 0xffff;
        core->segments.cs.base = rip << 12;
    } else {
        core->vm_regs.rdx = core->vcpu_id;
        core->vm_regs.rbx = rip;
    }

    return 0;
}
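
/* In the unrestricted-guest case, "rip" appears to carry a SIPI-style start
 * vector: a startup IPI with vector VV begins real-mode execution at
 * physical address VV << 12, i.e. CS:IP = (VV << 8):0000, which matches the
 * selector/base computation above with core->rip set to 0. */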

void v3_init_vmx_cpu(int cpu_id) {
    addr_t vmx_on_region = 0;
    extern v3_cpu_arch_t v3_mach_type;

    if (v3_mach_type == V3_INVALID_CPU) {
        if (v3_init_vmx_hw(&hw_info) == -1) {
            PrintError("Could not initialize VMX hardware features on cpu %d\n", cpu_id);
            return;
        }
    }

    enable_vmx();

    // Setup the VMXON Region
    vmx_on_region = allocate_vmcs();

    if (vmx_on(vmx_on_region) == VMX_SUCCESS) {
        V3_Print("VMX Enabled\n");
        host_vmcs_ptrs[cpu_id] = vmx_on_region;
    } else {
        V3_Print("VMX already enabled\n");
        V3_FreePages((void *)vmx_on_region, 1);
    }

    PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);

    {
        struct vmx_sec_proc_ctrls sec_proc_ctrls;
        sec_proc_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.sec_proc_ctrls));

        if (sec_proc_ctrls.enable_ept == 0) {
            V3_Print("VMX EPT (Nested) Paging not supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_CPU;
        } else if (sec_proc_ctrls.unrstrct_guest == 0) {
            V3_Print("VMX EPT (Nested) Paging supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
        } else {
            V3_Print("VMX EPT (Nested) Paging + Unrestricted guest supported\n");
            v3_cpu_types[cpu_id] = V3_VMX_EPT_UG_CPU;
        }
    }
}

void v3_deinit_vmx_cpu(int cpu_id) {
    extern v3_cpu_arch_t v3_cpu_types[];
    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    if (host_vmcs_ptrs[cpu_id] != 0) {
        V3_Print("Disabling VMX\n");

        if (vmx_off() != VMX_SUCCESS) {
            PrintError("Error executing VMXOFF\n");
        }

        V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);

        host_vmcs_ptrs[cpu_id] = 0;
    }
}