Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
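For example, to track a release branch (the branch name Release-1.0 here is illustrative; list the real remote branches with "git branch -r"):

  git checkout --track -b Release-1.0 origin/Release-1.0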


vmx_patch4
[palacios.git] / palacios / src / palacios / vmx.c
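This snapshot of vmx.c shows the VMX (Intel VT-x) backend in mid-rework: the old launch path is fenced off under "#if 0" as CRUFT, while the new guest_info-based init/launch path below it is still being filled in (start_vmx_guest() deliberately returns -1 once host state is set up).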
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Peter Dinda <pdinda@northwestern.edu>
 *         Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx.h>
#include <palacios/vmcs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_ctrl_regs.h>

//
//
// CRUFT
//
//

#if 0

#include <palacios/vmm_util.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_ctrl_regs.h>

extern int Launch_VM(ullong_t vmcsPtr, uint_t eip);

#define NUMPORTS 65536

#define VMXASSIST_INFO_PORT   0x0e9
#define ROMBIOS_PANIC_PORT    0x400
#define ROMBIOS_PANIC_PORT2   0x401
#define ROMBIOS_INFO_PORT     0x402
#define ROMBIOS_DEBUG_PORT    0x403

static uint_t GetLinearIP(struct VM * vm) {
  if (vm->state == VM_VMXASSIST_V8086_BIOS || vm->state == VM_VMXASSIST_V8086) {
    return vm->vmcs.guestStateArea.cs.baseAddr + vm->vmcs.guestStateArea.rip;
  } else {
    return vm->vmcs.guestStateArea.rip;
  }
}

#define MAX_CODE 512
#define INSTR_OFFSET_START 17
#define NOP_SEQ_LEN        10
#define INSTR_OFFSET_END   (INSTR_OFFSET_START + NOP_SEQ_LEN - 1)
#define TEMPLATE_CODE_LEN  35

uint_t oldesp = 0;
uint_t myregs = 0;

extern uint_t VMCS_LAUNCH();
extern uint_t Init_VMCS_HostState();
extern uint_t Init_VMCS_GuestState();

extern int Get_CR2();
extern int vmRunning;

void DecodeCurrentInstruction(struct VM *vm, struct Instruction *inst)
{
  // this is a gruesome hack
  uint_t address = GetLinearIP(vm);
  uint_t length = vm->vmcs.exitInfoFields.instrLength;
  unsigned char *t = (unsigned char *) address;

  PrintTrace("DecodeCurrentInstruction: instruction is\n");
  PrintTraceMemDump(t, length);

  if (length==3 && t[0]==0x0f && t[1]==0x22 && t[2]==0xc0) {
    // mov from eax to cr0
    // usually used to signal
    inst->type=VM_MOV_TO_CR0;
    inst->address=address;
    inst->size=length;
    inst->input1=vm->registers.eax;
    inst->input2=vm->vmcs.guestStateArea.cr0;
    inst->output=vm->registers.eax;
    PrintTrace("MOV FROM EAX TO CR0\n");
  } else {
    inst->type=VM_UNKNOWN_INST;
  }
}

static void ConfigureExits(struct VM *vm)
{
  CopyOutVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));

  vm->vmcs.execCtrlFields.pinCtrls |= 0
    // EXTERNAL_INTERRUPT_EXITING
    | NMI_EXITING;
  vm->vmcs.execCtrlFields.procCtrls |= 0
      // INTERRUPT_WINDOWS_EXIT
      | USE_TSC_OFFSETTING
      | HLT_EXITING
      | INVLPG_EXITING
      | MWAIT_EXITING
      | RDPMC_EXITING
      | RDTSC_EXITING
      | MOVDR_EXITING
      | UNCONDITION_IO_EXITING
      | MONITOR_EXITING
      | PAUSE_EXITING;

  CopyInVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));

  CopyOutVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));

  vm->vmcs.exitCtrlFields.exitCtrls |= ACK_IRQ_ON_EXIT;

  CopyInVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));

/*   VMCS_READ(VM_EXIT_CTRLS, &flags); */
/*   flags |= ACK_IRQ_ON_EXIT; */
/*   VMCS_WRITE(VM_EXIT_CTRLS, &flags); */
}

extern int RunVMM();
extern int SAFE_VM_LAUNCH();

int MyLaunch(struct VM *vm)
{
  ullong_t vmcs = (ullong_t)((uint_t) (vm->vmcsregion));
  uint_t entry_eip = vm->descriptor.entry_ip;
  uint_t exit_eip = vm->descriptor.exit_eip;
  uint_t guest_esp = vm->descriptor.guest_esp;
  uint_t f = 0xffffffff;
  uint_t tmpReg = 0;
  int ret;
  int vmm_ret = 0;

  PrintTrace("Guest ESP: 0x%x (%u)\n", guest_esp, guest_esp);

  exit_eip = (uint_t)RunVMM;

  PrintTrace("Clear\n");
  VMCS_CLEAR(vmcs);
  PrintTrace("Load\n");
  VMCS_LOAD(vmcs);

  PrintTrace("VMCS_LINK_PTR\n");
  VMCS_WRITE(VMCS_LINK_PTR, &f);
  PrintTrace("VMCS_LINK_PTR_HIGH\n");
  VMCS_WRITE(VMCS_LINK_PTR_HIGH, &f);

  SetCtrlBitsCorrectly(IA32_VMX_PINBASED_CTLS_MSR, PIN_VM_EXEC_CTRLS);
  SetCtrlBitsCorrectly(IA32_VMX_PROCBASED_CTLS_MSR, PROC_VM_EXEC_CTRLS);
  SetCtrlBitsCorrectly(IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CTRLS);
  SetCtrlBitsCorrectly(IA32_VMX_ENTRY_CTLS_MSR, VM_ENTRY_CTRLS);

  //
  //
  //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL);
  //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL_HIGH);

  /* Host state */
  PrintTrace("Setting up host state\n");
  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, HOST_CR0);
  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, HOST_CR4);
  ret = Init_VMCS_HostState();

  if (ret != VMX_SUCCESS) {
    if (ret == VMX_FAIL_VALID) {
      PrintTrace("Init Host state: VMCS FAILED WITH ERROR\n");
    } else {
      PrintTrace("Init Host state: Invalid VMCS\n");
    }
    return ret;
  }

  //  PrintTrace("HOST_RIP: %x (%u)\n", exit_eip, exit_eip);
  VMCS_WRITE(HOST_RIP, &exit_eip);

  /* Guest state */
  PrintTrace("Setting up guest state\n");
  PrintTrace("GUEST_RIP: %x (%u)\n", entry_eip, entry_eip);
  VMCS_WRITE(GUEST_RIP, &entry_eip);

  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, GUEST_CR0);
  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, GUEST_CR4);
  ret = Init_VMCS_GuestState();

  PrintTrace("InitGuestState returned\n");

  if (ret != VMX_SUCCESS) {
    if (ret == VMX_FAIL_VALID) {
      PrintTrace("Init Guest state: VMCS FAILED WITH ERROR\n");
    } else {
      PrintTrace("Init Guest state: Invalid VMCS\n");
    }
    return ret;
  }
  PrintTrace("GUEST_RSP: %x (%u)\n", guest_esp, (uint_t)guest_esp);
  VMCS_WRITE(GUEST_RSP, &guest_esp);

  //  tmpReg = 0x4100;
  tmpReg = 0xffffffff;
  if (VMCS_WRITE(EXCEPTION_BITMAP, &tmpReg) != VMX_SUCCESS) {
    PrintInfo("Bitmap error\n");
  }

  ConfigureExits(vm);

  PrintTrace("VMCS_LAUNCH\n");

  vm->state=VM_VMXASSIST_STARTUP;

  vmm_ret = SAFE_VM_LAUNCH();

  PrintTrace("VMM error %d\n", vmm_ret);

  return vmm_ret;
}

int VMLaunch(struct VMDescriptor *vm)
{
  VMCS * vmcs = CreateVMCS();
  int rc;

  ullong_t vmcs_ptr = (ullong_t)((uint_t)vmcs);
  uint_t top = (vmcs_ptr >> 32) & 0xffffffff;
  uint_t bottom = (vmcs_ptr) & 0xffffffff;

  theVM.vmcsregion = vmcs;
  theVM.descriptor = *vm;

  PrintTrace("vmcs_ptr_top=%x vmcs_ptr_bottom=%x, eip=%x\n", top, bottom, vm->entry_ip);
  rc = MyLaunch(&theVM); // vmcs_ptr, vm->entry_ip, vm->exit_eip, vm->guest_esp);
  PrintTrace("Returned from MyLaunch();\n");
  return rc;
}

//
//
//  END CRUFT
//
//

#endif

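/* Populate the VMCS host-state area from the context of the currently
 * running host: control registers, GDTR/IDTR bases, segment selectors,
 * the FS/GS base MSRs, and the SYSENTER MSRs. The CPU reloads this
 * state on every VM exit. */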
static int update_vmcs_host_state(struct guest_info * info) {
    addr_t tmp;

    struct {
        uint16_t limit;
        addr_t base;
    } __attribute__((packed)) tmp_seg;

    struct v3_msr tmp_msr;

    __asm__ __volatile__ ( "movq %%cr0, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR0, tmp);

    __asm__ __volatile__ ( "movq %%cr3, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR3, tmp);

    __asm__ __volatile__ ( "movq %%cr4, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR4, tmp);

    __asm__ __volatile__ ("sgdt (%0); "
                          :
                          : "q"(&tmp_seg)
                          : "memory"
                          );
    vmcs_write(VMCS_HOST_GDTR_BASE, tmp_seg.base);

    __asm__ __volatile__ ("sidt (%0); "
                          :
                          : "q"(&tmp_seg)
                          : "memory"
                          );
    vmcs_write(VMCS_HOST_IDTR_BASE, tmp_seg.base);

    /* How do we handle this...?
    __asm__ __volatile__ ("str (%0); "
                          :
                          : "q"(&tmp_seg)
                          : "memory"
                          );
    vmcs_write(VMCS_HOST_TR_BASE, tmp_seg.base);
    */

#define FS_BASE_MSR 0xc0000100
#define GS_BASE_MSR 0xc0000101

    // FS.BASE MSR
    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);

    // GS.BASE MSR
    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);

    __asm__ __volatile__ ( "movq %%cs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ss, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ds, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%es, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%fs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%gs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);

    __asm__ __volatile__ ( "str %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_TR_SELECTOR, tmp);

#define SYSENTER_CS_MSR 0x00000174
#define SYSENTER_ESP_MSR 0x00000175
#define SYSENTER_EIP_MSR 0x00000176

    // SYSENTER CS MSR
    v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.value);

    // SYSENTER_ESP MSR
    v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);

    // SYSENTER_EIP MSR
    v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);

    // RIP and RSP are not captured here; the host RIP/RSP fields still
    // need to be written before launch.

    return 0;
}

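/* Physical address of the VMXON region; allocated and stamped with the
 * VMCS revision id in v3_init_vmx(), then handed to v3_enable_vmx(). */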
static struct vmcs_data* vmxon_ptr;

#if 0
// For the 32-bit reserved bit fields
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
    v3_msr_t mask_msr;

    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);

    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);

    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);

    val &= mask_msr.lo;
    val &= mask_msr.hi;

    return val;
}

static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
    v3_msr_t msr0, msr1;
    addr_t msr0_val, msr1_val;

    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);

    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);

    // This generates a mask that is the natural bit width of the CPU
    msr0_val = msr0.value;
    msr1_val = msr1.value;

    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);

    val &= msr0_val;
    val &= msr1_val;

    return val;
}

static int setup_base_host_state() {

    //   vmwrite(HOST_IDTR_BASE,

}

// Put the guest in flat real mode via v8086: segment bases are selector<<4,
// and CS:RIP = f000:fff0 targets the x86 reset vector at 0xffff0.
static void setup_v8086_mode_for_boot(struct guest_info* vm_info)
{
    ((struct vmx_data*)vm_info->vmm_data)->state = VMXASSIST_V8086_BIOS;
    ((struct rflags*)&(vm_info->ctrl_regs.rflags))->vm = 1;
    ((struct rflags*)&(vm_info->ctrl_regs.rflags))->iopl = 3;

    vm_info->rip = 0xfff0;

    vm_info->segments.cs.selector = 0xf000;
    vm_info->segments.cs.base = 0xf000<<4;
    vm_info->segments.cs.limit = 0xffff;
    vm_info->segments.cs.type = 3;
    vm_info->segments.cs.system = 1;
    vm_info->segments.cs.dpl = 3;
    vm_info->segments.cs.present = 1;
    vm_info->segments.cs.granularity = 0;

    vm_info->segments.ss.selector = 0x0000;
    vm_info->segments.ss.base = 0x0000<<4;
    vm_info->segments.ss.limit = 0xffff;
    vm_info->segments.ss.type = 3;
    vm_info->segments.ss.system = 1;
    vm_info->segments.ss.dpl = 3;
    vm_info->segments.ss.present = 1;
    vm_info->segments.ss.granularity = 0;

    vm_info->segments.es.selector = 0x0000;
    vm_info->segments.es.base = 0x0000<<4;
    vm_info->segments.es.limit = 0xffff;
    vm_info->segments.es.type = 3;
    vm_info->segments.es.system = 1;
    vm_info->segments.es.dpl = 3;
    vm_info->segments.es.present = 1;
    vm_info->segments.es.granularity = 0;

    vm_info->segments.fs.selector = 0x0000;
    vm_info->segments.fs.base = 0x0000<<4;
    vm_info->segments.fs.limit = 0xffff;
    vm_info->segments.fs.type = 3;
    vm_info->segments.fs.system = 1;
    vm_info->segments.fs.dpl = 3;
    vm_info->segments.fs.present = 1;
    vm_info->segments.fs.granularity = 0;

    vm_info->segments.gs.selector = 0x0000;
    vm_info->segments.gs.base = 0x0000<<4;
    vm_info->segments.gs.limit = 0xffff;
    vm_info->segments.gs.type = 3;
    vm_info->segments.gs.system = 1;
    vm_info->segments.gs.dpl = 3;
    vm_info->segments.gs.present = 1;
    vm_info->segments.gs.granularity = 0;
}

#endif

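/* Allocate one zeroed page usable as a VMCS or VMXON region: stamp it with
 * the revision identifier from the low word of the IA32_VMX_BASIC MSR and
 * return its physical address (VMXON/VMPTRLD/VMCLEAR take physical
 * addresses). */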
static struct vmcs_data* allocate_vmcs()
{
    reg_ex_t msr;
    PrintDebug("Allocating page\n");
    struct vmcs_data* vmcs_page = (struct vmcs_data*)V3_VAddr(V3_AllocPages(1));

    memset(vmcs_page, 0, 4096);

    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));

    vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (struct vmcs_data*)V3_PAddr((void*)vmcs_page);
}

static void init_vmcs_bios(struct guest_info * vm_info)
{

}

static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
    PrintDebug("Entering init_vmx_guest\n");
    v3_pre_config_guest(info, config_ptr);

    struct vmx_data* data;

    PrintDebug("Allocating vmx_data\n");
    data = (struct vmx_data*)V3_Malloc(sizeof(struct vmx_data));
    PrintDebug("Allocating VMCS\n");
    data->vmcs = allocate_vmcs();

    info->vmm_data = (void*)data;

    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
    init_vmcs_bios(info);

    v3_post_config_guest(info, config_ptr);

    return 0;
}

static int start_vmx_guest(struct guest_info *info) {
    struct vmx_data* vmx_data = (struct vmx_data*)info->vmm_data;
    int vmx_ret;

    // Have to do a whole lot of flag setting here
    PrintDebug("Clearing VMCS\n");
    vmx_ret = vmcs_clear(vmx_data->vmcs);
    if (vmx_ret != VMX_SUCCESS) {
        PrintDebug("VMCLEAR failed\n");
        return -1;
    }
    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_data->vmcs);
    if (vmx_ret != VMX_SUCCESS) {
        PrintDebug("VMPTRLD failed\n");
        return -1;
    }

    update_vmcs_host_state(info);

    // Setup guest state
    return -1;
}

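/* VMX support is advertised in CPUID.1:ECX bit 5. Even when it is present,
 * the IA32_FEATURE_CONTROL MSR must have its lock bit set with VMX enabled;
 * otherwise the BIOS has left VMX disabled. */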
int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: %p\n", (void*)ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }

    } else {
        PrintDebug("VMX not supported on this cpu\n");
        return 0;
    }

    return 1;
}

static int has_vmx_nested_paging() {
    return 0;
}

// We set up the global host state that is unlikely to change across processes here
// Segment Descriptors mainly

struct seg_descriptor {

};

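/* Global VMX initialization: set CR4.VMXE and CR0.NE, allocate the VMXON
 * region, enter VMX root operation via vmxon, and register the VMX
 * implementations of the generic v3_ctrl_ops callbacks. */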
void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
    extern v3_cpu_arch_t v3_cpu_type;

    // Set CR4.VMXE (bit 13), required before executing vmxon
    __asm__ __volatile__ (
                          "movq %%cr4, %%rbx; "
                          "orq  $0x00002000, %%rbx; "
                          "movq %%rbx, %%cr4;"
                          :
                          :
                          : "%rbx"
                          );

    // Set CR0.NE (bit 5), which VMX operation requires to be 1
    // Should check and return Error here....
    __asm__ __volatile__ (
                          "movq %%cr0, %%rbx; "
                          "orq  $0x00000020, %%rbx; "
                          "movq %%rbx, %%cr0;"
                          :
                          :
                          : "%rbx"
                          );

    // Setup VMXON Region
    vmxon_ptr = allocate_vmcs();
    PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr);

    if (v3_enable_vmx(vmxon_ptr) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }

    if (has_vmx_nested_paging() == 1) {
        v3_cpu_type = V3_VMX_EPT_CPU;
    } else {
        v3_cpu_type = V3_VMX_CPU;
    }

    // Setup the VMX specific vmm operations
    vm_ops->init_guest = &init_vmx_guest;
    vm_ops->start_guest = &start_vmx_guest;
    vm_ops->has_nested_paging = &has_vmx_nested_paging;
}