Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Successfully launches and jumps into the exit handler. Need to write a proper exit...
[palacios.git] / palacios / src / palacios / vmx.c
index e393099..93b3c16 100644 (file)
-/* Eventually  we want to get rid of these */
+/* 
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National 
+ * Science Foundation and the Department of Energy.  
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico.  You can find out more at 
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu> 
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
+ * All rights reserved.
+ *
+ * Author: Peter Dinda <pdinda@northwestern.edu>
+ *         Jack Lange <jarusl@cs.northwestern.edu>
+ *
+ * This is free software.  You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
 
-#include <geekos/cpu.h>
-#include <geekos/io_devs.h>
-#include <geekos/io.h>
-/* ** */
 
 #include <palacios/vmx.h>
 #include <palacios/vmcs.h>
 #include <palacios/vmm.h>
-#include <palacios/vmm_util.h>
-#include <palacios/vmm_string.h>
+#include <palacios/vmx_lowlevel.h>
+#include <palacios/vmm_lowlevel.h>
+#include <palacios/vmm_config.h>
+#include <palacios/vmm_ctrl_regs.h>
+#include <palacios/vm_guest_mem.h>
 
+static addr_t vmxon_ptr_phys;
+extern int v3_vmx_exit_handler();
+extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs);
 
-extern void Get_MSR(unsigned int msr, uint_t * high, uint_t * low);
-extern void Set_MSR(unsigned int msr, uint_t high, uint_t low);
-extern int Enable_VMX(ullong_t regionPtr);
-extern int cpuid_ecx(unsigned int op);
-extern int Launch_VM(ullong_t vmcsPtr, uint_t eip);
+static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
+{
+    int ret = 0;
+    ret = vmcs_write(field,val);
 
-#define NUMPORTS 65536
+    if (ret != VMX_SUCCESS) {
+        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
+        return 1;
+    }
 
+    return 0;
+}
 
-#define VMXASSIST_INFO_PORT   0x0e9
-#define ROMBIOS_PANIC_PORT    0x400
-#define ROMBIOS_PANIC_PORT2   0x401
-#define ROMBIOS_INFO_PORT     0x402
-#define ROMBIOS_DEBUG_PORT    0x403
+static int update_vmcs_host_state(struct guest_info * info) {
+    int vmx_ret = 0;
+    addr_t tmp;
+    struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
+    struct v3_msr tmp_msr;
 
+    __asm__ __volatile__ ( "movq    %%cr0, %0; "               
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);
 
-extern struct vmm_os_hooks * os_hooks;
 
+    __asm__ __volatile__ ( "movq %%cr3, %0; "          
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);
 
-static struct VM theVM;
 
-static uint_t GetLinearIP(struct VM *vm)
-{
-  if (vm->state==VM_VMXASSIST_V8086_BIOS || vm->state==VM_VMXASSIST_V8086) { 
-    return vm->vmcs.guestStateArea.cs.baseAddr + vm->vmcs.guestStateArea.rip;
-  } else {
-    return vm->vmcs.guestStateArea.rip;
-  }
-}
+    __asm__ __volatile__ ( "movq %%cr4, %0; "          
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
 
 
-static void VMXPanic()
-{
-  while (1) {}
-}
 
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
 
-#define MAX_CODE 512
-#define INSTR_OFFSET_START 17
-#define NOP_SEQ_LEN        10
-#define INSTR_OFFSET_END   (INSTR_OFFSET_START+NOP_SEQ_LEN-1)
-#define TEMPLATE_CODE_LEN  35
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
 
-uint_t oldesp=0;
-uint_t myregs=0;
+    // FS.BASE MSR
+    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);    
 
-// simply execute the instruction that is faulting and return
-static int ExecFaultingInstructionInVMM(struct VM *vm)
-{
-  uint_t address = GetLinearIP(vm);
-  myregs = (uint_t)&(vm->registers);
-  
+    // GS.BASE MSR
+    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);    
 
-  PrintTrace("About the execute faulting instruction!\n");
-  PrintTrace("Instruction is:\n");
-  PrintTraceMemDump((void*)(address),vm->vmcs.exitInfoFields.instrLength);
-  
 
-  PrintTrace("The template code is:\n");
-  PrintTraceMemDump(&&template_code,TEMPLATE_CODE_LEN);
 
-  // clone the template code
-  //memcpy(&&template_code,code,MAX_CODE);
-  
-  // clean up the nop field
-  memset(&&template_code+INSTR_OFFSET_START,*((uchar_t *)(&&template_code+0)),NOP_SEQ_LEN);
-  // overwrite the nops with the faulting instruction
-  memcpy(&&template_code+INSTR_OFFSET_START, (void*)(address),vm->vmcs.exitInfoFields.instrLength);
-  
-  PrintTrace("Finished modifying the template code, which now is:\n");
-  PrintTraceMemDump(&&template_code,TEMPLATE_CODE_LEN);
-
-  PrintTrace("Now entering modified template code\n");
-
-
- template_code:
-  // Template code stores current registers,
-  // restores registers, has a landing pad of noops 
-  // that will be modified, restores current regs, and then returns
-  //
-  // Note that this currently ignores cr0, cr3, cr4, dr7, rsp, rip, and rflags
-  // it also blythly assumes it can exec the instruction in protected mode
-  //
-  __asm__ __volatile__ ("nop\n"               // for cloning purposes                          (1 byte)
-                       "pusha\n"             // push our current regs onto the current stack  (1 byte)
-                       "movl %0, %%eax\n"    // Get oldesp location                           (5 bytes)
-                       "movl %%esp, (%%eax)\n"  // store the current stack pointer in oldesp       (2 bytes)
-                        "movl %1, %%eax\n"    // Get regs location                             (5 bytes)
-                       "movl (%%eax), %%esp\n"  // point esp at regs                               (2 bytes)
-                       "popa\n"              // now we have the VM registers restored            (1 byte)
-                       "nop\n"               // now we execute the actual instruction         (1 byte x 10)
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       // need to copy back to the VM registers!
-                        "movl %0, %%eax\n"     // recapture oldesp location                     (5 bytes)
-                       "movl (%%eax), %%esp\n"   // now we'll get our esp back from oldesp       (2 bytes)
-                       "popa\n"              // and restore our GP regs and we're done       (1 byte)
-                       : "=m"(oldesp)
-                       : "m"(myregs)
-                       );
-  
-  PrintTrace("Survived executing the faulting instruction and returning.\n");
+    __asm__ __volatile__ ( "movq %%cs, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
 
-  vm->vmcs.guestStateArea.rip += vm->vmcs.exitInfoFields.instrLength;
+    __asm__ __volatile__ ( "movq %%ss, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
 
-  return 0;
+    __asm__ __volatile__ ( "movq %%ds, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
 
-}
+    __asm__ __volatile__ ( "movq %%es, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
 
+    __asm__ __volatile__ ( "movq %%fs, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
 
-int is_vmx_capable() {
-  uint_t ret;
-  union VMX_MSR featureMSR;
-  
-  ret = cpuid_ecx(1);
-  if (ret & CPUID_1_ECX_VTXFLAG) {
-    Get_MSR(IA32_FEATURE_CONTROL_MSR, &featureMSR.regs.high, &featureMSR.regs.low);
+    __asm__ __volatile__ ( "movq %%gs, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
 
-    PrintTrace("MSRREGlow: 0x%.8x\n", featureMSR.regs.low);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
 
-    if ((featureMSR.regs.low & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
-      PrintDebug("VMX is locked -- enable in the BIOS\n");
-      return 0;
-    }
-  } else {
-    PrintDebug("VMX not supported on this cpu\n");
-    return 0;
-  }
 
-  return 1;
+#define SYSENTER_CS_MSR 0x00000174
+#define SYSENTER_ESP_MSR 0x00000175
+#define SYSENTER_EIP_MSR 0x00000176
+
+   // SYSENTER CS MSR
+    v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);
+
+    // SYSENTER_ESP MSR
+    v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);
 
+    // SYSENTER_EIP MSR
+    v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
+
+    return vmx_ret;
 }
 
 
-VmxOnRegion * Init_VMX() {
-  uint_t ret;
-  VmxOnRegion * region = NULL;
 
 
-  region = CreateVmxOnRegion();
 
 
-  ret = Enable_VMX((ullong_t)((uint_t)region));
-  if (ret == 0) {
-    PrintDebug("VMX Enabled\n");
-  } else {
-    PrintDebug("VMX failure (ret = %d)\n", ret);
-  }
+#if 0
+// For the 32 bit reserved bit fields 
+// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
+static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
+    v3_msr_t mask_msr;
 
-  theVM.vmxonregion = region;
+    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);
 
-  return region;
-}
+    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);
 
-extern uint_t VMCS_CLEAR();
-extern uint_t VMCS_LOAD();
-extern uint_t VMCS_STORE();
-extern uint_t VMCS_LAUNCH();
-extern uint_t VMCS_RESUME();
-extern uint_t Init_VMCS_HostState();
-extern uint_t Init_VMCS_GuestState();
+    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);
 
-void SetCtrlBitsCorrectly(int msrno, int vmcsno)
-{
-  uint_t reserved =0;
-  union VMX_MSR msr;
-
-  PrintTrace("SetCtrlBitsCorrectly(%x,%x)\n", msrno, vmcsno);
-  Get_MSR(msrno, &msr.regs.high, &msr.regs.low);
-  PrintTrace("MSR %x = %x : %x \n", msrno, msr.regs.high, msr.regs.low);
-  reserved = msr.regs.low;
-  reserved &= msr.regs.high;
-  VMCS_WRITE(vmcsno, &reserved);
+    val |= mask_msr.lo;
+    val |= mask_msr.hi;
+  
+    return val;
 }
 
 
-void SetCRBitsCorrectly(int msr0no, int msr1no, int vmcsno)
-{
-  uint_t reserved =0;
-  union VMX_MSR msr0, msr1;
-
-  PrintTrace("SetCRBitsCorrectly(%x,%x,%x)\n",msr0no,msr1no,vmcsno);
-  Get_MSR(msr0no, &msr0.regs.high, &msr0.regs.low);
-  Get_MSR(msr1no, &msr1.regs.high, &msr1.regs.low);
-  PrintTrace("MSR %x = %x, %x =  %x \n", msr0no, msr0.regs.low, msr1no, msr1.regs.low);
-  reserved = msr0.regs.low;
-  reserved &= msr1.regs.low;
-  VMCS_WRITE(vmcsno, &reserved);
+
+static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
+    v3_msr_t msr0, msr1;
+    addr_t msr0_val, msr1_val;
+
+    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);
+
+    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
+    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);
+  
+    // This generates a mask that is the natural bit width of the CPU
+    msr0_val = msr0.value;
+    msr1_val = msr1.value;
+
+    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);
+
+    val |= msr0_val;
+    val |= msr1_val;
+
+    return val;
 }
 
+static int setup_base_host_state() {
+    
 
-extern int Get_CR2();
-extern int vmRunning;
+
+    //   vmwrite(HOST_IDTR_BASE, 
 
 
-static int PanicUnhandledVMExit(struct VM *vm)
-{
-  PrintInfo("Panicking due to VMExit with reason %u\n", vm->vmcs.exitInfoFields.reason);
-  PrintTrace("Panicking due to VMExit with reason %u\n", vm->vmcs.exitInfoFields.reason);
-  PrintTrace_VMCS_ALL();
-  PrintTrace_VMX_Regs(&(vm->registers));
-  VMXPanic();
-  return 0;
 }
 
 
-static int HandleVMPrintsAndPanics(struct VM *vm, uint_t port, uint_t data)
-{
-  if (port==VMXASSIST_INFO_PORT &&
-      (vm->state == VM_VMXASSIST_STARTUP || 
-       vm->state == VM_VMXASSIST_V8086_BIOS ||
-       vm->state == VM_VMXASSIST_V8086)) { 
-    // Communication channel from VMXAssist
-    PrintTrace("VMXASSIST Output Port\n");
-    PrintDebug("%c",data&0xff);
-    return 1;
-  } 
-
-  if ((port==ROMBIOS_PANIC_PORT || 
-       port==ROMBIOS_PANIC_PORT2 || 
-       port==ROMBIOS_DEBUG_PORT ||
-       port==ROMBIOS_INFO_PORT) &&
-      (vm->state==VM_VMXASSIST_V8086_BIOS)) {
-    // rombios is communicating
-    PrintTrace("ROMBIOS Output Port\n");
-    //    PrintDebug("%c",data&0xff);
-    return 1;
-  }
+#endif
 
-  if (port==BOOT_STATE_CARD_PORT && vm->state==VM_VMXASSIST_V8086_BIOS) { 
-    // rombios is sending something to the display card
-    PrintTrace("Hex Display: 0x%x\n",data&0xff);
-    return 1;
-  }
-  return 0;
+
+static void inline translate_segment_access(struct v3_segment * v3_seg,  
+                                           struct vmcs_segment_access * access)
+{
+    access->type = v3_seg->type;
+    access->desc_type = v3_seg->system;
+    access->dpl = v3_seg->dpl;
+    access->present = v3_seg->present;
+    access->avail = v3_seg->avail;
+    access->long_mode = v3_seg->long_mode;
+    access->db = v3_seg->db;
+    access->granularity = v3_seg->granularity;
 }
 
-static int HandleInOutExit(struct VM *vm)
+static int inline vmcs_write_guest_segments(struct guest_info* info)
 {
-  uint_t address;
-
-  struct VMCSExitInfoFields *exitinfo = &(vm->vmcs.exitInfoFields);
-  struct VMExitIOQual * qual = (struct VMExitIOQual *)&(vm->vmcs.exitInfoFields.qualification);
-  struct VMXRegs *regs = &(vm->registers);
-
-  address=GetLinearIP(vm);
-
-  PrintTrace("Handling Input/Output Instruction Exit\n");
-
-  PrintTrace_VMX_Regs(regs);
-
-  PrintTrace("Qualifications=0x%x\n", exitinfo->qualification);
-  PrintTrace("Reason=0x%x\n", exitinfo->reason);
-  PrintTrace("IO Port: 0x%x (%d)\n", qual->port, qual->port);
-  PrintTrace("Instruction Info=%x\n", exitinfo->instrInfo);
-  PrintTrace("%x : %s %s %s instruction of length %d for %d bytes from/to port 0x%x\n",
-                  address,
-                  qual->dir == 0 ? "output" : "input",
-                  qual->string ==0 ? "nonstring" : "STRING",
-                  qual->REP == 0 ? "with no rep" : "WITH REP",
-                  exitinfo->instrLength, 
-                  qual->accessSize==0 ? 1 : qual->accessSize==1 ? 2 : 4,
-                  qual->port);
-
-  if ((qual->port == PIC_MASTER_CMD_ISR_PORT) ||
-      (qual->port == PIC_MASTER_IMR_PORT)     ||
-      (qual->port == PIC_SLAVE_CMD_ISR_PORT)  ||
-      (qual->port == PIC_SLAVE_IMR_PORT)) {
-    PrintTrace( "PIC Access\n");
-  }
-                  
-
-  if ((qual->dir == 1) && (qual->REP == 0) && (qual->string == 0)) { 
-    char byte = In_Byte(qual->port);
-
-    vm->vmcs.guestStateArea.rip += exitinfo->instrLength;
-    regs->eax = (regs->eax & 0xffffff00) | byte;
-    PrintTrace("Returning 0x%x in eax\n", (regs->eax));
-  }
-
-  if (qual->dir==0 && qual->REP==0 && qual->string==0) { 
-    // See if we need to handle the outb as a signal or
-    // print from the VM
-    if (HandleVMPrintsAndPanics(vm,qual->port,regs->eax)) {
-    } else {
-      // If not, just go ahead and do the outb
-      Out_Byte(qual->port,regs->eax);
-      PrintTrace("Wrote 0x%x to port\n",(regs->eax));
-    }
-    vm->vmcs.guestStateArea.rip += exitinfo->instrLength;
-  }
+    int ret = 0;
+    struct vmcs_segment_access access;
 
-  return 0;
-}  
+    memset(&access, 0, sizeof(access));
 
+    /* CS Segment */
+    translate_segment_access(&(info->segments.cs), &access);
 
-static int HandleExternalIRQExit(struct VM *vm)
-{
-  struct VMCSExitInfoFields * exitinfo = &(vm->vmcs.exitInfoFields);
-  struct VMExitIntInfo * intInfo  = (struct VMExitIntInfo *)&(vm->vmcs.exitInfoFields.intInfo);
+    ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
+    ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
 
-  PrintTrace("External Interrupt captured\n");
-  PrintTrace("IntInfo: %x\n", exitinfo->intInfo);
+    /* SS Segment */
+    translate_segment_access(&(info->segments.ss), &access);
 
+    ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
+    ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
 
-  if (!intInfo->valid) {
-     // interrupts are off, but this interrupt is not acknoledged (still pending)
-     // so we turn on interrupts to deliver appropriately in the
-     // host
-    PrintTrace("External Interrupt is invald.  Turning Interrupts back on\n");
-    asm("sti");
-    return 0;
-  } 
-
-  // At this point, interrupts are off and the interrupt has been 
-  // acknowledged.  We will now handle the interrupt ourselves 
-  // and turn interrupts  back on in the host
-
-  PrintTrace("type: %d\n", intInfo->type);
-  PrintTrace("number: %d\n", intInfo->nr);
-
-  PrintTrace("Interrupt %d occuring now and handled by HandleExternalIRQExit\n",intInfo->nr);
-
-  switch (intInfo->type) {
-  case 0:  {  // ext. IRQ
-    // In the following, we construct an "int x" instruction
-    // where x is the specific interrupt number that is raised
-    // then we execute that instruciton
-    // because we are in host context, that means it is delivered as normal
-    // through the host IDT
-     
-     ((char*)(&&ext_int_seq_start))[1] = intInfo->nr;
-     PrintTrace("Interrupt instruction setup done %x\n", *((ushort_t *)(&&ext_int_seq_start)));
-     
-ext_int_seq_start:
-     asm("int $0");
-  }
-
-    break;
-  case 2: // NMI
-    PrintTrace("Type: NMI\n");
-    break;
-  case 3: // hw exception
-    PrintTrace("Type: HW Exception\n");
-    break;
-  case 4: // sw exception
-    PrintTrace("Type: SW Exception\n");
-    break;
-  default:
-    PrintTrace("Invalid Interrupt Type\n");
-    return -1;
-  }
-  
-  if (intInfo->valid && intInfo->errorCode) {
-    PrintTrace("IntError: %x\n", exitinfo->intErrorCode);
-  }
+    /* DS Segment */
+    translate_segment_access(&(info->segments.ds), &access);
 
+    ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
+    ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
 
-  return 0;
 
-}
+    /* ES Segment */
+    translate_segment_access(&(info->segments.es), &access);
 
+    ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
+    ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
 
+    /* FS Segment */
+    translate_segment_access(&(info->segments.fs), &access);
 
-void DecodeCurrentInstruction(struct VM *vm, struct Instruction *inst)
-{
-  // this is a gruesome hack
-  uint_t address = GetLinearIP(vm);
-  uint_t length = vm->vmcs.exitInfoFields.instrLength;
-  unsigned char *t = (unsigned char *) address;
+    ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
+    ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
 
+    /* GS Segment */
+    translate_segment_access(&(info->segments.gs), &access);
 
-  
-  PrintTrace("DecodeCurrentInstruction: instruction is\n");
-  PrintTraceMemDump(t,length);
-  
-  if (length==3 && t[0]==0x0f && t[1]==0x22 && t[2]==0xc0) { 
-    // mov from eax to cr0
-    // usually used to signal
-    inst->type=VM_MOV_TO_CR0;
-    inst->address=address;
-    inst->size=length;
-    inst->input1=vm->registers.eax;
-    inst->input2=vm->vmcs.guestStateArea.cr0;
-    inst->output=vm->registers.eax;
-    PrintTrace("MOV FROM EAX TO CR0\n");
-  } else {
-    inst->type=VM_UNKNOWN_INST;
-  }
-}
+    ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
+    ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
 
+    /* LDTR segment */
+    translate_segment_access(&(info->segments.ldtr), &access);
 
-static void V8086ModeSegmentRegisterFixup(struct VM *vm)
-{
-  vm->vmcs.guestStateArea.cs.baseAddr=vm->vmcs.guestStateArea.cs.selector<<4;
-  vm->vmcs.guestStateArea.es.baseAddr=vm->vmcs.guestStateArea.es.selector<<4;
-  vm->vmcs.guestStateArea.ss.baseAddr=vm->vmcs.guestStateArea.ss.selector<<4;
-  vm->vmcs.guestStateArea.ds.baseAddr=vm->vmcs.guestStateArea.ds.selector<<4;
-  vm->vmcs.guestStateArea.fs.baseAddr=vm->vmcs.guestStateArea.fs.selector<<4;
-  vm->vmcs.guestStateArea.gs.baseAddr=vm->vmcs.guestStateArea.gs.selector<<4;
+    ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
+    ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);
+
+    /* TR Segment */
+    translate_segment_access(&(info->segments.tr), &access);
+
+    ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
+    ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);
+
+    /* GDTR Segment */
+
+    ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
+    ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);
+
+    /* IDTR Segment*/
+    ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
+    ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);
+
+    return ret;
 }
 
-static void SetupV8086ModeForBoot(struct VM *vm)
+static void setup_v8086_mode_for_boot(struct guest_info * vm_info)
 {
-  vm->state = VM_VMXASSIST_V8086_BIOS;
 
-  // Put guest into V8086 mode on return
-  vm->vmcs.guestStateArea.rflags |= EFLAGS_VM | EFLAGS_IOPL_HI | EFLAGS_IOPL_LO ;
-  
-  // We will start at f000:fff0 on return
-  //
-  // We want this to look as much as possible as a processor
-  // reset
-  vm->vmcs.guestStateArea.rip = 0xfff0;  // note, 16 bit rip
-  vm->vmcs.guestStateArea.cs.selector = 0xf000;
-  vm->vmcs.guestStateArea.cs.limit=0xffff;
-  vm->vmcs.guestStateArea.cs.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.ss.selector = 0x0000;
-  vm->vmcs.guestStateArea.ss.limit=0xffff;
-  vm->vmcs.guestStateArea.ss.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.ds.selector = 0x0000;
-  vm->vmcs.guestStateArea.ds.limit=0xffff;
-  vm->vmcs.guestStateArea.ds.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.es.selector = 0x0000;
-  vm->vmcs.guestStateArea.es.limit=0xffff;
-  vm->vmcs.guestStateArea.es.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.fs.selector = 0x0000;
-  vm->vmcs.guestStateArea.fs.limit=0xffff;
-  vm->vmcs.guestStateArea.fs.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.gs.selector = 0x0000;
-  vm->vmcs.guestStateArea.gs.limit=0xffff;
-  vm->vmcs.guestStateArea.gs.access.as_dword = 0xf3;
-  
-  V8086ModeSegmentRegisterFixup(vm);
+    ((struct vmx_data *)vm_info->vmm_data)->state = VMXASSIST_V8086_BIOS;
+    ((struct rflags *)&(vm_info->ctrl_regs.rflags))->vm = 1;
+    ((struct rflags *)&(vm_info->ctrl_regs.rflags))->iopl = 3;
+
+   
+    vm_info->rip = 0xd0000;
+    vm_info->vm_regs.rsp = 0x80000;
+
+    vm_info->segments.cs.selector = 0xf000;
+    vm_info->segments.cs.base = 0xf000 << 4;
+    vm_info->segments.cs.limit = 0xffff;
+    vm_info->segments.cs.type = 3;
+    vm_info->segments.cs.system = 1;
+    vm_info->segments.cs.dpl = 3;
+    vm_info->segments.cs.present = 1;
+    vm_info->segments.cs.granularity = 0;
+
+    int i = 0;
+    struct v3_segment * seg_ptr = (struct v3_segment *)&(vm_info->segments);
+
+    /* Set values for selectors ds through ss */
+    for(i = 1; i < 6 ; i++) {
+        seg_ptr[i].selector = 0x0000;
+        seg_ptr[i].base = 0x00000;
+        seg_ptr[i].type = 3;
+        seg_ptr[i].system = 1;
+        seg_ptr[i].dpl = 3;
+        seg_ptr[i].present = 1;
+        seg_ptr[i].granularity = 0;
+    }
 
-  PrintTrace_VMCSData(&(vm->vmcs));
+    for(i = 6; i < 10; i++) {
+        seg_ptr[i].base = 0x0;
+        seg_ptr[i].limit = 0xffff;
+    }
 
+    vm_info->segments.ldtr.selector = 0x0;
+    vm_info->segments.ldtr.type = 2;
+    vm_info->segments.ldtr.system = 0;
+    vm_info->segments.ldtr.present = 1;
+    vm_info->segments.ldtr.granularity = 0;
+
+    vm_info->segments.tr.selector = 0x0;
+    vm_info->segments.tr.type = 3;
+    vm_info->segments.tr.system = 0;
+    vm_info->segments.tr.present = 1;
+    vm_info->segments.tr.granularity = 0;
 }
-  
 
 
-static int HandleExceptionOrNMI(struct VM *vm)
+static addr_t allocate_vmcs() 
 {
-  struct Instruction inst;
-  uint_t num;
-  uint_t type;
-  uint_t errorvalid;
-  uint_t error;
-  uint_t ext=0;
-  uint_t idt=0;
-  uint_t ti=0;
-  uint_t selectorindex=0;
-
-  PrintTrace("Exception or NMI occurred\n");
-  
-  num=vm->vmcs.exitInfoFields.intInfo & 0xff;
-  type=(vm->vmcs.exitInfoFields.intInfo & 0x700)>>8;
-  errorvalid=(vm->vmcs.exitInfoFields.intInfo & 0x800)>>11;
-  if (errorvalid) { 
-    error=vm->vmcs.exitInfoFields.intErrorCode;
-    ext=error&0x1;
-    idt=(error&0x2)>>1;
-    ti=(error&0x4)>>2;
-    selectorindex=(error>>3)&0xffff;
-  }
-  
-  PrintTrace("Exception %d now - handled by HandleExceptionOrNMI\n",num);
+    reg_ex_t msr;
+    PrintDebug("Allocating page\n");
+    struct vmcs_data * vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
 
-  PrintTrace("Exception Number %u : %s\n", num, exception_names[num]);
-  PrintTrace("Exception Type %u : %s\n", type, exception_type_names[type]);
-  if (errorvalid) { 
-    if (ext) { 
-      PrintTrace("External\n");
-    } else {
-      PrintTrace("%s - Selector Index is %u\n", idt ? "IDT" : ti ? "LDT" : "GDT", selectorindex);
-    }
-  }
-
-  DecodeCurrentInstruction(vm,&inst);
-
-  if (inst.type==VM_MOV_TO_CR0) {
-    PrintTrace("MOV TO CR0, oldvalue=0x%x, newvalue=0x%x\n",inst.input2, inst.input1);
-    if ((inst.input2 & CR0_PE) && !(inst.input1 & CR0_PE) && vm->state==VM_VMXASSIST_STARTUP) {
-      // This is VMXAssist signalling for us to turn on V8086 mode and
-      // jump into the bios
-      PrintTrace("VMXAssist is signaling us for switch to V8086 mode and jump to 0xf000:fff0\n");
-      SetupV8086ModeForBoot(vm);
-      goto leave;
-    } else {
-      PrintTrace("Instruction is a write to CR0, but we don't understand it so we'll just exec it\n");
-    } 
-  } 
 
+    memset(vmcs_page, 0, 4096);
 
-  PrintTrace("Trying to execute the faulting instruction in VMM context now\n");
-  ExecFaultingInstructionInVMM(vm);
+    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
+    
+    vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
+    PrintDebug("VMX Revision: 0x%x\n",vmcs_page->revision);
 
-    leave:
-  //
-  //PanicUnhandledVMExit(vmcs,regs);
-  //VMXPanic();
-  return 0;
+    return (addr_t)V3_PAddr((void *)vmcs_page);
 }
 
 
-static struct VM *FindVM()
+static int init_vmcs_bios(struct guest_info * vm_info) 
 {
-  return &theVM;
-}
 
+    setup_v8086_mode_for_boot(vm_info);
 
-int Do_VMM(struct VMXRegs regs) 
-{
+    // TODO: Fix vmcs fields so they're 32-bit
+    struct vmx_data * vmx_data = (struct vmx_data *)vm_info->vmm_data;
+    int vmx_ret = 0;
 
-  ullong_t vmcs_ptr = 0;
-  uint_t vmcs_ptr_low = 0;
-  int ret = 0;
-  uint_t vmx_abort = 0;
+    PrintDebug("Clearing VMCS: %p\n",(void*)vmx_data->vmcs_ptr_phys);
+    vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);
 
+    if (vmx_ret != VMX_SUCCESS) {
+        PrintError("VMCLEAR failed\n");
+        return -1;
+    }
 
-  
-  PrintTrace("Vm Exit\n");
-  ret = VMCS_STORE(&vmcs_ptr);
-  vmcs_ptr &= 0xffffffff;
-  vmcs_ptr_low +=  vmcs_ptr;
+    PrintDebug("Loading VMCS\n");
+    vmx_ret = vmcs_load(vmx_data->vmcs_ptr_phys);
 
+    if (vmx_ret != VMX_SUCCESS) {
+        PrintError("VMPTRLD failed\n");
+        return -1;
+    }
 
+    struct v3_msr tmp_msr;
 
+    /* Write VMX Control Fields */
+    v3_get_msr(VMX_PINBASED_CTLS_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
+    /* Add NMI exiting */
+    tmp_msr.lo |= NMI_EXIT;
+    check_vmcs_write(VMCS_PIN_CTRLS, tmp_msr.lo);
 
-  PrintTrace("ret=%d\n", ret);
-  PrintTrace("Revision: %x\n", *(uint_t *)(vmcs_ptr_low));
-  vmx_abort = *(uint_t*)(((char *)vmcs_ptr_low)+4);
-    
-  struct VM *vm = FindVM();
+    v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    /* Add unconditional I/O */
+    tmp_msr.lo |= UNCOND_IO_EXIT;
+    check_vmcs_write(VMCS_PROC_CTRLS, tmp_msr.lo);
 
-  if (vmx_abort != 0) {
-    PrintTrace("VM ABORTED w/ code: %x\n", vmx_abort);
-    return -1;
-  }
+    v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    tmp_msr.lo |= HOST_ADDR_SPACE_SIZE;
+    check_vmcs_write(VMCS_EXIT_CTRLS, tmp_msr.lo);
 
-  vm->registers = regs;
 
-  if (CopyOutVMCSData(&(vm->vmcs)) != 0) {
-    PrintTrace("Could not copy out VMCS\n");
-    return -1;
-  }
-
-
-  PrintTrace("Guest esp: 0x%x (%u)\n", vm->vmcs.guestStateArea.rsp, vm->vmcs.guestStateArea.rsp);
-
-  PrintTrace("VM Exit for reason: %d (%x)\n", 
-             vm->vmcs.exitInfoFields.reason & 0x00000fff,
-             vm->vmcs.exitInfoFields.reason);  
-
-  if (vm->vmcs.exitInfoFields.reason & (0x1<<29) ) { 
-    PrintTrace("VM Exit is from VMX root operation.  Panicking\n");
-    VMXPanic();
-  }
-
-  if (vm->vmcs.exitInfoFields.reason & (0x1<<31) ) { 
-    PrintTrace("VM Exit is due to a VM entry failure.  Shouldn't happen here. Panicking\n");
-    PrintTrace_VMCSData(&(vm->vmcs));
-    VMXPanic();
-  }
-
-  switch (vm->vmcs.exitInfoFields.reason) {
-  case VM_EXIT_REASON_INFO_EXCEPTION_OR_NMI:
-    ret = HandleExceptionOrNMI(vm);
-    break;
-  case VM_EXIT_REASON_EXTERNAL_INTR:
-    ret = HandleExternalIRQExit(vm);
-    break;
-  case VM_EXIT_REASON_TRIPLE_FAULT:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INIT_SIGNAL:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_STARTUP_IPI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_IO_SMI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_OTHER_SMI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INTR_WINDOW:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_NMI_WINDOW:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_TASK_SWITCH:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_CPUID:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INVD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INVLPG:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RDPMC:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RDTSC:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RSM:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMCALL:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMCLEAR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMLAUNCH:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMPTRLD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMPTRST:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMREAD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMRESUME:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMWRITE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMXOFF:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMXON:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_CR_REG_ACCESSES:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MOV_DR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_IO_INSTR:
-    ret = HandleInOutExit(vm);
-    break;
-  case VM_EXIT_REASON_RDMSR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_WRMSR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAIL_INVALID_GUEST_STATE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAIL_MSR_LOAD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MWAIT:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MONITOR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_PAUSE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_TPR_BELOW_THRESHOLD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  default:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  }
-  
-  
-  regs = vm->registers;
-  CopyInVMCSData(&(vm->vmcs));
+    v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    check_vmcs_write(VMCS_ENTRY_CTRLS, tmp_msr.lo);
+
+
+    check_vmcs_write(VMCS_EXCP_BITMAP, 0xffffffff);
 
-  /*
-    {
-    VMCS_CLEAR(vmcs_ptr);
+    /* Cache GDTR, IDTR, and TR in host struct */
+    struct {
+        uint16_t selector;
+        addr_t   base;
+    } __attribute__((packed)) tmp_seg;
+    
+    addr_t gdtr_base;
+
+    __asm__ __volatile__(
+                        "sgdt (%0);"
+                        :
+                        : "q"(&tmp_seg)
+                        : "memory"
+                        );
+    gdtr_base = tmp_seg.base;
+    vmx_data->host_state.gdtr.base = gdtr_base;
+
+    __asm__ __volatile__(
+                        "sidt (%0);"
+                        :
+                        : "q"(&tmp_seg)
+                        : "memory"
+                        );
+    vmx_data->host_state.idtr.base = tmp_seg.base;
+
+    __asm__ __volatile__(
+                        "str (%0);"
+                        :
+                        : "q"(&tmp_seg)
+                        : "memory"
+                        );
+    vmx_data->host_state.tr.selector = tmp_seg.selector;
+
+    /* The GDTR *index* is bits 3-15 of the selector. */
+    struct tss_descriptor * desc = (struct tss_descriptor *)
+                        (gdtr_base + 8*(tmp_seg.selector>>3));
+
+    tmp_seg.base = (
+                   (desc->base1) |
+                   (desc->base2 << 16) |
+                   (desc->base3 << 24) |
+#ifdef __V3_64BIT__
+                   ((uint64_t)desc->base4 << 32)
+#else 
+                   (0)
+#endif
+                );
+
+    vmx_data->host_state.tr.base = tmp_seg.base;
+
+    if(update_vmcs_host_state(vm_info)) {
+        PrintError("Could not write host state\n");
+        return -1;
     }
-  */
 
-  PrintTrace("Returning from Do_VMM: %d\n", ret);
-  return ret;
-}
+    // Setup guest state 
+    // TODO: This is not 32-bit safe!
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, vm_info->rip);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, vm_info->vm_regs.rsp);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, 0x80000021);
 
+    vmx_ret |= vmcs_write_guest_segments(vm_info);
 
-static void ConfigureExits(struct VM *vm)
-{
-  CopyOutVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
-
-  vm->vmcs.execCtrlFields.pinCtrls |= 0 
-    // EXTERNAL_INTERRUPT_EXITING 
-    | NMI_EXITING;
-  vm->vmcs.execCtrlFields.procCtrls |= 0
-    // INTERRUPT_WINDOWS_EXIT 
-    | USE_TSC_OFFSETTING
-    | HLT_EXITING  
-    |INVLPG_EXITING           
-    |MWAIT_EXITING            
-    |RDPMC_EXITING           
-    |RDTSC_EXITING         
-    |MOVDR_EXITING         
-    |UNCONDITION_IO_EXITING
-    |MONITOR_EXITING       
-    |PAUSE_EXITING         ;
-
-  CopyInVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
-  
-  CopyOutVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, vm_info->ctrl_regs.rflags);
+    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);
 
-  vm->vmcs.exitCtrlFields.exitCtrls |= ACK_IRQ_ON_EXIT;
-  
-  CopyInVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));
+    if (vmx_ret != 0) {
+       PrintError("Could not initialize VMCS segments\n");
+        return -1;
+    }
 
+#define VMXASSIST_START 0x000d0000
+    extern uint8_t vmxassist_start[];
+    extern uint8_t vmxassist_end[];
+
+    addr_t vmxassist_dst = 0;
+    if(guest_pa_to_host_va(vm_info, VMXASSIST_START, &vmxassist_dst) == -1) {
+        PrintError("Could not find VMXASSIST destination\n");
+        return -1;
+    }
+    memcpy((void*)vmxassist_dst, vmxassist_start, vmxassist_end-vmxassist_start);
 
-/*   VMCS_READ(VM_EXIT_CTRLS, &flags); */
-/*   flags |= ACK_IRQ_ON_EXIT; */
-/*   VMCS_WRITE(VM_EXIT_CTRLS, &flags); */
+    v3_print_vmcs_host_state();
+    v3_print_vmcs_guest_state();
+    return 0;
 }
 
+static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
+    v3_pre_config_guest(info, config_ptr);
 
-extern int RunVMM();
-extern int SAFE_VM_LAUNCH();
+    struct vmx_data * data = NULL;
 
-int MyLaunch(struct VM *vm)
-{
-  ullong_t vmcs = (ullong_t)((uint_t) (vm->vmcsregion));
-  uint_t entry_eip = vm->descriptor.entry_ip;
-  uint_t exit_eip = vm->descriptor.exit_eip;
-  uint_t guest_esp = vm->descriptor.guest_esp;
-  uint_t f = 0xffffffff;
-  uint_t tmpReg = 0;
-  int ret;
-  int vmm_ret = 0;
-
-  PrintTrace("Guest ESP: 0x%x (%u)\n", guest_esp, guest_esp);
-
-  exit_eip=(uint_t)RunVMM;
-
-  PrintTrace("Clear\n");
-  VMCS_CLEAR(vmcs);
-  PrintTrace("Load\n");
-  VMCS_LOAD(vmcs);
-
-
-  PrintTrace("VMCS_LINK_PTR\n");
-  VMCS_WRITE(VMCS_LINK_PTR, &f);
-  PrintTrace("VMCS_LINK_PTR_HIGH\n");
-  VMCS_WRITE(VMCS_LINK_PTR_HIGH, &f);
-
-  SetCtrlBitsCorrectly(IA32_VMX_PINBASED_CTLS_MSR, PIN_VM_EXEC_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_PROCBASED_CTLS_MSR, PROC_VM_EXEC_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_ENTRY_CTLS_MSR, VM_ENTRY_CTRLS);
-
-  //
-  //
-  //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL);
-  //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL_HIGH);
-
-
-  /* Host state */
-  PrintTrace("Setting up host state\n");
-  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, HOST_CR0);
-  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, HOST_CR4);
-  ret = Init_VMCS_HostState();
-
-  if (ret != VMX_SUCCESS) {
-    if (ret == VMX_FAIL_VALID) {
-      PrintTrace("Init Host state: VMCS FAILED WITH ERROR\n");
-    } else {
-      PrintTrace("Init Host state: Invalid VMCS\n");
-    }
-    return ret;
-  }
+    data = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
 
-  //  PrintTrace("HOST_RIP: %x (%u)\n", exit_eip, exit_eip);
-  VMCS_WRITE(HOST_RIP, &exit_eip);
+    PrintDebug("vmx_data pointer: %p\n", (void *)data);
 
-  /* Guest state */
-  PrintTrace("Setting up guest state\n");
-  PrintTrace("GUEST_RIP: %x (%u)\n", entry_eip, entry_eip);
-  VMCS_WRITE(GUEST_RIP,&entry_eip);
+    PrintDebug("Allocating VMCS\n");
+    data->vmcs_ptr_phys = allocate_vmcs();
 
-  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, GUEST_CR0);
-  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, GUEST_CR4);
-  ret = Init_VMCS_GuestState();
+    PrintDebug("VMCS pointer: %p\n", (void *)(data->vmcs_ptr_phys));
 
-  PrintTrace("InitGuestState returned\n");
-  if (ret != VMX_SUCCESS) {
-    if (ret == VMX_FAIL_VALID) {
-      PrintTrace("Init Guest state: VMCS FAILED WITH ERROR\n");
-    } else {
-      PrintTrace("Init Guest state: Invalid VMCS\n");
+    info->vmm_data = data;
+
+    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
+
+    if (init_vmcs_bios(info) != 0) {
+       PrintError("Could not initialize VMCS BIOS\n");
+        return -1;
     }
-    return ret;
-  }
-  PrintTrace("GUEST_RSP: %x (%u)\n", guest_esp, (uint_t)guest_esp);
-  VMCS_WRITE(GUEST_RSP,&guest_esp);
 
-  //  tmpReg = 0x4100;
-  tmpReg = 0xffffffff;
-  if (VMCS_WRITE(EXCEPTION_BITMAP,&tmpReg ) != VMX_SUCCESS) {
-    PrintInfo("Bitmap error\n");
-  }
+     //v3_post_config_guest(info, config_ptr);
+
+    return 0;
+}
 
-  ConfigureExits(vm);
 
-  PrintTrace("VMCS_LAUNCH\n");
+static int start_vmx_guest(struct guest_info* info) {
+    uint32_t error = 0;
+    int ret = 0;
 
-  vm->state=VM_VMXASSIST_STARTUP;
+    PrintDebug("Attempting VMLAUNCH\n");
 
-  vmm_ret = SAFE_VM_LAUNCH();
+    ret = v3_vmx_vmlaunch(&(info->vm_regs));
+    if (ret != VMX_SUCCESS) {
+        vmcs_read(VMCS_INSTR_ERR, &error, 4);
+        PrintError("VMLAUNCH failed: %d\n", error);
 
-  PrintTrace("VMM error %d\n", vmm_ret);
+        v3_print_vmcs_guest_state();
+        v3_print_vmcs_host_state();
+    }
+    PrintDebug("Returned from VMLAUNCH ret=%d(0x%x)\n", ret, ret);
 
-  return vmm_ret;
+    return -1;
 }
 
 
 
-  
-int VMLaunch(struct VMDescriptor *vm) 
-{
-  VMCS * vmcs = CreateVMCS();
-  int rc;
 
-  ullong_t vmcs_ptr = (ullong_t)((uint_t)vmcs);
-  uint_t top = (vmcs_ptr>>32)&0xffffffff;
-  uint_t bottom = (vmcs_ptr)&0xffffffff;
 
-  theVM.vmcsregion = vmcs;
-  theVM.descriptor = *vm;
 
-  PrintTrace("vmcs_ptr_top=%x vmcs_ptr_bottom=%x, eip=%x\n", top, bottom, vm->entry_ip);
-  rc=MyLaunch(&theVM); // vmcs_ptr, vm->entry_ip, vm->exit_eip, vm->guest_esp);
-  PrintTrace("Returned from MyLaunch();\n");
-  return rc;
-}
+int v3_is_vmx_capable() {
+    v3_msr_t feature_msr;
+    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
 
+    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);
 
-VmxOnRegion * CreateVmxOnRegion() {
-  union VMX_MSR basicMSR;
-  VmxOnRegion * region = (VmxOnRegion *)(os_hooks)->allocate_pages(1);
+    PrintDebug("ECX: %p\n", (void*)ecx);
 
-  Get_MSR(IA32_VMX_BASIC_MSR, &basicMSR.regs.high, &basicMSR.regs.low);
-  //  memcpy(region, &basicMSR.vmxBasic.revision, sizeof(uint_t));
+    if (ecx & CPUID_1_ECX_VTXFLAG) {
+        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));
+       
+        PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);
 
-  *(ulong_t*)region = basicMSR.vmxBasic.revision;
+        if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
+            PrintDebug("VMX is locked -- enable in the BIOS\n");
+            return 0;
+        }
 
-  PrintInfo("VMX revision: 0x%lu\n", *(ulong_t *)region);
+    } else {
+        PrintDebug("VMX not supported on this cpu\n");
+        return 0;
+    }
 
-  return region;
+    return 1;
 }
 
-VMCS * CreateVMCS() {
-  union VMX_MSR basicMSR;
-  VMCS * vmcs = (VMCS *)(os_hooks)->allocate_pages(1);
/* Nested paging (EPT) support is not implemented yet; always report
 * its absence so the generic layer selects shadow paging. */
static int has_vmx_nested_paging() {
    return 0;
}
+
+
 
-  Get_MSR(IA32_VMX_BASIC_MSR, &basicMSR.regs.high, &basicMSR.regs.low);
-  *(ulong_t *)vmcs = basicMSR.vmxBasic.revision;
-  *(ulong_t *)((char*)vmcs + 4) = 0;
/*
 * Bring this CPU into VMX operation and install the VMX vmm ops.
 *
 * Sequence: enable CR4.VMXE (validated against the VMX fixed-0 MSR),
 * set CR0.NE, allocate a VMXON region, execute VMXON, then wire the
 * init/start/has_nested_paging entry points into vm_ops.  On any
 * failure this returns early and leaves vm_ops untouched.
 */
void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
    extern v3_cpu_arch_t v3_cpu_type;

    struct v3_msr tmp_msr;
    uint64_t ret=0;

    /* Bits set in VMX_CR4_FIXED0 must be 1 in CR4 while in VMX operation. */
    v3_get_msr(VMX_CR4_FIXED0_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
    
    /* Stage the desired CR4 value in 'ret': current CR4 with bit 13
     * (0x2000, CR4.VMXE per the Intel SDM) set.  CR4 itself is not
     * written yet. */
    __asm__ __volatile__ (
                         "movq %%cr4, %%rbx;"
                         "orq  $0x00002000, %%rbx;"
                         "movq %%rbx, %0;"
                         : "=m"(ret) 
                         :
                         : "%rbx"
                         );

    /* Only commit the staged CR4 if every fixed-0 bit is already set
     * (~ret & fixed0 == 0); otherwise VMX cannot be entered safely. */
    if((~ret & tmp_msr.value) == 0) {
        __asm__ __volatile__ (
                             "movq %0, %%cr4;"
                             :
                             : "q"(ret)
                             );
    } else {
        PrintError("Invalid CR4 Settings!\n");
        return;
    }
    /* Set bit 5 (0x20, CR0.NE per the Intel SDM), required for VMX
     * operation.  NOTE(review): unlike CR4 above, this write is not
     * validated against the CR0 fixed MSRs — acknowledged TODO below. */
      __asm__ __volatile__ (
                           "movq %%cr0, %%rbx; "
                           "orq  $0x00000020,%%rbx; "
                           "movq %%rbx, %%cr0;"
                           :
                           :
                           : "%rbx"
                           );
      //
    // Should check and return Error here.... 


    // Setup VMXON Region
    vmxon_ptr_phys = allocate_vmcs();
    PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr_phys);

    if (v3_enable_vmx(vmxon_ptr_phys) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }
       
    /* EPT probe currently always reports 0 (see has_vmx_nested_paging). */
    if (has_vmx_nested_paging() == 1) {
        v3_cpu_type = V3_VMX_EPT_CPU;
    } else {
        v3_cpu_type = V3_VMX_CPU;
    }

    // Setup the VMX specific vmm operations
    vm_ops->init_guest = &init_vmx_guest;
    vm_ops->start_guest = &start_vmx_guest;
    vm_ops->has_nested_paging = &has_vmx_nested_paging;

}
+