Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel

The other branches are similar.
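For example, to track one of the release branches you would do the same thing with that branch's name; the branch name below is only illustrative (run "git branch -r" inside the clone to see which branches actually exist):

  cd palacios
  git checkout --track -b Release-1.2 origin/Release-1.2   # substitute a real remote branch name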


Successful transition to vmxassist, then to the BIOS, where it dies in keyboard init.
diff --git a/palacios/src/palacios/vmx.c b/palacios/src/palacios/vmx.c
index e393099..ed3fe92 100644
-/* Eventually  we want to get rid of these */
+/* 
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National 
+ * Science Foundation and the Department of Energy.  
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico.  You can find out more at 
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu> 
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
+ * All rights reserved.
+ *
+ * Author: Peter Dinda <pdinda@northwestern.edu>
+ *         Jack Lange <jarusl@cs.northwestern.edu>
+ *
+ * This is free software.  You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
 
-#include <geekos/cpu.h>
-#include <geekos/io_devs.h>
-#include <geekos/io.h>
-/* ** */
 
 #include <palacios/vmx.h>
 #include <palacios/vmcs.h>
 #include <palacios/vmm.h>
-#include <palacios/vmm_util.h>
-#include <palacios/vmm_string.h>
-
-
-extern void Get_MSR(unsigned int msr, uint_t * high, uint_t * low);
-extern void Set_MSR(unsigned int msr, uint_t high, uint_t low);
-extern int Enable_VMX(ullong_t regionPtr);
-extern int cpuid_ecx(unsigned int op);
-extern int Launch_VM(ullong_t vmcsPtr, uint_t eip);
+#include <palacios/vmx_lowlevel.h>
+#include <palacios/vmm_lowlevel.h>
+#include <palacios/vmm_ctrl_regs.h>
+#include <palacios/vmm_config.h>
+#include <palacios/vm_guest_mem.h>
+#include <palacios/vmm_direct_paging.h>
+#include <palacios/vmx_io.h>
+#include <palacios/vmx_msr.h>
+
+static addr_t vmxon_ptr_phys;
+extern int v3_vmx_exit_handler();
+extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs, struct guest_info * info);
+
+static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
+{
+    int ret = 0;
+    ret = vmcs_write(field,val);
 
-#define NUMPORTS 65536
+    if (ret != VMX_SUCCESS) {
+        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
+        return 1;
+    }
 
+    return 0;
+}
 
-#define VMXASSIST_INFO_PORT   0x0e9
-#define ROMBIOS_PANIC_PORT    0x400
-#define ROMBIOS_PANIC_PORT2   0x401
-#define ROMBIOS_INFO_PORT     0x402
-#define ROMBIOS_DEBUG_PORT    0x403
+static void inline translate_segment_access(struct v3_segment * v3_seg,  
+                                           struct vmcs_segment_access * access)
+{
+    access->type = v3_seg->type;
+    access->desc_type = v3_seg->system;
+    access->dpl = v3_seg->dpl;
+    access->present = v3_seg->present;
+    access->avail = v3_seg->avail;
+    access->long_mode = v3_seg->long_mode;
+    access->db = v3_seg->db;
+    access->granularity = v3_seg->granularity;
+}
 
+int v3_update_vmcs_ctrl_fields(struct guest_info * info) {
+    int vmx_ret = 0;
+    struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
 
-extern struct vmm_os_hooks * os_hooks;
+    vmx_ret |= check_vmcs_write(VMCS_PIN_CTRLS, arch_data->pinbased_ctrls);
+    vmx_ret |= check_vmcs_write(VMCS_PROC_CTRLS, arch_data->pri_procbased_ctrls);
 
+    if(arch_data->pri_procbased_ctrls & ACTIVE_SEC_CTRLS) {
+        vmx_ret |= check_vmcs_write(VMCS_SEC_PROC_CTRLS, arch_data->sec_procbased_ctrls);
+    }
 
-static struct VM theVM;
+    vmx_ret |= check_vmcs_write(VMCS_EXIT_CTRLS, arch_data->exit_ctrls);
+    vmx_ret |= check_vmcs_write(VMCS_ENTRY_CTRLS, arch_data->entry_ctrls);
 
-static uint_t GetLinearIP(struct VM *vm)
-{
-  if (vm->state==VM_VMXASSIST_V8086_BIOS || vm->state==VM_VMXASSIST_V8086) { 
-    return vm->vmcs.guestStateArea.cs.baseAddr + vm->vmcs.guestStateArea.rip;
-  } else {
-    return vm->vmcs.guestStateArea.rip;
-  }
+    return vmx_ret;
 }
 
+int v3_update_vmcs_host_state(struct guest_info * info) {
+    int vmx_ret = 0;
+    addr_t tmp;
+    struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
+    struct v3_msr tmp_msr;
 
-static void VMXPanic()
-{
-  while (1) {}
-}
+    __asm__ __volatile__ ( "movq    %%cr0, %0; "               
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);
 
 
-#define MAX_CODE 512
-#define INSTR_OFFSET_START 17
-#define NOP_SEQ_LEN        10
-#define INSTR_OFFSET_END   (INSTR_OFFSET_START+NOP_SEQ_LEN-1)
-#define TEMPLATE_CODE_LEN  35
+    __asm__ __volatile__ ( "movq %%cr3, %0; "          
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);
 
-uint_t oldesp=0;
-uint_t myregs=0;
 
-// simply execute the instruction that is faulting and return
-static int ExecFaultingInstructionInVMM(struct VM *vm)
-{
-  uint_t address = GetLinearIP(vm);
-  myregs = (uint_t)&(vm->registers);
-  
+    __asm__ __volatile__ ( "movq %%cr4, %0; "          
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
 
-  PrintTrace("About the execute faulting instruction!\n");
-  PrintTrace("Instruction is:\n");
-  PrintTraceMemDump((void*)(address),vm->vmcs.exitInfoFields.instrLength);
-  
 
-  PrintTrace("The template code is:\n");
-  PrintTraceMemDump(&&template_code,TEMPLATE_CODE_LEN);
 
-  // clone the template code
-  //memcpy(&&template_code,code,MAX_CODE);
-  
-  // clean up the nop field
-  memset(&&template_code+INSTR_OFFSET_START,*((uchar_t *)(&&template_code+0)),NOP_SEQ_LEN);
-  // overwrite the nops with the faulting instruction
-  memcpy(&&template_code+INSTR_OFFSET_START, (void*)(address),vm->vmcs.exitInfoFields.instrLength);
-  
-  PrintTrace("Finished modifying the template code, which now is:\n");
-  PrintTraceMemDump(&&template_code,TEMPLATE_CODE_LEN);
-
-  PrintTrace("Now entering modified template code\n");
-
-
- template_code:
-  // Template code stores current registers,
-  // restores registers, has a landing pad of noops 
-  // that will be modified, restores current regs, and then returns
-  //
-  // Note that this currently ignores cr0, cr3, cr4, dr7, rsp, rip, and rflags
-  // it also blythly assumes it can exec the instruction in protected mode
-  //
-  __asm__ __volatile__ ("nop\n"               // for cloning purposes                          (1 byte)
-                       "pusha\n"             // push our current regs onto the current stack  (1 byte)
-                       "movl %0, %%eax\n"    // Get oldesp location                           (5 bytes)
-                       "movl %%esp, (%%eax)\n"  // store the current stack pointer in oldesp       (2 bytes)
-                        "movl %1, %%eax\n"    // Get regs location                             (5 bytes)
-                       "movl (%%eax), %%esp\n"  // point esp at regs                               (2 bytes)
-                       "popa\n"              // now we have the VM registers restored            (1 byte)
-                       "nop\n"               // now we execute the actual instruction         (1 byte x 10)
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       // need to copy back to the VM registers!
-                        "movl %0, %%eax\n"     // recapture oldesp location                     (5 bytes)
-                       "movl (%%eax), %%esp\n"   // now we'll get our esp back from oldesp       (2 bytes)
-                       "popa\n"              // and restore our GP regs and we're done       (1 byte)
-                       : "=m"(oldesp)
-                       : "m"(myregs)
-                       );
-  
-  PrintTrace("Survived executing the faulting instruction and returning.\n");
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
 
-  vm->vmcs.guestStateArea.rip += vm->vmcs.exitInfoFields.instrLength;
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
 
-  return 0;
+    // FS.BASE MSR
+    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);    
 
-}
+    // GS.BASE MSR
+    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);    
 
 
-int is_vmx_capable() {
-  uint_t ret;
-  union VMX_MSR featureMSR;
-  
-  ret = cpuid_ecx(1);
-  if (ret & CPUID_1_ECX_VTXFLAG) {
-    Get_MSR(IA32_FEATURE_CONTROL_MSR, &featureMSR.regs.high, &featureMSR.regs.low);
 
-    PrintTrace("MSRREGlow: 0x%.8x\n", featureMSR.regs.low);
+    __asm__ __volatile__ ( "movq %%cs, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
 
-    if ((featureMSR.regs.low & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
-      PrintDebug("VMX is locked -- enable in the BIOS\n");
-      return 0;
-    }
-  } else {
-    PrintDebug("VMX not supported on this cpu\n");
-    return 0;
-  }
+    __asm__ __volatile__ ( "movq %%ss, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
 
-  return 1;
+    __asm__ __volatile__ ( "movq %%ds, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
 
-}
+    __asm__ __volatile__ ( "movq %%es, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
 
+    __asm__ __volatile__ ( "movq %%fs, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
 
-VmxOnRegion * Init_VMX() {
-  uint_t ret;
-  VmxOnRegion * region = NULL;
+    __asm__ __volatile__ ( "movq %%gs, %0; "           
+                          : "=q"(tmp)
+                          :
+    );
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
 
+    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
 
-  region = CreateVmxOnRegion();
 
+#define SYSENTER_CS_MSR 0x00000174
+#define SYSENTER_ESP_MSR 0x00000175
+#define SYSENTER_EIP_MSR 0x00000176
 
-  ret = Enable_VMX((ullong_t)((uint_t)region));
-  if (ret == 0) {
-    PrintDebug("VMX Enabled\n");
-  } else {
-    PrintDebug("VMX failure (ret = %d)\n", ret);
-  }
+   // SYSENTER CS MSR
+    v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);
 
-  theVM.vmxonregion = region;
+    // SYSENTER_ESP MSR
+    v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);
 
-  return region;
+    // SYSENTER_EIP MSR
+    v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
+
+    return vmx_ret;
 }
 
-extern uint_t VMCS_CLEAR();
-extern uint_t VMCS_LOAD();
-extern uint_t VMCS_STORE();
-extern uint_t VMCS_LAUNCH();
-extern uint_t VMCS_RESUME();
-extern uint_t Init_VMCS_HostState();
-extern uint_t Init_VMCS_GuestState();
 
-void SetCtrlBitsCorrectly(int msrno, int vmcsno)
+int v3_update_vmcs_guest_state(struct guest_info * info)
 {
-  uint_t reserved =0;
-  union VMX_MSR msr;
-
-  PrintTrace("SetCtrlBitsCorrectly(%x,%x)\n", msrno, vmcsno);
-  Get_MSR(msrno, &msr.regs.high, &msr.regs.low);
-  PrintTrace("MSR %x = %x : %x \n", msrno, msr.regs.high, msr.regs.low);
-  reserved = msr.regs.low;
-  reserved &= msr.regs.high;
-  VMCS_WRITE(vmcsno, &reserved);
-}
+    int vmx_ret = 0;
 
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, info->rip);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);
+    
 
-void SetCRBitsCorrectly(int msr0no, int msr1no, int vmcsno)
-{
-  uint_t reserved =0;
-  union VMX_MSR msr0, msr1;
-
-  PrintTrace("SetCRBitsCorrectly(%x,%x,%x)\n",msr0no,msr1no,vmcsno);
-  Get_MSR(msr0no, &msr0.regs.high, &msr0.regs.low);
-  Get_MSR(msr1no, &msr1.regs.high, &msr1.regs.low);
-  PrintTrace("MSR %x = %x, %x =  %x \n", msr0no, msr0.regs.low, msr1no, msr1.regs.low);
-  reserved = msr0.regs.low;
-  reserved &= msr1.regs.low;
-  VMCS_WRITE(vmcsno, &reserved);
-}
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);
 
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
 
-extern int Get_CR2();
-extern int vmRunning;
 
 
-static int PanicUnhandledVMExit(struct VM *vm)
-{
-  PrintInfo("Panicking due to VMExit with reason %u\n", vm->vmcs.exitInfoFields.reason);
-  PrintTrace("Panicking due to VMExit with reason %u\n", vm->vmcs.exitInfoFields.reason);
-  PrintTrace_VMCS_ALL();
-  PrintTrace_VMX_Regs(&(vm->registers));
-  VMXPanic();
-  return 0;
-}
+    /*** Write VMCS Segments ***/
+    struct vmcs_segment_access access;
 
+    memset(&access, 0, sizeof(access));
 
-static int HandleVMPrintsAndPanics(struct VM *vm, uint_t port, uint_t data)
-{
-  if (port==VMXASSIST_INFO_PORT &&
-      (vm->state == VM_VMXASSIST_STARTUP || 
-       vm->state == VM_VMXASSIST_V8086_BIOS ||
-       vm->state == VM_VMXASSIST_V8086)) { 
-    // Communication channel from VMXAssist
-    PrintTrace("VMXASSIST Output Port\n");
-    PrintDebug("%c",data&0xff);
-    return 1;
-  } 
-
-  if ((port==ROMBIOS_PANIC_PORT || 
-       port==ROMBIOS_PANIC_PORT2 || 
-       port==ROMBIOS_DEBUG_PORT ||
-       port==ROMBIOS_INFO_PORT) &&
-      (vm->state==VM_VMXASSIST_V8086_BIOS)) {
-    // rombios is communicating
-    PrintTrace("ROMBIOS Output Port\n");
-    //    PrintDebug("%c",data&0xff);
-    return 1;
-  }
+    /* CS Segment */
+    translate_segment_access(&(info->segments.cs), &access);
 
-  if (port==BOOT_STATE_CARD_PORT && vm->state==VM_VMXASSIST_V8086_BIOS) { 
-    // rombios is sending something to the display card
-    PrintTrace("Hex Display: 0x%x\n",data&0xff);
-    return 1;
-  }
-  return 0;
-}
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
 
-static int HandleInOutExit(struct VM *vm)
-{
-  uint_t address;
-
-  struct VMCSExitInfoFields *exitinfo = &(vm->vmcs.exitInfoFields);
-  struct VMExitIOQual * qual = (struct VMExitIOQual *)&(vm->vmcs.exitInfoFields.qualification);
-  struct VMXRegs *regs = &(vm->registers);
-
-  address=GetLinearIP(vm);
-
-  PrintTrace("Handling Input/Output Instruction Exit\n");
-
-  PrintTrace_VMX_Regs(regs);
-
-  PrintTrace("Qualifications=0x%x\n", exitinfo->qualification);
-  PrintTrace("Reason=0x%x\n", exitinfo->reason);
-  PrintTrace("IO Port: 0x%x (%d)\n", qual->port, qual->port);
-  PrintTrace("Instruction Info=%x\n", exitinfo->instrInfo);
-  PrintTrace("%x : %s %s %s instruction of length %d for %d bytes from/to port 0x%x\n",
-                  address,
-                  qual->dir == 0 ? "output" : "input",
-                  qual->string ==0 ? "nonstring" : "STRING",
-                  qual->REP == 0 ? "with no rep" : "WITH REP",
-                  exitinfo->instrLength, 
-                  qual->accessSize==0 ? 1 : qual->accessSize==1 ? 2 : 4,
-                  qual->port);
-
-  if ((qual->port == PIC_MASTER_CMD_ISR_PORT) ||
-      (qual->port == PIC_MASTER_IMR_PORT)     ||
-      (qual->port == PIC_SLAVE_CMD_ISR_PORT)  ||
-      (qual->port == PIC_SLAVE_IMR_PORT)) {
-    PrintTrace( "PIC Access\n");
-  }
-                  
-
-  if ((qual->dir == 1) && (qual->REP == 0) && (qual->string == 0)) { 
-    char byte = In_Byte(qual->port);
-
-    vm->vmcs.guestStateArea.rip += exitinfo->instrLength;
-    regs->eax = (regs->eax & 0xffffff00) | byte;
-    PrintTrace("Returning 0x%x in eax\n", (regs->eax));
-  }
-
-  if (qual->dir==0 && qual->REP==0 && qual->string==0) { 
-    // See if we need to handle the outb as a signal or
-    // print from the VM
-    if (HandleVMPrintsAndPanics(vm,qual->port,regs->eax)) {
-    } else {
-      // If not, just go ahead and do the outb
-      Out_Byte(qual->port,regs->eax);
-      PrintTrace("Wrote 0x%x to port\n",(regs->eax));
-    }
-    vm->vmcs.guestStateArea.rip += exitinfo->instrLength;
-  }
+    /* SS Segment */
+    memset(&access, 0, sizeof(access));
+    translate_segment_access(&(info->segments.ss), &access);
 
-  return 0;
-}  
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
 
+    /* DS Segment */
+    memset(&access, 0, sizeof(access));
+    translate_segment_access(&(info->segments.ds), &access);
 
-static int HandleExternalIRQExit(struct VM *vm)
-{
-  struct VMCSExitInfoFields * exitinfo = &(vm->vmcs.exitInfoFields);
-  struct VMExitIntInfo * intInfo  = (struct VMExitIntInfo *)&(vm->vmcs.exitInfoFields.intInfo);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
 
-  PrintTrace("External Interrupt captured\n");
-  PrintTrace("IntInfo: %x\n", exitinfo->intInfo);
 
+    /* ES Segment */
+    memset(&access, 0, sizeof(access));
+    translate_segment_access(&(info->segments.es), &access);
 
-  if (!intInfo->valid) {
-     // interrupts are off, but this interrupt is not acknoledged (still pending)
-     // so we turn on interrupts to deliver appropriately in the
-     // host
-    PrintTrace("External Interrupt is invald.  Turning Interrupts back on\n");
-    asm("sti");
-    return 0;
-  } 
-
-  // At this point, interrupts are off and the interrupt has been 
-  // acknowledged.  We will now handle the interrupt ourselves 
-  // and turn interrupts  back on in the host
-
-  PrintTrace("type: %d\n", intInfo->type);
-  PrintTrace("number: %d\n", intInfo->nr);
-
-  PrintTrace("Interrupt %d occuring now and handled by HandleExternalIRQExit\n",intInfo->nr);
-
-  switch (intInfo->type) {
-  case 0:  {  // ext. IRQ
-    // In the following, we construct an "int x" instruction
-    // where x is the specific interrupt number that is raised
-    // then we execute that instruciton
-    // because we are in host context, that means it is delivered as normal
-    // through the host IDT
-     
-     ((char*)(&&ext_int_seq_start))[1] = intInfo->nr;
-     PrintTrace("Interrupt instruction setup done %x\n", *((ushort_t *)(&&ext_int_seq_start)));
-     
-ext_int_seq_start:
-     asm("int $0");
-  }
-
-    break;
-  case 2: // NMI
-    PrintTrace("Type: NMI\n");
-    break;
-  case 3: // hw exception
-    PrintTrace("Type: HW Exception\n");
-    break;
-  case 4: // sw exception
-    PrintTrace("Type: SW Exception\n");
-    break;
-  default:
-    PrintTrace("Invalid Interrupt Type\n");
-    return -1;
-  }
-  
-  if (intInfo->valid && intInfo->errorCode) {
-    PrintTrace("IntError: %x\n", exitinfo->intErrorCode);
-  }
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
+
+    /* FS Segment */
+    memset(&access, 0, sizeof(access));
+    translate_segment_access(&(info->segments.fs), &access);
+
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
+
+    /* GS Segment */
+    memset(&access, 0, sizeof(access));
+    translate_segment_access(&(info->segments.gs), &access);
+
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
+
+    /* LDTR segment */
+    memset(&access, 0, sizeof(access));
+    translate_segment_access(&(info->segments.ldtr), &access);
+
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);
+
+    /* TR Segment */
+    memset(&access, 0, sizeof(access));
+    translate_segment_access(&(info->segments.tr), &access);
 
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);
 
-  return 0;
+    /* GDTR Segment */
+
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);
+
+    /* IDTR Segment*/
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);
+
+    return vmx_ret;
 
 }
 
 
 
-void DecodeCurrentInstruction(struct VM *vm, struct Instruction *inst)
-{
-  // this is a gruesome hack
-  uint_t address = GetLinearIP(vm);
-  uint_t length = vm->vmcs.exitInfoFields.instrLength;
-  unsigned char *t = (unsigned char *) address;
 
+#if 0
+// For the 32 bit reserved bit fields 
+// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
+static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
+    v3_msr_t mask_msr;
 
+    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);
+
+    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);
+
+    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);
+
+    val |= mask_msr.lo;
+    val |= mask_msr.hi;
   
-  PrintTrace("DecodeCurrentInstruction: instruction is\n");
-  PrintTraceMemDump(t,length);
-  
-  if (length==3 && t[0]==0x0f && t[1]==0x22 && t[2]==0xc0) { 
-    // mov from eax to cr0
-    // usually used to signal
-    inst->type=VM_MOV_TO_CR0;
-    inst->address=address;
-    inst->size=length;
-    inst->input1=vm->registers.eax;
-    inst->input2=vm->vmcs.guestStateArea.cr0;
-    inst->output=vm->registers.eax;
-    PrintTrace("MOV FROM EAX TO CR0\n");
-  } else {
-    inst->type=VM_UNKNOWN_INST;
-  }
+    return val;
 }
 
 
-static void V8086ModeSegmentRegisterFixup(struct VM *vm)
-{
-  vm->vmcs.guestStateArea.cs.baseAddr=vm->vmcs.guestStateArea.cs.selector<<4;
-  vm->vmcs.guestStateArea.es.baseAddr=vm->vmcs.guestStateArea.es.selector<<4;
-  vm->vmcs.guestStateArea.ss.baseAddr=vm->vmcs.guestStateArea.ss.selector<<4;
-  vm->vmcs.guestStateArea.ds.baseAddr=vm->vmcs.guestStateArea.ds.selector<<4;
-  vm->vmcs.guestStateArea.fs.baseAddr=vm->vmcs.guestStateArea.fs.selector<<4;
-  vm->vmcs.guestStateArea.gs.baseAddr=vm->vmcs.guestStateArea.gs.selector<<4;
-}
 
-static void SetupV8086ModeForBoot(struct VM *vm)
-{
-  vm->state = VM_VMXASSIST_V8086_BIOS;
+static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
+    v3_msr_t msr0, msr1;
+    addr_t msr0_val, msr1_val;
 
-  // Put guest into V8086 mode on return
-  vm->vmcs.guestStateArea.rflags |= EFLAGS_VM | EFLAGS_IOPL_HI | EFLAGS_IOPL_LO ;
-  
-  // We will start at f000:fff0 on return
-  //
-  // We want this to look as much as possible as a processor
-  // reset
-  vm->vmcs.guestStateArea.rip = 0xfff0;  // note, 16 bit rip
-  vm->vmcs.guestStateArea.cs.selector = 0xf000;
-  vm->vmcs.guestStateArea.cs.limit=0xffff;
-  vm->vmcs.guestStateArea.cs.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.ss.selector = 0x0000;
-  vm->vmcs.guestStateArea.ss.limit=0xffff;
-  vm->vmcs.guestStateArea.ss.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.ds.selector = 0x0000;
-  vm->vmcs.guestStateArea.ds.limit=0xffff;
-  vm->vmcs.guestStateArea.ds.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.es.selector = 0x0000;
-  vm->vmcs.guestStateArea.es.limit=0xffff;
-  vm->vmcs.guestStateArea.es.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.fs.selector = 0x0000;
-  vm->vmcs.guestStateArea.fs.limit=0xffff;
-  vm->vmcs.guestStateArea.fs.access.as_dword = 0xf3;
-
-  vm->vmcs.guestStateArea.gs.selector = 0x0000;
-  vm->vmcs.guestStateArea.gs.limit=0xffff;
-  vm->vmcs.guestStateArea.gs.access.as_dword = 0xf3;
+    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);
+
+    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
+    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);
   
-  V8086ModeSegmentRegisterFixup(vm);
+    // This generates a mask that is the natural bit width of the CPU
+    msr0_val = msr0.value;
+    msr1_val = msr1.value;
 
-  PrintTrace_VMCSData(&(vm->vmcs));
+    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);
 
+    val |= msr0_val;
+    val |= msr1_val;
+
+    return val;
 }
-  
 
 
-static int HandleExceptionOrNMI(struct VM *vm)
+
+#endif
+
+
+static addr_t allocate_vmcs() 
 {
-  struct Instruction inst;
-  uint_t num;
-  uint_t type;
-  uint_t errorvalid;
-  uint_t error;
-  uint_t ext=0;
-  uint_t idt=0;
-  uint_t ti=0;
-  uint_t selectorindex=0;
-
-  PrintTrace("Exception or NMI occurred\n");
-  
-  num=vm->vmcs.exitInfoFields.intInfo & 0xff;
-  type=(vm->vmcs.exitInfoFields.intInfo & 0x700)>>8;
-  errorvalid=(vm->vmcs.exitInfoFields.intInfo & 0x800)>>11;
-  if (errorvalid) { 
-    error=vm->vmcs.exitInfoFields.intErrorCode;
-    ext=error&0x1;
-    idt=(error&0x2)>>1;
-    ti=(error&0x4)>>2;
-    selectorindex=(error>>3)&0xffff;
-  }
-  
-  PrintTrace("Exception %d now - handled by HandleExceptionOrNMI\n",num);
+    reg_ex_t msr;
+    PrintDebug("Allocating page\n");
+    struct vmcs_data * vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
 
-  PrintTrace("Exception Number %u : %s\n", num, exception_names[num]);
-  PrintTrace("Exception Type %u : %s\n", type, exception_type_names[type]);
-  if (errorvalid) { 
-    if (ext) { 
-      PrintTrace("External\n");
-    } else {
-      PrintTrace("%s - Selector Index is %u\n", idt ? "IDT" : ti ? "LDT" : "GDT", selectorindex);
-    }
-  }
-
-  DecodeCurrentInstruction(vm,&inst);
-
-  if (inst.type==VM_MOV_TO_CR0) {
-    PrintTrace("MOV TO CR0, oldvalue=0x%x, newvalue=0x%x\n",inst.input2, inst.input1);
-    if ((inst.input2 & CR0_PE) && !(inst.input1 & CR0_PE) && vm->state==VM_VMXASSIST_STARTUP) {
-      // This is VMXAssist signalling for us to turn on V8086 mode and
-      // jump into the bios
-      PrintTrace("VMXAssist is signaling us for switch to V8086 mode and jump to 0xf000:fff0\n");
-      SetupV8086ModeForBoot(vm);
-      goto leave;
-    } else {
-      PrintTrace("Instruction is a write to CR0, but we don't understand it so we'll just exec it\n");
-    } 
-  } 
 
+    memset(vmcs_page, 0, 4096);
 
-  PrintTrace("Trying to execute the faulting instruction in VMM context now\n");
-  ExecFaultingInstructionInVMM(vm);
+    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
+    
+    vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
+    PrintDebug("VMX Revision: 0x%x\n",vmcs_page->revision);
 
-    leave:
-  //
-  //PanicUnhandledVMExit(vmcs,regs);
-  //VMXPanic();
-  return 0;
+    return (addr_t)V3_PAddr((void *)vmcs_page);
 }
 
+#if 0
+
+#endif
 
-static struct VM *FindVM()
+#if 0
+static int init_vmcs_bios(struct guest_info * vm_info) 
 {
-  return &theVM;
-}
+#if 0
 
+    setup_v8086_mode_for_boot(vm_info);
 
-int Do_VMM(struct VMXRegs regs) 
-{
 
-  ullong_t vmcs_ptr = 0;
-  uint_t vmcs_ptr_low = 0;
-  int ret = 0;
-  uint_t vmx_abort = 0;
+    // Setup guest state 
+    // TODO: This is not 32-bit safe!
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, vm_info->rip);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, vm_info->vm_regs.rsp);
+    
 
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, vm_info->ctrl_regs.cr0);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, vm_info->ctrl_regs.cr4);
 
-  
-  PrintTrace("Vm Exit\n");
-  ret = VMCS_STORE(&vmcs_ptr);
-  vmcs_ptr &= 0xffffffff;
-  vmcs_ptr_low +=  vmcs_ptr;
+    vmx_ret |= vmcs_write_guest_segments(vm_info);
 
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, vm_info->ctrl_regs.rflags);
+#define DEBUGCTL_MSR 0x1d9
 
+    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
 
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, 0x400);
 
-  PrintTrace("ret=%d\n", ret);
-  PrintTrace("Revision: %x\n", *(uint_t *)(vmcs_ptr_low));
-  vmx_abort = *(uint_t*)(((char *)vmcs_ptr_low)+4);
+    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);
+
+    if (vmx_ret != 0) {
+       PrintError("Could not initialize VMCS segments\n");
+        return -1;
+    }
+
+#endif
+    return 0;
+}
+#endif
+
+static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
+    v3_pre_config_guest(info, config_ptr);
+
+    struct vmx_data * vmx_data = NULL;
+
+    vmx_data = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
+
+    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_data);
+
+    PrintDebug("Allocating VMCS\n");
+    vmx_data->vmcs_ptr_phys = allocate_vmcs();
+
+    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_data->vmcs_ptr_phys));
+
+    info->vmm_data = vmx_data;
+
+    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
     
-  struct VM *vm = FindVM();
+    // TODO: Fix vmcs fields so they're 32-bit
+    int vmx_ret = 0;
 
-  if (vmx_abort != 0) {
-    PrintTrace("VM ABORTED w/ code: %x\n", vmx_abort);
-    return -1;
-  }
+    PrintDebug("Clearing VMCS: %p\n",(void*)vmx_data->vmcs_ptr_phys);
+    vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);
 
-  vm->registers = regs;
+    if (vmx_ret != VMX_SUCCESS) {
+        PrintError("VMCLEAR failed\n");
+        return -1;
+    }
 
-  if (CopyOutVMCSData(&(vm->vmcs)) != 0) {
-    PrintTrace("Could not copy out VMCS\n");
-    return -1;
-  }
-
-
-  PrintTrace("Guest esp: 0x%x (%u)\n", vm->vmcs.guestStateArea.rsp, vm->vmcs.guestStateArea.rsp);
-
-  PrintTrace("VM Exit for reason: %d (%x)\n", 
-             vm->vmcs.exitInfoFields.reason & 0x00000fff,
-             vm->vmcs.exitInfoFields.reason);  
-
-  if (vm->vmcs.exitInfoFields.reason & (0x1<<29) ) { 
-    PrintTrace("VM Exit is from VMX root operation.  Panicking\n");
-    VMXPanic();
-  }
-
-  if (vm->vmcs.exitInfoFields.reason & (0x1<<31) ) { 
-    PrintTrace("VM Exit is due to a VM entry failure.  Shouldn't happen here. Panicking\n");
-    PrintTrace_VMCSData(&(vm->vmcs));
-    VMXPanic();
-  }
-
-  switch (vm->vmcs.exitInfoFields.reason) {
-  case VM_EXIT_REASON_INFO_EXCEPTION_OR_NMI:
-    ret = HandleExceptionOrNMI(vm);
-    break;
-  case VM_EXIT_REASON_EXTERNAL_INTR:
-    ret = HandleExternalIRQExit(vm);
-    break;
-  case VM_EXIT_REASON_TRIPLE_FAULT:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INIT_SIGNAL:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_STARTUP_IPI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_IO_SMI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_OTHER_SMI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INTR_WINDOW:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_NMI_WINDOW:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_TASK_SWITCH:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_CPUID:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INVD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INVLPG:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RDPMC:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RDTSC:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RSM:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMCALL:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMCLEAR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMLAUNCH:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMPTRLD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMPTRST:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMREAD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMRESUME:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMWRITE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMXOFF:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMXON:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_CR_REG_ACCESSES:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MOV_DR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_IO_INSTR:
-    ret = HandleInOutExit(vm);
-    break;
-  case VM_EXIT_REASON_RDMSR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_WRMSR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAIL_INVALID_GUEST_STATE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAIL_MSR_LOAD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MWAIT:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MONITOR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_PAUSE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_TPR_BELOW_THRESHOLD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  default:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  }
-  
-  
-  regs = vm->registers;
-  CopyInVMCSData(&(vm->vmcs));
+    PrintDebug("Loading VMCS\n");
+    vmx_ret = vmcs_load(vmx_data->vmcs_ptr_phys);
 
-  /*
-    {
-    VMCS_CLEAR(vmcs_ptr);
+    if (vmx_ret != VMX_SUCCESS) {
+        PrintError("VMPTRLD failed\n");
+        return -1;
     }
-  */
 
-  PrintTrace("Returning from Do_VMM: %d\n", ret);
-  return ret;
-}
 
 
-static void ConfigureExits(struct VM *vm)
-{
-  CopyOutVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
-
-  vm->vmcs.execCtrlFields.pinCtrls |= 0 
-    // EXTERNAL_INTERRUPT_EXITING 
-    | NMI_EXITING;
-  vm->vmcs.execCtrlFields.procCtrls |= 0
-    // INTERRUPT_WINDOWS_EXIT 
-    | USE_TSC_OFFSETTING
-    | HLT_EXITING  
-    |INVLPG_EXITING           
-    |MWAIT_EXITING            
-    |RDPMC_EXITING           
-    |RDTSC_EXITING         
-    |MOVDR_EXITING         
-    |UNCONDITION_IO_EXITING
-    |MONITOR_EXITING       
-    |PAUSE_EXITING         ;
-
-  CopyInVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
-  
-  CopyOutVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));
+    /******* Setup Host State **********/
+
+    /* Cache GDTR, IDTR, and TR in host struct */
+    addr_t gdtr_base;
+    struct {
+        uint16_t selector;
+        addr_t   base;
+    } __attribute__((packed)) tmp_seg;
+    
+
+    __asm__ __volatile__(
+                        "sgdt (%0);"
+                        :
+                        : "q"(&tmp_seg)
+                        : "memory"
+                        );
+    gdtr_base = tmp_seg.base;
+    vmx_data->host_state.gdtr.base = gdtr_base;
+
+    __asm__ __volatile__(
+                        "sidt (%0);"
+                        :
+                        : "q"(&tmp_seg)
+                        : "memory"
+                        );
+    vmx_data->host_state.idtr.base = tmp_seg.base;
+
+    __asm__ __volatile__(
+                        "str (%0);"
+                        :
+                        : "q"(&tmp_seg)
+                        : "memory"
+                        );
+    vmx_data->host_state.tr.selector = tmp_seg.selector;
+
+    /* The GDTR *index* is bits 3-15 of the selector. */
+    struct tss_descriptor * desc = (struct tss_descriptor *)
+                        (gdtr_base + 8*(tmp_seg.selector>>3));
+
+    tmp_seg.base = (
+                   (desc->base1) |
+                   (desc->base2 << 16) |
+                   (desc->base3 << 24) |
+#ifdef __V3_64BIT__
+                   ((uint64_t)desc->base4 << 32)
+#else 
+                   (0)
+#endif
+                );
+
+    vmx_data->host_state.tr.base = tmp_seg.base;
 
-  vm->vmcs.exitCtrlFields.exitCtrls |= ACK_IRQ_ON_EXIT;
   
-  CopyInVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));
 
+    /********** Setup and VMX Control Fields from MSR ***********/
+    struct v3_msr tmp_msr;
 
-/*   VMCS_READ(VM_EXIT_CTRLS, &flags); */
-/*   flags |= ACK_IRQ_ON_EXIT; */
-/*   VMCS_WRITE(VM_EXIT_CTRLS, &flags); */
-}
+    v3_get_msr(VMX_PINBASED_CTLS_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
+    /* Add NMI exiting */
+    vmx_data->pinbased_ctrls =  tmp_msr.lo | NMI_EXIT;
 
+    v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_data->pri_procbased_ctrls = tmp_msr.lo;
 
-extern int RunVMM();
-extern int SAFE_VM_LAUNCH();
+    v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_data->exit_ctrls = tmp_msr.lo ;
+
+    v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_data->entry_ctrls = tmp_msr.lo;
+
+    struct vmx_exception_bitmap excp_bmap;
+    excp_bmap.value = 0xffffffff;
+    vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
 
-int MyLaunch(struct VM *vm)
-{
-  ullong_t vmcs = (ullong_t)((uint_t) (vm->vmcsregion));
-  uint_t entry_eip = vm->descriptor.entry_ip;
-  uint_t exit_eip = vm->descriptor.exit_eip;
-  uint_t guest_esp = vm->descriptor.guest_esp;
-  uint_t f = 0xffffffff;
-  uint_t tmpReg = 0;
-  int ret;
-  int vmm_ret = 0;
 
-  PrintTrace("Guest ESP: 0x%x (%u)\n", guest_esp, guest_esp);
+    /******* Setup VMXAssist guest state ***********/
 
-  exit_eip=(uint_t)RunVMM;
+    info->rip = 0xd0000;
+    info->vm_regs.rsp = 0x80000;
 
-  PrintTrace("Clear\n");
-  VMCS_CLEAR(vmcs);
-  PrintTrace("Load\n");
-  VMCS_LOAD(vmcs);
+    struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
+    flags->rsvd1 = 1;
 
+    /* Print Control MSRs */
+    v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    PrintDebug("CR0 MSR: %p\n", (void*)tmp_msr.value);
+    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    PrintDebug("CR4 MSR: %p\n", (void*)tmp_msr.value);
 
-  PrintTrace("VMCS_LINK_PTR\n");
-  VMCS_WRITE(VMCS_LINK_PTR, &f);
-  PrintTrace("VMCS_LINK_PTR_HIGH\n");
-  VMCS_WRITE(VMCS_LINK_PTR_HIGH, &f);
 
+#define GUEST_CR0 0x80000031
+#define GUEST_CR4 0x00002000
+    info->ctrl_regs.cr0 = GUEST_CR0;
+    info->ctrl_regs.cr4 = GUEST_CR4;
+   
+    /* Setup paging */
+    if(info->shdw_pg_mode == SHADOW_PAGING) {
+        PrintDebug("Creating initial shadow page table\n");
+
+        if(v3_init_passthrough_pts(info) == -1) {
+            PrintError("Could not initialize passthrough page tables\n");
+            return -1;
+        }
+
+        info->shdw_pg_state.guest_cr0 = CR0_PE;
+        PrintDebug("Created\n");
+
+        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG) );
+        vmx_ret |= check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
+        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
+
+        info->ctrl_regs.cr3 = info->direct_map_pt;
+
+        // vmx_data->pinbased_ctrls |= NMI_EXIT;
+
+        /* Add unconditional I/O and CR exits */
+        vmx_data->pri_procbased_ctrls |= UNCOND_IO_EXIT  
+                                        | CR3_LOAD_EXIT  
+                                        | CR3_STORE_EXIT;
  
-  SetCtrlBitsCorrectly(IA32_VMX_PINBASED_CTLS_MSR, PIN_VM_EXEC_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_PROCBASED_CTLS_MSR, PROC_VM_EXEC_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_ENTRY_CTLS_MSR, VM_ENTRY_CTRLS);
-
-  //
-  //
-  //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL);
-  //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL_HIGH);
-
-
-  /* Host state */
-  PrintTrace("Setting up host state\n");
-  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, HOST_CR0);
-  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, HOST_CR4);
-  ret = Init_VMCS_HostState();
-
-  if (ret != VMX_SUCCESS) {
-    if (ret == VMX_FAIL_VALID) {
-      PrintTrace("Init Host state: VMCS FAILED WITH ERROR\n");
-    } else {
-      PrintTrace("Init Host state: Invalid VMCS\n");
+        vmx_data->exit_ctrls |= HOST_ADDR_SPACE_SIZE;
     }
-    return ret;
-  }
 
-  //  PrintTrace("HOST_RIP: %x (%u)\n", exit_eip, exit_eip);
-  VMCS_WRITE(HOST_RIP, &exit_eip);
+    struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);
 
-  /* Guest state */
-  PrintTrace("Setting up guest state\n");
-  PrintTrace("GUEST_RIP: %x (%u)\n", entry_eip, entry_eip);
-  VMCS_WRITE(GUEST_RIP,&entry_eip);
+    int i;
+    for(i=0; i < 10; i++)
+    {
+        seg_reg[i].selector = 3<<3;
+        seg_reg[i].limit = 0xffff;
+        seg_reg[i].base = 0x0;
+    }
+    info->segments.cs.selector = 2<<3;
+
+    /* Set only the segment registers */
+    for(i=0; i < 6; i++) {
+        seg_reg[i].limit = 0xfffff;
+        seg_reg[i].granularity = 1;
+        seg_reg[i].type = 3;
+        seg_reg[i].system = 1;
+        seg_reg[i].dpl = 0;
+        seg_reg[i].present = 1;
+        seg_reg[i].db = 1;
+    }
+    info->segments.cs.type = 0xb;
 
-  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, GUEST_CR0);
-  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, GUEST_CR4);
-  ret = Init_VMCS_GuestState();
+    info->segments.ldtr.selector = 0x20;
+    info->segments.ldtr.type = 2;
+    info->segments.ldtr.system = 0;
+    info->segments.ldtr.present = 1;
+    info->segments.ldtr.granularity = 0;
+    
+    /* Setup IO map */
+    (void) v3_init_vmx_io_map(info);
+    (void) v3_init_vmx_msr_map(info);
+
+    /************* Map in GDT and vmxassist *************/
+
+    uint64_t  gdt[] __attribute__ ((aligned(32))) = {
+        0x0000000000000000ULL,         /* 0x00: reserved */
+        0x0000830000000000ULL,         /* 0x08: 32-bit TSS */
+       //0x0000890000000000ULL,                /* 0x08: 32-bit TSS */
+        0x00CF9b000000FFFFULL,         /* 0x10: CS 32-bit */
+        0x00CF93000000FFFFULL,         /* 0x18: DS 32-bit */
+        0x000082000000FFFFULL,         /* 0x20: LDTR 32-bit */
+    };
+
+#define VMXASSIST_GDT   0x10000
+    addr_t vmxassist_gdt = 0;
+    if(guest_pa_to_host_va(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
+        PrintError("Could not find VMXASSIST GDT destination\n");
+        return -1;
+    }
+    memcpy((void*)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);
+        
+    info->segments.gdtr.base = VMXASSIST_GDT;
+
+#define VMXASSIST_TSS   0x40000
+    addr_t vmxassist_tss = VMXASSIST_TSS;
+    gdt[0x08 / sizeof(gdt[0])] |=
+       ((vmxassist_tss & 0xFF000000) << (56-24)) |
+       ((vmxassist_tss & 0x00FF0000) << (32-16)) |
+       ((vmxassist_tss & 0x0000FFFF) << (16)) |
+       (8392 - 1);
+
+    info->segments.tr.selector = 0x08;
+    info->segments.tr.base = vmxassist_tss;
+
+    //info->segments.tr.type = 0x9; 
+    info->segments.tr.type = 0x3;
+    info->segments.tr.system = 0;
+    info->segments.tr.present = 1;
+    info->segments.tr.granularity = 0;
 
-  PrintTrace("InitGuestState returned\n");
-  if (ret != VMX_SUCCESS) {
-    if (ret == VMX_FAIL_VALID) {
-      PrintTrace("Init Guest state: VMCS FAILED WITH ERROR\n");
-    } else {
-      PrintTrace("Init Guest state: Invalid VMCS\n");
+#define VMXASSIST_START 0x000d0000
+    extern uint8_t v3_vmxassist_start[];
+    extern uint8_t v3_vmxassist_end[];
+
+    addr_t vmxassist_dst = 0;
+    if(guest_pa_to_host_va(info, VMXASSIST_START, &vmxassist_dst) == -1) {
+        PrintError("Could not find VMXASSIST destination\n");
+        return -1;
     }
-    return ret;
-  }
-  PrintTrace("GUEST_RSP: %x (%u)\n", guest_esp, (uint_t)guest_esp);
-  VMCS_WRITE(GUEST_RSP,&guest_esp);
+    memcpy((void*)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
+    
+    /*** Write all the info to the VMCS ***/
+
+#define DEBUGCTL_MSR 0x1d9
+    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
+
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, 0x400);
 
-  //  tmpReg = 0x4100;
-  tmpReg = 0xffffffff;
-  if (VMCS_WRITE(EXCEPTION_BITMAP,&tmpReg ) != VMX_SUCCESS) {
-    PrintInfo("Bitmap error\n");
-  }
+    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);
+    
+    if(v3_update_vmcs_ctrl_fields(info)) {
+        PrintError("Could not write control fields!\n");
+        return -1;
+    }
+    
+    if(v3_update_vmcs_host_state(info)) {
+        PrintError("Could not write host state\n");
+        return -1;
+    }
 
-  ConfigureExits(vm);
 
-  PrintTrace("VMCS_LAUNCH\n");
+    if(v3_update_vmcs_guest_state(info) != VMX_SUCCESS) {
+        PrintError("Writing guest state failed!\n");
+        return -1;
+    }
 
-  vm->state=VM_VMXASSIST_STARTUP;
+    v3_print_vmcs();
 
-  vmm_ret = SAFE_VM_LAUNCH();
+    vmx_data->state = VMXASSIST_STARTUP;
 
-  PrintTrace("VMM error %d\n", vmm_ret);
+    v3_post_config_guest(info, config_ptr);
 
-  return vmm_ret;
+    return 0;
 }
 
 
+static int start_vmx_guest(struct guest_info* info) {
+    uint32_t error = 0;
+    int ret = 0;
 
-  
-int VMLaunch(struct VMDescriptor *vm) 
-{
-  VMCS * vmcs = CreateVMCS();
-  int rc;
+    PrintDebug("Attempting VMLAUNCH\n");
+
+    ret = v3_vmx_vmlaunch(&(info->vm_regs), info);
+    if (ret != VMX_SUCCESS) {
+        vmcs_read(VMCS_INSTR_ERR, &error);
+        PrintError("VMLAUNCH failed: %d\n", error);
 
-  ullong_t vmcs_ptr = (ullong_t)((uint_t)vmcs);
-  uint_t top = (vmcs_ptr>>32)&0xffffffff;
-  uint_t bottom = (vmcs_ptr)&0xffffffff;
+        v3_print_vmcs();
 
-  theVM.vmcsregion = vmcs;
-  theVM.descriptor = *vm;
+    }
+    PrintDebug("Returned from VMLAUNCH ret=%d(0x%x)\n", ret, ret);
 
-  PrintTrace("vmcs_ptr_top=%x vmcs_ptr_bottom=%x, eip=%x\n", top, bottom, vm->entry_ip);
-  rc=MyLaunch(&theVM); // vmcs_ptr, vm->entry_ip, vm->exit_eip, vm->guest_esp);
-  PrintTrace("Returned from MyLaunch();\n");
-  return rc;
+    return -1;
 }
 
 
-VmxOnRegion * CreateVmxOnRegion() {
-  union VMX_MSR basicMSR;
-  VmxOnRegion * region = (VmxOnRegion *)(os_hooks)->allocate_pages(1);
+int v3_is_vmx_capable() {
+    v3_msr_t feature_msr;
+    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+
+    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);
 
-  Get_MSR(IA32_VMX_BASIC_MSR, &basicMSR.regs.high, &basicMSR.regs.low);
-  //  memcpy(region, &basicMSR.vmxBasic.revision, sizeof(uint_t));
+    PrintDebug("ECX: %p\n", (void*)ecx);
 
-  *(ulong_t*)region = basicMSR.vmxBasic.revision;
+    if (ecx & CPUID_1_ECX_VTXFLAG) {
+        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));
+       
+        PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);
 
-  PrintInfo("VMX revision: 0x%lu\n", *(ulong_t *)region);
+        if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
+            PrintDebug("VMX is locked -- enable in the BIOS\n");
+            return 0;
+        }
 
-  return region;
+    } else {
+        PrintDebug("VMX not supported on this cpu\n");
+        return 0;
+    }
+
+    return 1;
+}
+
+static int has_vmx_nested_paging() {
+    return 0;
 }
 
-VMCS * CreateVMCS() {
-  union VMX_MSR basicMSR;
-  VMCS * vmcs = (VMCS *)(os_hooks)->allocate_pages(1);
 
-  Get_MSR(IA32_VMX_BASIC_MSR, &basicMSR.regs.high, &basicMSR.regs.low);
-  *(ulong_t *)vmcs = basicMSR.vmxBasic.revision;
-  *(ulong_t *)((char*)vmcs + 4) = 0;
 
-  PrintTrace("VMCS Region size: %u\n", basicMSR.vmxBasic.regionSize);
-  PrintTrace("VMCS Abort: %x\n",*(uint_t *)(((char*)vmcs)+4));
+void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
+    extern v3_cpu_arch_t v3_cpu_type;
+
+    struct v3_msr tmp_msr;
+    uint64_t ret=0;
+
+    v3_get_msr(VMX_CR4_FIXED0_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
+    
+    __asm__ __volatile__ (
+                         "movq %%cr4, %%rbx;"
+                         "orq  $0x00002000, %%rbx;"
+                         "movq %%rbx, %0;"
+                         : "=m"(ret) 
+                         :
+                         : "%rbx"
+                         );
+
+    if((~ret & tmp_msr.value) == 0) {
+        __asm__ __volatile__ (
+                             "movq %0, %%cr4;"
+                             :
+                             : "q"(ret)
+                             );
+    } else {
+        PrintError("Invalid CR4 Settings!\n");
+        return;
+    }
+      __asm__ __volatile__ (
+                           "movq %%cr0, %%rbx; "
+                           "orq  $0x00000020,%%rbx; "
+                           "movq %%rbx, %%cr0;"
+                           :
+                           :
+                           : "%rbx"
+                           );
+      //
+    // Should check and return Error here.... 
+
+
+    // Setup VMXON Region
+    vmxon_ptr_phys = allocate_vmcs();
+    PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr_phys);
+
+    if (v3_enable_vmx(vmxon_ptr_phys) == VMX_SUCCESS) {
+        PrintDebug("VMX Enabled\n");
+    } else {
+        PrintError("VMX initialization failure\n");
+        return;
+    }
+       
+
+    if (has_vmx_nested_paging() == 1) {
+        v3_cpu_type = V3_VMX_EPT_CPU;
+    } else {
+        v3_cpu_type = V3_VMX_CPU;
+    }
+
+    // Setup the VMX specific vmm operations
+    vm_ops->init_guest = &init_vmx_guest;
+    vm_ops->start_guest = &start_vmx_guest;
+    vm_ops->has_nested_paging = &has_vmx_nested_paging;
 
-  return vmcs;
 }
+