Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.
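For example, to track a release branch instead (the name Release-1.0 below is only an illustration; run git branch -r first to see which branches actually exist):

  git branch -r                                             # list the available remote branches
  git checkout --track -b Release-1.0 origin/Release-1.0    # substitute a real branch name from the list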


Successfully launches and jumps into the exit handler. Need to write a proper exit...
Andy Gocke [Tue, 4 Aug 2009 21:58:19 +0000 (16:58 -0500)]
palacios/build/Makefile
palacios/include/palacios/vmcs.h
palacios/include/palacios/vmx.h
palacios/include/palacios/vmx_handler.h
palacios/include/palacios/vmx_lowlevel.h
palacios/src/palacios/vmcs.c
palacios/src/palacios/vmx.c
palacios/src/palacios/vmx_handler.c
palacios/src/palacios/vmx_lowlevel.S

index d373579..7e7c8a2 100644
@@ -309,13 +309,15 @@ VMM_OBJS := \
        palacios/vmm_lock.o \
        palacios/vmx.o \
        palacios/vmcs.o \
+       palacios/vmx_handler.o \
        palacios/vmx_lowlevel.o \
+       palacios/vmxassist.o \
        $(OBJ_FILES)
 
 
 # Extra C flags for the VMM objects
 $(VMM_OBJS) :: EXTRA_CFLAGS = \
-       $(JRLDEBUG) $(CFLAGS)\
+       $(JRLDEBUG) $(CFLAGS) -DVMXASSIST_PATH="\"../build/vmxassist\"" \
 
 
 
@@ -551,8 +553,11 @@ rombios_link:
 vgabios_link:
        ln -s -f ../src/vmboot/vgabios/VGABIOS-lgpl-latest.bin vgabios
 
+vmxassist_link:
+       ln -s -f ../src/vmboot/vmxassist/vmxassist.bin vmxassist
+
 
-force_payload: rombios_link vgabios_link
+force_payload: rombios_link vgabios_link vmxassist_link
        ../scripts/make_payload.pl payload_layout.txt vm_kernel
 
 inter1: force_payload
index d0cc867..9719c9d 100644
@@ -204,6 +204,7 @@ typedef enum {
 int v3_vmcs_get_field_len(vmcs_field_t field);
 const char* v3_vmcs_field_to_str(vmcs_field_t field);
 void v3_print_vmcs_guest_state();
+void v3_print_vmcs_host_state();
 
 
 /* VMCS Exit QUALIFICATIONs */
index 5da6609..9dcaa5d 100644
@@ -77,26 +77,31 @@ typedef enum {
 } vmx_state_t;
 
 struct tss_descriptor {
-    uint16_t    limit1;
-    uint16_t    base1;
-    uint_t  base2       : 8;
-    /* In 32 bit type follows the form 10B1b, where B is the busy flag */
-    uint_t  type        : 4; 
-    uint_t  zero1       : 1;
-    uint_t  dpl         : 2;
-    uint_t  present     : 1;
-    uint_t  limit2      : 4;
-    uint_t  available   : 1;
-    uint_t  zero2       : 1;
-    uint_t  zero3       : 1;
-    uint_t  granularity : 1;
-    uint_t  base3       : 8;
+    union {
+    ulong_t value;
+    struct {
+        uint16_t    limit1;
+        uint16_t    base1;
+        uint_t      base2       : 8;
+        /* In IA32, type follows the form 10B1b, where B is the busy flag */
+        uint_t      type        : 4; 
+        uint_t      zero1       : 1;
+        uint_t      dpl         : 2;
+        uint_t      present     : 1;
+        uint_t      limit2      : 4;
+        uint_t      available   : 1;
+        uint_t      zero2       : 1;
+        uint_t      zero3       : 1;
+        uint_t      granularity : 1;
+        uint_t      base3       : 8;
 #ifdef __V3_64BIT__
-    uint32_t    base4;
-    uint_t  rsvd1       : 8;
-    uint_t  zero4       : 5;
-    uint_t  rsvd2       : 19;
+        uint32_t    base4;
+        uint_t      rsvd1       : 8;
+        uint_t      zero4       : 5;
+        uint_t      rsvd2       : 19;
 #endif
+    } __attribute__((packed));
+    } __attribute__((packed));
 }__attribute__((packed));
 
 struct vmcs_host_state {
index afb2310..cd2f7e5 100644
@@ -22,6 +22,7 @@
 
 #ifdef __V3VEE__
 
+#include <palacios/vm_guest.h>
 
 /******************************************/
 /* VMX Intercept Exit Codes               */
@@ -66,6 +67,7 @@
 #define VMEXIT_ENTRY_FAILURE_MACHINE_CHECK        41
 #define VMEXIT_TPR_BELOW_THRESHOLD                43
 
+int v3_handle_vmx_exit(struct guest_info * info);
 
 #endif
 
index 0a6ac3e..11409ce 100644
@@ -37,7 +37,7 @@
 
 /* Opcode definitions for all the VM instructions */
 
-#define VMCLEAR_OPCODE  ".byte 0x66,0xf,0x67;" /* reg=/6 */
+#define VMCLEAR_OPCODE  ".byte 0x66,0xf,0xc7;" /* reg=/6 */
 #define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3;"
 #define VMPTRLD_OPCODE  ".byte 0x0f,0xc7;" /* reg=/6 */
 #define VMPTRST_OPCODE  ".byte 0x0f,0xc7;" /* reg=/7 */
@@ -75,10 +75,8 @@ static inline int v3_enable_vmx(addr_t vmxon_ptr) {
     }
 }
 
-// No vmcall necessary - is only executed by the guest
-
 static inline int vmcs_clear(addr_t vmcs_ptr) {
-    uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
+    uint64_t vmcs_ptr_64 __attribute__ ((aligned(8))) = (uint64_t)vmcs_ptr;
     uint8_t ret_valid = 0;
     uint8_t ret_invalid = 0;
 
@@ -96,25 +94,6 @@ static inline int vmcs_clear(addr_t vmcs_ptr) {
     return VMX_SUCCESS;
 }
 
-
-static inline int vmcs_resume() {
-    uint8_t ret_valid = 0;
-    uint8_t ret_invalid = 0;
-
-    __asm__ __volatile__ (
-                VMRESUME_OPCODE
-                "seteb %0;"
-                "setnaeb %1;"
-                : "=q"(ret_valid), "=q"(ret_invalid)
-                : "0"(ret_valid), "1"(ret_invalid)
-                : "memory");
-
-    CHECK_VMXFAIL(ret_valid, ret_invalid);
-
-    return VMX_SUCCESS;
-}
-
-
 static inline int vmcs_load(addr_t vmcs_ptr) {
     uint64_t vmcs_ptr_64 = (uint64_t)vmcs_ptr;
     uint8_t ret_valid = 0;
index af8bc2e..a7da803 100644
@@ -132,107 +132,45 @@ void v3_print_vmcs_guest_state()
     PrintDebug("\n");
 }
        
-/*
-void print_debug_vmcs_load_guest() {
-    const int wordsize = sizeof(addr_t);
-    uint64_t temp;
-    struct vmcs_segment_access tmp_seg;
-
-    PrintDebug("\n====== Loading Guest State ======\n");
-    PRINT_VMREAD("Guest CR0: %x\n", GUEST_CR0, wordsize);
-    PRINT_VMREAD("Guest CR3: %x\n", GUEST_CR3, wordsize);
-    PRINT_VMREAD("Guest CR4: %x\n", GUEST_CR4, wordsize);
-    PRINT_VMREAD("Guest DR7: %x\n", GUEST_DR7, wordsize);
-
-    READ_VMCS_SEG(&tmp_seg,CS,wordsize);
-    print_vmcs_segment("CS", &tmp_seg);
-    
-    READ_VMCS_SEG(&tmp_seg,SS,wordsize);
-    print_vmcs_segment("SS", &tmp_seg);
-
-    READ_VMCS_SEG(&tmp,DS,wordsize);
-    print_vmcs_segment("DS", &tmp_seg);
-
-    READ_VMCS_SEG(&tmp_seg,ES,wordsize);
-    print_vmcs_segment("ES", &tmp_seg);
-
-    READ_VMCS_SEG(&tmp_seg,FS,wordsize);
-    print_vmcs_segment("FS", &tmp_seg);
-
-    READ_VMCS_SEG(&tmp_seg,GS,wordsize);
-    print_vmcs_segment("GS", &tmp_seg);
-
-    READ_VMCS_SEG(&tmp_seg,TR,wordsize);
-    print_vmcs_segment("TR", &tmp_seg);
+void v3_print_vmcs_host_state()
+{
+    PrintDebug("=== Control Fields===\n");
+    print_vmcs_field(VMCS_PIN_CTRLS);
+    print_vmcs_field(VMCS_PROC_CTRLS);
+    print_vmcs_field(VMCS_EXIT_CTRLS);
+    print_vmcs_field(VMCS_ENTRY_CTRLS);
+    print_vmcs_field(VMCS_EXCP_BITMAP);
 
-    READ_VMCS_SEG(&tmp_seg,LDTR,wordsize);
-    print_vmcs_segment("LDTR", &tmp_seg);
+    PrintDebug("\n");
+    print_vmcs_field(VMCS_HOST_CR0);
+    print_vmcs_field(VMCS_HOST_CR3);
+    print_vmcs_field(VMCS_HOST_CR4);
+    print_vmcs_field(VMCS_HOST_RSP);
+    print_vmcs_field(VMCS_HOST_RIP);
+    print_vmcs_field(VMCS_HOST_SYSENTER_CS);
+    print_vmcs_field(VMCS_HOST_SYSENTER_ESP);
+    print_vmcs_field(VMCS_HOST_SYSENTER_EIP);
     
-    PrintDebug("\n==GDTR==\n");
-    PRINT_VMREAD("GDTR Base: %x\n", GUEST_GDTR_BASE, wordsize);
-    PRINT_VMREAD("GDTR Limit: %x\n", GUEST_GDTR_LIMIT, 32);
-    PrintDebug("====\n");
-
-    PrintDebug("\n==LDTR==\n");
-    PRINT_VMREAD("LDTR Base: %x\n", GUEST_LDTR_BASE, wordsize);
-    PRINT_VMREAD("LDTR Limit: %x\n", GUEST_LDTR_LIMIT, 32);
-    PrintDebug("=====\n");
-
-    PRINT_VMREAD("Guest RSP: %x\n", GUEST_RSP, wordsize);
-    PRINT_VMREAD("Guest RIP: %x\n", GUEST_RIP, wordsize);
-    PRINT_VMREAD("Guest RFLAGS: %x\n", GUEST_RFLAGS, wordsize);
-    PRINT_VMREAD("Guest Activity state: %x\n", GUEST_ACTIVITY_STATE, 32);
-    PRINT_VMREAD("Guest Interruptibility state: %x\n", GUEST_INT_STATE, 32);
-    PRINT_VMREAD("Guest pending debug: %x\n", GUEST_PENDING_DEBUG_EXCS, wordsize);
-
-    PRINT_VMREAD("IA32_DEBUGCTL: %x\n", GUEST_IA32_DEBUGCTL, 64);
-    PRINT_VMREAD("IA32_SYSENTER_CS: %x\n", GUEST_IA32_SYSENTER_CS, 32);
-    PRINT_VMREAD("IA32_SYSTENTER_ESP: %x\n", GUEST_IA32_SYSENTER_ESP, wordsize);
-    PRINT_VMREAD("IA32_SYSTENTER_EIP: %x\n", GUEST_IA32_SYSENTER_EIP, wordsize);
-    PRINT_VMREAD("IA32_PERF_GLOBAL_CTRL: %x\n", GUEST_IA32_PERF_GLOBAL_CTRL, wordsize);
-    PRINT_VMREAD("VMCS Link Ptr: %x\n", VMCS_LINK_PTR, 64);
-    // TODO: Maybe add VMX preemption timer and PDTE (Intel 20-8 Vol. 3b)
-}
+    PrintDebug("\n=== Segment Registers===\n");
+    PrintDebug("Selector:\n");
+    print_vmcs_field(VMCS_HOST_CS_SELECTOR);
+    print_vmcs_field(VMCS_HOST_SS_SELECTOR);
+    print_vmcs_field(VMCS_HOST_DS_SELECTOR);
+    print_vmcs_field(VMCS_HOST_ES_SELECTOR);
+    print_vmcs_field(VMCS_HOST_FS_SELECTOR);
+    print_vmcs_field(VMCS_HOST_GS_SELECTOR);
+    print_vmcs_field(VMCS_HOST_TR_SELECTOR);
+
+    PrintDebug("\nBase:\n");
+    print_vmcs_field(VMCS_HOST_FS_BASE);
+    print_vmcs_field(VMCS_HOST_GS_BASE);
+    print_vmcs_field(VMCS_HOST_TR_BASE);
+    print_vmcs_field(VMCS_HOST_GDTR_BASE);
+    print_vmcs_field(VMCS_HOST_IDTR_BASE);
 
-void print_debug_load_host() {
-    const int wordsize = sizeof(addr_t);
-    uint64_t temp;
-    vmcs_segment tmp_seg;
-
-    PrintDebug("\n====== Host State ========\n");
-    PRINT_VMREAD("Host CR0: %x\n", HOST_CR0, wordsize);
-    PRINT_VMREAD("Host CR3: %x\n", HOST_CR3, wordsize);
-    PRINT_VMREAD("Host CR4: %x\n", HOST_CR4, wordsize);
-    PRINT_VMREAD("Host RSP: %x\n", HOST_RSP, wordsize);
-    PRINT_VMREAD("Host RIP: %x\n", HOST_RIP, wordsize);
-    PRINT_VMREAD("IA32_SYSENTER_CS: %x\n", HOST_IA32_SYSENTER_CS, 32);
-    PRINT_VMREAD("IA32_SYSENTER_ESP: %x\n", HOST_IA32_SYSENTER_ESP, wordsize);
-    PRINT_VMREAD("IA32_SYSENTER_EIP: %x\n", HOST_IA32_SYSENTER_EIP, wordsize);
-        
-    PRINT_VMREAD("Host CS Selector: %x\n", HOST_CS_SELECTOR, 16);
-    PRINT_VMREAD("Host SS Selector: %x\n", HOST_SS_SELECTOR, 16);
-    PRINT_VMREAD("Host DS Selector: %x\n", HOST_DS_SELECTOR, 16);
-    PRINT_VMREAD("Host ES Selector: %x\n", HOST_ES_SELECTOR, 16);
-    PRINT_VMREAD("Host FS Selector: %x\n", HOST_FS_SELECTOR, 16);
-    PRINT_VMREAD("Host GS Selector: %x\n", HOST_GS_SELECTOR, 16);
-    PRINT_VMREAD("Host TR Selector: %x\n", HOST_TR_SELECTOR, 16);
-
-    PRINT_VMREAD("Host FS Base: %x\n", HOST_FS_BASE, wordsize);
-    PRINT_VMREAD("Host GS Base: %x\n", HOST_GS_BASE, wordsize);
-    PRINT_VMREAD("Host TR Base: %x\n", HOST_TR_BASE, wordsize);
-    PRINT_VMREAD("Host GDTR Base: %x\n", HOST_GDTR_BASE, wordsize);
-    PRINT_VMREAD("Host IDTR Base: %x\n", HOSE_IDTR_BASE, wordsize);
+    PrintDebug("\n");
 }
 
-void print_vmcs_segment(char * name, vmcs_segment* seg)
-{
-    PrintDebug("\n==VMCS %s Segment==\n",name);
-    PrintDebug("\tSelector: %x\n", seg->selector);
-    PrintDebug("\tBase Address: %x\n", seg->baseAddr);
-    PrintDebug("\tLimit: %x\n", seg->limit);
-    PrintDebug("\tAccess: %x\n", seg->access);
-}*/
-
 /*
  * Returns the field length in bytes
  */
index 6c2939d..93b3c16 100644
 #include <palacios/vmm_lowlevel.h>
 #include <palacios/vmm_config.h>
 #include <palacios/vmm_ctrl_regs.h>
+#include <palacios/vm_guest_mem.h>
 
+static addr_t vmxon_ptr_phys;
+extern int v3_vmx_exit_handler();
+extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs);
 
-// 
-// 
-// CRUFT
-//
-//
-
-#if 0
-
-#include <palacios/vmm_util.h>
-#include <palacios/vmm_string.h>
-#include <palacios/vmm_ctrl_regs.h>
-
-
-
-extern int Launch_VM(ullong_t vmcsPtr, uint_t eip);
-
-#define NUMPORTS 65536
-
-
-#define VMXASSIST_INFO_PORT   0x0e9
-#define ROMBIOS_PANIC_PORT    0x400
-#define ROMBIOS_PANIC_PORT2   0x401
-#define ROMBIOS_INFO_PORT     0x402
-#define ROMBIOS_DEBUG_PORT    0x403
-
-
-
-static uint_t GetLinearIP(struct VM * vm) {
-  if (vm->state == VM_VMXASSIST_V8086_BIOS || vm->state == VM_VMXASSIST_V8086) { 
-    return vm->vmcs.guestStateArea.cs.baseAddr + vm->vmcs.guestStateArea.rip;
-  } else {
-    return vm->vmcs.guestStateArea.rip;
-  }
-}
-
-
-
-
-#define MAX_CODE 512
-#define INSTR_OFFSET_START 17
-#define NOP_SEQ_LEN        10
-#define INSTR_OFFSET_END   (INSTR_OFFSET_START + NOP_SEQ_LEN - 1)
-#define TEMPLATE_CODE_LEN  35
-
-uint_t oldesp = 0;
-uint_t myregs = 0;
-
-
-
-
-
-extern uint_t VMCS_LAUNCH();
-extern uint_t Init_VMCS_HostState();
-extern uint_t Init_VMCS_GuestState();
-
-
-
-
-extern int Get_CR2();
-extern int vmRunning;
-
-
-
-
-
-void DecodeCurrentInstruction(struct VM *vm, struct Instruction *inst)
-{
-  // this is a gruesome hack
-  uint_t address = GetLinearIP(vm);
-  uint_t length = vm->vmcs.exitInfoFields.instrLength;
-  unsigned char *t = (unsigned char *) address;
-
-
-  
-  PrintTrace("DecodeCurrentInstruction: instruction is\n");
-  PrintTraceMemDump(t,length);
-  
-  if (length==3 && t[0]==0x0f && t[1]==0x22 && t[2]==0xc0) { 
-    // mov from eax to cr0
-    // usually used to signal
-    inst->type=VM_MOV_TO_CR0;
-    inst->address=address;
-    inst->size=length;
-    inst->input1=vm->registers.eax;
-    inst->input2=vm->vmcs.guestStateArea.cr0;
-    inst->output=vm->registers.eax;
-    PrintTrace("MOV FROM EAX TO CR0\n");
-  } else {
-    inst->type=VM_UNKNOWN_INST;
-  }
-}
-
-
-
-static void ConfigureExits(struct VM *vm)
-{
-  CopyOutVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
-
-  vm->vmcs.execCtrlFields.pinCtrls |= 0 
-    // EXTERNAL_INTERRUPT_EXITING 
-    | NMI_EXITING;
-  vm->vmcs.execCtrlFields.procCtrls |= 0
-      // INTERRUPT_WINDOWS_EXIT 
-      | USE_TSC_OFFSETTING
-      | HLT_EXITING  
-      | INVLPG_EXITING           
-      | MWAIT_EXITING            
-      | RDPMC_EXITING           
-      | RDTSC_EXITING         
-      | MOVDR_EXITING         
-      | UNCONDITION_IO_EXITING
-      | MONITOR_EXITING       
-      | PAUSE_EXITING         ;
-
-  CopyInVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
-  
-  CopyOutVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));
-
-  vm->vmcs.exitCtrlFields.exitCtrls |= ACK_IRQ_ON_EXIT;
-  
-  CopyInVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));
-
-
-/*   VMCS_READ(VM_EXIT_CTRLS, &flags); */
-/*   flags |= ACK_IRQ_ON_EXIT; */
-/*   VMCS_WRITE(VM_EXIT_CTRLS, &flags); */
-}
-
-
-extern int RunVMM();
-extern int SAFE_VM_LAUNCH();
-
-int MyLaunch(struct VM *vm)
+static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
 {
-  ullong_t vmcs = (ullong_t)((uint_t) (vm->vmcsregion));
-  uint_t entry_eip = vm->descriptor.entry_ip;
-  uint_t exit_eip = vm->descriptor.exit_eip;
-  uint_t guest_esp = vm->descriptor.guest_esp;
-  uint_t f = 0xffffffff;
-  uint_t tmpReg = 0;
-  int ret;
-  int vmm_ret = 0;
-
-  PrintTrace("Guest ESP: 0x%x (%u)\n", guest_esp, guest_esp);
-
-  exit_eip = (uint_t)RunVMM;
-
-  PrintTrace("Clear\n");
-  VMCS_CLEAR(vmcs);
-  PrintTrace("Load\n");
-  VMCS_LOAD(vmcs);
-
-
-  PrintTrace("VMCS_LINK_PTR\n");
-  VMCS_WRITE(VMCS_LINK_PTR, &f);
-  PrintTrace("VMCS_LINK_PTR_HIGH\n");
-  VMCS_WRITE(VMCS_LINK_PTR_HIGH, &f);
-
-  SetCtrlBitsCorrectly(IA32_VMX_PINBASED_CTLS_MSR, PIN_VM_EXEC_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_PROCBASED_CTLS_MSR, PROC_VM_EXEC_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CTRLS);
-  SetCtrlBitsCorrectly(IA32_VMX_ENTRY_CTLS_MSR, VM_ENTRY_CTRLS);
-
-  //
-  //
-  //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL);
-  //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL_HIGH);
-
-
-  /* Host state */
-  PrintTrace("Setting up host state\n");
-  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, HOST_CR0);
-  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, HOST_CR4);
-  ret = Init_VMCS_HostState();
-
-  if (ret != VMX_SUCCESS) {
-    if (ret == VMX_FAIL_VALID) {
-      PrintTrace("Init Host state: VMCS FAILED WITH ERROR\n");
-    } else {
-      PrintTrace("Init Host state: Invalid VMCS\n");
-    }
-    return ret;
-  }
-
-  //  PrintTrace("HOST_RIP: %x (%u)\n", exit_eip, exit_eip);
-  VMCS_WRITE(HOST_RIP, &exit_eip);
-
-  /* Guest state */
-  PrintTrace("Setting up guest state\n");
-  PrintTrace("GUEST_RIP: %x (%u)\n", entry_eip, entry_eip);
-  VMCS_WRITE(GUEST_RIP, &entry_eip);
-
-  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, GUEST_CR0);
-  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, GUEST_CR4);
-  ret = Init_VMCS_GuestState();
-
-  PrintTrace("InitGuestState returned\n");
+    int ret = 0;
+    ret = vmcs_write(field,val);
 
-  if (ret != VMX_SUCCESS) {
-    if (ret == VMX_FAIL_VALID) {
-      PrintTrace("Init Guest state: VMCS FAILED WITH ERROR\n");
-    } else {
-      PrintTrace("Init Guest state: Invalid VMCS\n");
+    if (ret != VMX_SUCCESS) {
+        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
+        return 1;
     }
-    return ret;
-  }
-  PrintTrace("GUEST_RSP: %x (%u)\n", guest_esp, (uint_t)guest_esp);
-  VMCS_WRITE(GUEST_RSP, &guest_esp);
-
-  //  tmpReg = 0x4100;
-  tmpReg = 0xffffffff;
-  if (VMCS_WRITE(EXCEPTION_BITMAP, &tmpReg) != VMX_SUCCESS) {
-    PrintInfo("Bitmap error\n");
-  }
-
-  ConfigureExits(vm);
-
-  PrintTrace("VMCS_LAUNCH\n");
-
-  vm->state=VM_VMXASSIST_STARTUP;
-
-  vmm_ret = SAFE_VM_LAUNCH();
-
-  PrintTrace("VMM error %d\n", vmm_ret);
-
-  return vmm_ret;
-}
-
-
-
-  
-int VMLaunch(struct VMDescriptor *vm) 
-{
-  VMCS * vmcs = CreateVMCS();
-  int rc;
-
-  ullong_t vmcs_ptr = (ullong_t)((uint_t)vmcs);
-  uint_t top = (vmcs_ptr >> 32) & 0xffffffff;
-  uint_t bottom = (vmcs_ptr) & 0xffffffff;
 
-  theVM.vmcsregion = vmcs;
-  theVM.descriptor = *vm;
-
-  PrintTrace("vmcs_ptr_top=%x vmcs_ptr_bottom=%x, eip=%x\n", top, bottom, vm->entry_ip);
-  rc = MyLaunch(&theVM); // vmcs_ptr, vm->entry_ip, vm->exit_eip, vm->guest_esp);
-  PrintTrace("Returned from MyLaunch();\n");
-  return rc;
+    return 0;
 }
 
-
-
-
-//
-//
-//  END CRUFT
-//
-//
-
-#endif
-
 static int update_vmcs_host_state(struct guest_info * info) {
+    int vmx_ret = 0;
     addr_t tmp;
     struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
     struct v3_msr tmp_msr;
@@ -299,38 +56,38 @@ static int update_vmcs_host_state(struct guest_info * info) {
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_CR0, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);
 
 
     __asm__ __volatile__ ( "movq %%cr3, %0; "          
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_CR3, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);
 
 
     __asm__ __volatile__ ( "movq %%cr4, %0; "          
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_CR4, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
 
 
 
-    vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
-    vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
-    vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
 
 #define FS_BASE_MSR 0xc0000100
 #define GS_BASE_MSR 0xc0000101
 
     // FS.BASE MSR
     v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);    
+    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);    
 
     // GS.BASE MSR
     v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);    
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);    
 
 
 
@@ -338,39 +95,39 @@ static int update_vmcs_host_state(struct guest_info * info) {
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
 
     __asm__ __volatile__ ( "movq %%ss, %0; "           
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
 
     __asm__ __volatile__ ( "movq %%ds, %0; "           
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
 
     __asm__ __volatile__ ( "movq %%es, %0; "           
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
 
     __asm__ __volatile__ ( "movq %%fs, %0; "           
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
 
     __asm__ __volatile__ ( "movq %%gs, %0; "           
                           : "=q"(tmp)
                           :
     );
-    vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
 
-    vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
 
 
 #define SYSENTER_CS_MSR 0x00000174
@@ -379,27 +136,22 @@ static int update_vmcs_host_state(struct guest_info * info) {
 
    // SYSENTER CS MSR
     v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.value);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);
 
     // SYSENTER_ESP MSR
     v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);
 
     // SYSENTER_EIP MSR
     v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
+    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
 
-
-
-    return 0;
+    return vmx_ret;
 }
 
 
 
 
-static addr_t vmxon_ptr_phys;
-extern int v3_vmx_exit_handler();
-extern int v3_vmx_vmlaunch();
 
 
 #if 0
@@ -414,8 +166,8 @@ static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
 
     PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);
 
-    val &= mask_msr.lo;
-    val &= mask_msr.hi;
+    val |= mask_msr.lo;
+    val |= mask_msr.hi;
   
     return val;
 }
@@ -437,8 +189,8 @@ static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
 
     PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);
 
-    val &= msr0_val;
-    val &= msr1_val;
+    val |= msr0_val;
+    val |= msr1_val;
 
     return val;
 }
@@ -455,19 +207,6 @@ static int setup_base_host_state() {
 
 #endif
 
-static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
-{
-    int ret = 0;
-    ret = vmcs_write(field,val);
-
-    if (ret != VMX_SUCCESS) {
-        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
-        return 1;
-    }
-
-    return 0;
-}
-
 
 static void inline translate_segment_access(struct v3_segment * v3_seg,  
                                            struct vmcs_segment_access * access)
@@ -492,76 +231,76 @@ static int inline vmcs_write_guest_segments(struct guest_info* info)
     /* CS Segment */
     translate_segment_access(&(info->segments.cs), &access);
 
-    ret &= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
-    ret &= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
-    ret &= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
-    ret &= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
+    ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
+    ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
 
     /* SS Segment */
     translate_segment_access(&(info->segments.ss), &access);
 
-    ret &= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
-    ret &= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
-    ret &= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
-    ret &= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
+    ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
+    ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
 
     /* DS Segment */
     translate_segment_access(&(info->segments.ds), &access);
 
-    ret &= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
-    ret &= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
-    ret &= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
-    ret &= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
+    ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
+    ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
 
 
     /* ES Segment */
     translate_segment_access(&(info->segments.es), &access);
 
-    ret &= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
-    ret &= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
-    ret &= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
-    ret &= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
+    ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
+    ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
 
     /* FS Segment */
     translate_segment_access(&(info->segments.fs), &access);
 
-    ret &= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
-    ret &= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
-    ret &= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
-    ret &= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
+    ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
+    ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
 
     /* GS Segment */
     translate_segment_access(&(info->segments.gs), &access);
 
-    ret &= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
-    ret &= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
-    ret &= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
-    ret &= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
+    ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
+    ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
 
     /* LDTR segment */
     translate_segment_access(&(info->segments.ldtr), &access);
 
-    ret &= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
-    ret &= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
-    ret &= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
-    ret &= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);
+    ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
+    ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);
 
     /* TR Segment */
     translate_segment_access(&(info->segments.tr), &access);
 
-    ret &= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
-    ret &= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.ldtr.selector);
-    ret &= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
-    ret &= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);
+    ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
+    ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.ldtr.selector);
+    ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);
 
     /* GDTR Segment */
 
-    ret &= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
-    ret &= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
+    ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);
 
     /* IDTR Segment*/
-    ret &= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
-    ret &= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);
+    ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
+    ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);
 
     return ret;
 }
@@ -574,7 +313,8 @@ static void setup_v8086_mode_for_boot(struct guest_info * vm_info)
     ((struct rflags *)&(vm_info->ctrl_regs.rflags))->iopl = 3;
 
    
-    vm_info->rip = 0xfff0;
+    vm_info->rip = 0xd0000;
+    vm_info->vm_regs.rsp = 0x80000;
 
     vm_info->segments.cs.selector = 0xf000;
     vm_info->segments.cs.base = 0xf000 << 4;
@@ -636,7 +376,6 @@ static addr_t allocate_vmcs()
 }
 
 
-
 static int init_vmcs_bios(struct guest_info * vm_info) 
 {
 
@@ -644,10 +383,9 @@ static int init_vmcs_bios(struct guest_info * vm_info)
 
     // TODO: Fix vmcs fields so they're 32-bit
     struct vmx_data * vmx_data = (struct vmx_data *)vm_info->vmm_data;
-    int vmx_ret;
+    int vmx_ret = 0;
 
-    // Have to do a whole lot of flag setting here
-    PrintDebug("Clearing VMCS\n");
+    PrintDebug("Clearing VMCS: %p\n",(void*)vmx_data->vmcs_ptr_phys);
     vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);
 
     if (vmx_ret != VMX_SUCCESS) {
@@ -667,16 +405,25 @@ static int init_vmcs_bios(struct guest_info * vm_info)
 
     /* Write VMX Control Fields */
     v3_get_msr(VMX_PINBASED_CTLS_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
-    vmcs_write(VMCS_PIN_CTRLS, tmp_msr.lo);
+    /* Add NMI exiting */
+    tmp_msr.lo |= NMI_EXIT;
+    check_vmcs_write(VMCS_PIN_CTRLS, tmp_msr.lo);
 
     v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmcs_write(VMCS_PROC_CTRLS, tmp_msr.lo);
+    /* Add unconditional I/O */
+    tmp_msr.lo |= UNCOND_IO_EXIT;
+    check_vmcs_write(VMCS_PROC_CTRLS, tmp_msr.lo);
 
     v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmcs_write(VMCS_EXIT_CTRLS, tmp_msr.lo);
+    tmp_msr.lo |= HOST_ADDR_SPACE_SIZE;
+    check_vmcs_write(VMCS_EXIT_CTRLS, tmp_msr.lo);
+
 
     v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmcs_write(VMCS_ENTRY_CTRLS, tmp_msr.lo);
+    check_vmcs_write(VMCS_ENTRY_CTRLS, tmp_msr.lo);
+
+
+    check_vmcs_write(VMCS_EXCP_BITMAP, 0xffffffff);
 
     /* Cache GDTR, IDTR, and TR in host struct */
     struct {
@@ -692,7 +439,8 @@ static int init_vmcs_bios(struct guest_info * vm_info)
                         : "q"(&tmp_seg)
                         : "memory"
                         );
-    vmx_data->host_state.gdtr.base = gdtr_base = tmp_seg.base;
+    gdtr_base = tmp_seg.base;
+    vmx_data->host_state.gdtr.base = gdtr_base;
 
     __asm__ __volatile__(
                         "sidt (%0);"
@@ -710,14 +458,16 @@ static int init_vmcs_bios(struct guest_info * vm_info)
                         );
     vmx_data->host_state.tr.selector = tmp_seg.selector;
 
-    struct tss_descriptor desc = ((struct tss_descriptor *)gdtr_base)[tmp_seg.selector];
-    
+    /* The GDTR *index* is bits 3-15 of the selector. */
+    struct tss_descriptor * desc = (struct tss_descriptor *)
+                        (gdtr_base + 8*(tmp_seg.selector>>3));
+
     tmp_seg.base = (
-                   (desc.base1) |
-                   (desc.base2 << 16) |
-                   (desc.base3 << 24) |
+                   (desc->base1) |
+                   (desc->base2 << 16) |
+                   (desc->base3 << 24) |
 #ifdef __V3_64BIT__
-                   ((uint64_t)desc.base4 << 32)
+                   ((uint64_t)desc->base4 << 32)
 #else 
                    (0)
 #endif
@@ -725,36 +475,44 @@ static int init_vmcs_bios(struct guest_info * vm_info)
 
     vmx_data->host_state.tr.base = tmp_seg.base;
 
-    update_vmcs_host_state(vm_info);
-    vmcs_write(VMCS_HOST_RIP, (addr_t)&v3_vmx_exit_handler);
+    if(update_vmcs_host_state(vm_info)) {
+        PrintError("Could not write host state\n");
+        return -1;
+    }
 
     // Setup guest state 
     // TODO: This is not 32-bit safe!
-    vmx_ret &= check_vmcs_write(VMCS_GUEST_RIP, vm_info->rip);
-    vmx_ret &= check_vmcs_write(VMCS_GUEST_CR0, 0x60000010);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, vm_info->rip);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, vm_info->vm_regs.rsp);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, 0x80000021);
 
-    vmx_ret &= vmcs_write_guest_segments(vm_info);
+    vmx_ret |= vmcs_write_guest_segments(vm_info);
 
-    vmx_ret &= check_vmcs_write(VMCS_GUEST_RFLAGS, vm_info->ctrl_regs.rflags);
-    vmx_ret &= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);
+    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, vm_info->ctrl_regs.rflags);
+    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);
 
     if (vmx_ret != 0) {
        PrintError("Could not initialize VMCS segments\n");
         return -1;
     }
 
-    v3_print_vmcs_guest_state();
-    return 0;
-}
+#define VMXASSIST_START 0x000d0000
+    extern uint8_t vmxassist_start[];
+    extern uint8_t vmxassist_end[];
 
-int v3_vmx_handle_exit()
-{
-    PrintDebug("Exit taken!\n");
+    addr_t vmxassist_dst = 0;
+    if(guest_pa_to_host_va(vm_info, VMXASSIST_START, &vmxassist_dst) == -1) {
+        PrintError("Could not find VMXASSIST destination\n");
+        return -1;
+    }
+    memcpy((void*)vmxassist_dst, vmxassist_start, vmxassist_end-vmxassist_start);
+
+    v3_print_vmcs_host_state();
+    v3_print_vmcs_guest_state();
     return 0;
 }
 
 static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
-    PrintDebug("Entering init_vmx_guest\n");
     v3_pre_config_guest(info, config_ptr);
 
     struct vmx_data * data = NULL;
@@ -777,7 +535,7 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
         return -1;
     }
 
-    //v3_post_config_guest(info, config_ptr);
+     //v3_post_config_guest(info, config_ptr);
 
     return 0;
 }
@@ -789,15 +547,15 @@ static int start_vmx_guest(struct guest_info* info) {
 
     PrintDebug("Attempting VMLAUNCH\n");
 
-    ret = v3_vmx_vmlaunch();
-
-    PrintDebug("Returned from VMLAUNCH\n");
-
-    vmcs_read(VMCS_INSTR_ERR, &error, 4);
-
+    ret = v3_vmx_vmlaunch(&(info->vm_regs));
     if (ret != VMX_SUCCESS) {
+        vmcs_read(VMCS_INSTR_ERR, &error, 4);
         PrintError("VMLAUNCH failed: %d\n", error);
+
+        v3_print_vmcs_guest_state();
+        v3_print_vmcs_host_state();
     }
+    PrintDebug("Returned from VMLAUNCH ret=%d(0x%x)\n", ret, ret);
 
     return -1;
 }
@@ -877,6 +635,7 @@ void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
       //
     // Should check and return Error here.... 
 
+
     // Setup VMXON Region
     vmxon_ptr_phys = allocate_vmcs();
     PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr_phys);
@@ -901,3 +660,4 @@ void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
     vm_ops->has_nested_paging = &has_vmx_nested_paging;
 
 }
+
index 247422a..e719afc 100644
  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
  */
 
-static int PanicUnhandledVMExit(struct VM *vm)
-{
-  PrintInfo("Panicking due to VMExit with reason %u\n", vm->vmcs.exitInfoFields.reason);
-  PrintTrace("Panicking due to VMExit with reason %u\n", vm->vmcs.exitInfoFields.reason);
-  PrintTrace_VMCS_ALL();
-  PrintTrace_VMX_Regs(&(vm->registers));
-  VMXPanic();
-  return 0;
-}
-
-
-
-
-
-static int HandleVMPrintsAndPanics(struct VM *vm, uint_t port, uint_t data)
-{
-  if (port==VMXASSIST_INFO_PORT &&
-      (vm->state == VM_VMXASSIST_STARTUP || 
-       vm->state == VM_VMXASSIST_V8086_BIOS ||
-       vm->state == VM_VMXASSIST_V8086)) { 
-    // Communication channel from VMXAssist
-    PrintTrace("VMXASSIST Output Port\n");
-    PrintDebug("%c",data&0xff);
-    return 1;
-  } 
-
-  if ((port==ROMBIOS_PANIC_PORT || 
-       port==ROMBIOS_PANIC_PORT2 || 
-       port==ROMBIOS_DEBUG_PORT ||
-       port==ROMBIOS_INFO_PORT) &&
-      (vm->state==VM_VMXASSIST_V8086_BIOS)) {
-    // rombios is communicating
-    PrintTrace("ROMBIOS Output Port\n");
-    //    PrintDebug("%c",data&0xff);
-    return 1;
-  }
-
-  if (port==BOOT_STATE_CARD_PORT && vm->state==VM_VMXASSIST_V8086_BIOS) { 
-    // rombios is sending something to the display card
-    PrintTrace("Hex Display: 0x%x\n",data&0xff);
-    return 1;
-  }
-  return 0;
-}
-
-static int HandleInOutExit(struct VM *vm)
-{
-  uint_t address;
-
-  struct VMCSExitInfoFields *exitinfo = &(vm->vmcs.exitInfoFields);
-  struct VMExitIOQual * qual = (struct VMExitIOQual *)&(vm->vmcs.exitInfoFields.qualification);
-  struct VMXRegs *regs = &(vm->registers);
-
-  address=GetLinearIP(vm);
-
-  PrintTrace("Handling Input/Output Instruction Exit\n");
-
-  PrintTrace_VMX_Regs(regs);
-
-  PrintTrace("Qualifications=0x%x\n", exitinfo->qualification);
-  PrintTrace("Reason=0x%x\n", exitinfo->reason);
-  PrintTrace("IO Port: 0x%x (%d)\n", qual->port, qual->port);
-  PrintTrace("Instruction Info=%x\n", exitinfo->instrInfo);
-  PrintTrace("%x : %s %s %s instruction of length %d for %d bytes from/to port 0x%x\n",
-                  address,
-                  qual->dir == 0 ? "output" : "input",
-                  qual->string ==0 ? "nonstring" : "STRING",
-                  qual->REP == 0 ? "with no rep" : "WITH REP",
-                  exitinfo->instrLength, 
-                  qual->accessSize==0 ? 1 : qual->accessSize==1 ? 2 : 4,
-                  qual->port);
+#include <palacios/vmx_handler.h>
+#include <palacios/vmm_types.h>
+#include <palacios/vmm.h>
 
-  if ((qual->port == PIC_MASTER_CMD_ISR_PORT) ||
-      (qual->port == PIC_MASTER_IMR_PORT)     ||
-      (qual->port == PIC_SLAVE_CMD_ISR_PORT)  ||
-      (qual->port == PIC_SLAVE_IMR_PORT)) {
-    PrintTrace( "PIC Access\n");
-  }
-                  
 
-  if ((qual->dir == 1) && (qual->REP == 0) && (qual->string == 0)) { 
-    char byte = In_Byte(qual->port);
-
-    vm->vmcs.guestStateArea.rip += exitinfo->instrLength;
-    regs->eax = (regs->eax & 0xffffff00) | byte;
-    PrintTrace("Returning 0x%x in eax\n", (regs->eax));
-  }
-
-  if (qual->dir==0 && qual->REP==0 && qual->string==0) { 
-    // See if we need to handle the outb as a signal or
-    // print from the VM
-    if (HandleVMPrintsAndPanics(vm,qual->port,regs->eax)) {
-    } else {
-      // If not, just go ahead and do the outb
-      Out_Byte(qual->port,regs->eax);
-      PrintTrace("Wrote 0x%x to port\n",(regs->eax));
-    }
-    vm->vmcs.guestStateArea.rip += exitinfo->instrLength;
-  }
-
-  return 0;
-}  
-
-
-static int HandleExternalIRQExit(struct VM *vm)
+int v3_handle_vmx_exit(struct guest_info * info)
 {
-  struct VMCSExitInfoFields * exitinfo = &(vm->vmcs.exitInfoFields);
-  struct VMExitIntInfo * intInfo  = (struct VMExitIntInfo *)&(vm->vmcs.exitInfoFields.intInfo);
-
-  PrintTrace("External Interrupt captured\n");
-  PrintTrace("IntInfo: %x\n", exitinfo->intInfo);
-
-
-  if (!intInfo->valid) {
-     // interrupts are off, but this interrupt is not acknoledged (still pending)
-     // so we turn on interrupts to deliver appropriately in the
-     // host
-    PrintTrace("External Interrupt is invald.  Turning Interrupts back on\n");
-    asm("sti");
+    PrintDebug("VMX Exit taken!\n");
     return 0;
-  } 
-
-  // At this point, interrupts are off and the interrupt has been 
-  // acknowledged.  We will now handle the interrupt ourselves 
-  // and turn interrupts  back on in the host
-
-  PrintTrace("type: %d\n", intInfo->type);
-  PrintTrace("number: %d\n", intInfo->nr);
-
-  PrintTrace("Interrupt %d occuring now and handled by HandleExternalIRQExit\n",intInfo->nr);
-
-  switch (intInfo->type) {
-  case 0:  {  // ext. IRQ
-    // In the following, we construct an "int x" instruction
-    // where x is the specific interrupt number that is raised
-    // then we execute that instruciton
-    // because we are in host context, that means it is delivered as normal
-    // through the host IDT
-     
-     ((char*)(&&ext_int_seq_start))[1] = intInfo->nr;
-     PrintTrace("Interrupt instruction setup done %x\n", *((ushort_t *)(&&ext_int_seq_start)));
-     
-ext_int_seq_start:
-     asm("int $0");
-  }
-
-    break;
-  case 2: // NMI
-    PrintTrace("Type: NMI\n");
-    break;
-  case 3: // hw exception
-    PrintTrace("Type: HW Exception\n");
-    break;
-  case 4: // sw exception
-    PrintTrace("Type: SW Exception\n");
-    break;
-  default:
-    PrintTrace("Invalid Interrupt Type\n");
-    return -1;
-  }
-  
-  if (intInfo->valid && intInfo->errorCode) {
-    PrintTrace("IntError: %x\n", exitinfo->intErrorCode);
-  }
-
-
-  return 0;
-
 }
-
-
-
-
-
-
-
-static int HandleExceptionOrNMI(struct VM *vm)
-{
-  struct Instruction inst;
-  uint_t num;
-  uint_t type;
-  uint_t errorvalid;
-  uint_t error;
-  uint_t ext=0;
-  uint_t idt=0;
-  uint_t ti=0;
-  uint_t selectorindex=0;
-
-  PrintTrace("Exception or NMI occurred\n");
-  
-  num=vm->vmcs.exitInfoFields.intInfo & 0xff;
-  type=(vm->vmcs.exitInfoFields.intInfo & 0x700)>>8;
-  errorvalid=(vm->vmcs.exitInfoFields.intInfo & 0x800)>>11;
-  if (errorvalid) { 
-    error=vm->vmcs.exitInfoFields.intErrorCode;
-    ext=error&0x1;
-    idt=(error&0x2)>>1;
-    ti=(error&0x4)>>2;
-    selectorindex=(error>>3)&0xffff;
-  }
-  
-  PrintTrace("Exception %d now - handled by HandleExceptionOrNMI\n",num);
-
-  PrintTrace("Exception Number %u : %s\n", num, exception_names[num]);
-  PrintTrace("Exception Type %u : %s\n", type, exception_type_names[type]);
-  if (errorvalid) { 
-    if (ext) { 
-      PrintTrace("External\n");
-    } else {
-      PrintTrace("%s - Selector Index is %u\n", idt ? "IDT" : ti ? "LDT" : "GDT", selectorindex);
-    }
-  }
-
-  DecodeCurrentInstruction(vm,&inst);
-
-  if (inst.type==VM_MOV_TO_CR0) {
-    PrintTrace("MOV TO CR0, oldvalue=0x%x, newvalue=0x%x\n",inst.input2, inst.input1);
-    if ((inst.input2 & CR0_PE) && !(inst.input1 & CR0_PE) && vm->state==VM_VMXASSIST_STARTUP) {
-      // This is VMXAssist signalling for us to turn on V8086 mode and
-      // jump into the bios
-      PrintTrace("VMXAssist is signaling us for switch to V8086 mode and jump to 0xf000:fff0\n");
-      SetupV8086ModeForBoot(vm);
-      goto leave;
-    } else {
-      PrintTrace("Instruction is a write to CR0, but we don't understand it so we'll just exec it\n");
-    } 
-  } 
-
-
-  PrintTrace("Trying to execute the faulting instruction in VMM context now\n");
-  ExecFaultingInstructionInVMM(vm);
-
-    leave:
-  //
-  //PanicUnhandledVMExit(vmcs,regs);
-  //VMXPanic();
-  return 0;
-}
-
-
-
-
-
-int Do_VMM(struct VMXRegs regs) 
-{
-
-  ullong_t vmcs_ptr = 0;
-  uint_t vmcs_ptr_low = 0;
-  int ret = 0;
-  uint_t vmx_abort = 0;
-
-
-  
-  PrintTrace("Vm Exit\n");
-  ret = VMCS_STORE(&vmcs_ptr);
-  vmcs_ptr &= 0xffffffff;
-  vmcs_ptr_low +=  vmcs_ptr;
-
-
-
-
-  PrintTrace("ret=%d\n", ret);
-  PrintTrace("Revision: %x\n", *(uint_t *)(vmcs_ptr_low));
-  vmx_abort = *(uint_t*)(((char *)vmcs_ptr_low)+4);
-    
-  struct VM *vm = FindVM();
-
-  if (vmx_abort != 0) {
-    PrintTrace("VM ABORTED w/ code: %x\n", vmx_abort);
-    return -1;
-  }
-
-  vm->registers = regs;
-
-  if (CopyOutVMCSData(&(vm->vmcs)) != 0) {
-    PrintTrace("Could not copy out VMCS\n");
-    return -1;
-  }
-
-
-  PrintTrace("Guest esp: 0x%x (%u)\n", vm->vmcs.guestStateArea.rsp, vm->vmcs.guestStateArea.rsp);
-
-  PrintTrace("VM Exit for reason: %d (%x)\n", 
-             vm->vmcs.exitInfoFields.reason & 0x00000fff,
-             vm->vmcs.exitInfoFields.reason);  
-
-  if (vm->vmcs.exitInfoFields.reason & (0x1<<29) ) { 
-    PrintTrace("VM Exit is from VMX root operation.  Panicking\n");
-    VMXPanic();
-  }
-
-  if (vm->vmcs.exitInfoFields.reason & (0x1<<31) ) { 
-    PrintTrace("VM Exit is due to a VM entry failure.  Shouldn't happen here. Panicking\n");
-    PrintTrace_VMCSData(&(vm->vmcs));
-    VMXPanic();
-  }
-
-  switch (vm->vmcs.exitInfoFields.reason) {
-  case VM_EXIT_REASON_INFO_EXCEPTION_OR_NMI:
-    ret = HandleExceptionOrNMI(vm);
-    break;
-  case VM_EXIT_REASON_EXTERNAL_INTR:
-    ret = HandleExternalIRQExit(vm);
-    break;
-  case VM_EXIT_REASON_TRIPLE_FAULT:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INIT_SIGNAL:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_STARTUP_IPI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_IO_SMI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_OTHER_SMI:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INTR_WINDOW:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_NMI_WINDOW:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_TASK_SWITCH:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_CPUID:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INVD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_INVLPG:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RDPMC:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RDTSC:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_RSM:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMCALL:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMCLEAR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMLAUNCH:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMPTRLD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMPTRST:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMREAD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMRESUME:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMWRITE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMXOFF:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_VMXON:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_CR_REG_ACCESSES:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MOV_DR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_IO_INSTR:
-    ret = HandleInOutExit(vm);
-    break;
-  case VM_EXIT_REASON_RDMSR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_WRMSR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAIL_INVALID_GUEST_STATE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAIL_MSR_LOAD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MWAIT:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_MONITOR:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_PAUSE:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_ENTRY_FAILURE_MACHINE_CHECK:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  case VM_EXIT_REASON_TPR_BELOW_THRESHOLD:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  default:
-    ret = PanicUnhandledVMExit(vm);
-    break;
-  }
-  
-  
-  regs = vm->registers;
-  CopyInVMCSData(&(vm->vmcs));
-
-  /*
-    {
-    VMCS_CLEAR(vmcs_ptr);
-    }
-  */
-
-  PrintTrace("Returning from Do_VMM: %d\n", ret);
-  return ret;
-}
-
-
-
-
-
-
-// simply execute the instruction that is faulting and return
-static int ExecFaultingInstructionInVMM(struct VM *vm)
-{
-  uint_t address = GetLinearIP(vm);
-  myregs = (uint_t)&(vm->registers);
-  
-
-  PrintTrace("About the execute faulting instruction!\n");
-  PrintTrace("Instruction is:\n");
-  PrintTraceMemDump((void*)(address),vm->vmcs.exitInfoFields.instrLength);
-  
-
-  PrintTrace("The template code is:\n");
-  PrintTraceMemDump(&&template_code,TEMPLATE_CODE_LEN);
-
-  // clone the template code
-  //memcpy(&&template_code,code,MAX_CODE);
-  
-  // clean up the nop field
-  memset(&&template_code+INSTR_OFFSET_START,*((uchar_t *)(&&template_code+0)),NOP_SEQ_LEN);
-  // overwrite the nops with the faulting instruction
-  memcpy(&&template_code+INSTR_OFFSET_START, (void*)(address),vm->vmcs.exitInfoFields.instrLength);
-  
-  PrintTrace("Finished modifying the template code, which now is:\n");
-  PrintTraceMemDump(&&template_code,TEMPLATE_CODE_LEN);
-
-  PrintTrace("Now entering modified template code\n");
-
-
- template_code:
-  // Template code stores current registers,
-  // restores registers, has a landing pad of noops 
-  // that will be modified, restores current regs, and then returns
-  //
-  // Note that this currently ignores cr0, cr3, cr4, dr7, rsp, rip, and rflags
-  // it also blythly assumes it can exec the instruction in protected mode
-  //
-  __asm__ __volatile__ ("nop\n"               // for cloning purposes                          (1 byte)
-                       "pusha\n"             // push our current regs onto the current stack  (1 byte)
-                       "movl %0, %%eax\n"    // Get oldesp location                           (5 bytes)
-                       "movl %%esp, (%%eax)\n"  // store the current stack pointer in oldesp       (2 bytes)
-                        "movl %1, %%eax\n"    // Get regs location                             (5 bytes)
-                       "movl (%%eax), %%esp\n"  // point esp at regs                               (2 bytes)
-                       "popa\n"              // now we have the VM registers restored            (1 byte)
-                       "nop\n"               // now we execute the actual instruction         (1 byte x 10)
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       "nop\n"               // now we execute the actual instruction
-                       // need to copy back to the VM registers!
-                        "movl %0, %%eax\n"     // recapture oldesp location                     (5 bytes)
-                       "movl (%%eax), %%esp\n"   // now we'll get our esp back from oldesp       (2 bytes)
-                       "popa\n"              // and restore our GP regs and we're done       (1 byte)
-                       : "=m"(oldesp)
-                       : "m"(myregs)
-                       );
-  
-  PrintTrace("Survived executing the faulting instruction and returning.\n");
-
-  vm->vmcs.guestStateArea.rip += vm->vmcs.exitInfoFields.instrLength;
-
-  return 0;
-
-}
-
index 3591463..0651542 100644
@@ -1,13 +1,62 @@
+#;  -*- fundamental -*-
 
 #define VMX_SUCCESS         0
 #define VMX_FAIL_INVALID    1
 #define VMX_FAIL_VALID      2
+#define VMM_FAILURE         3
 
 #define VMCS_HOST_RSP       0x00006C14
+#define VMCS_HOST_RIP       0x00006C16
 
 #if defined(__V3_64BIT__)
 
-#define r(reg) %r##reg
+#define save_registers(location)       \
+       pushq   %rax;                   \
+       movq    location, %rax;         \
+       movq    %rdi, (%rax);           \
+       movq    %rsi, 8(%rax);          \
+       movq    %rbp, 16(%rax);         \
+       movq    $0, 24(%rax);           \
+       movq    %rbx, 32(%rax);         \
+       movq    %rdx, 40(%rax);         \
+       movq    %rcx, 48(%rax);         \
+       pushq   %rbx;                   \
+       movq    16(%rsp), %rbx;         \
+       movq    %rbx, 56(%rax);         \
+       popq    %rbx;                   \
+                                       \
+       movq    %r8, 64(%rax);          \
+       movq    %r9, 72(%rax);          \
+       movq    %r10, 80(%rax);         \
+       movq    %r11, 88(%rax);         \
+       movq    %r12, 96(%rax);         \
+       movq    %r13, 104(%rax);        \
+       movq    %r14, 112(%rax);        \
+       movq    %r15, 120(%rax);        \
+       popq    %rax;                   
+       
+
+#define restore_registers(location) \
+       push    %rax;                   \
+       mov     location, %rax;         \
+       mov     (%rax), %rdi;           \
+       mov     8(%rax), %rsi;          \
+       mov     16(%rax), %rbp;         \
+       mov     32(%rax), %rbx;         \
+       mov     40(%rax), %rdx;         \
+       mov     48(%rax), %rcx;         \
+                                       \
+       mov     64(%rax), %r8;          \
+       mov     72(%rax), %r9;          \
+       mov     80(%rax), %r10;         \
+       mov     88(%rax), %r11;         \
+       mov     96(%rax), %r12;         \
+       mov     104(%rax), %r13;        \
+       mov     112(%rax), %r14;        \
+       mov     120(%rax), %r15;        \
+       pop     %rax;
+
+
 
 #define PUSHA    \
     push %rax;   \
     pop %rcx;    \
     pop %rbx;    \
     pop %rax;    
+
+.align 8
+.globl v3_vmx_exit_handler
+v3_vmx_exit_handler:
+    save_registers(8(%rsp));
+    addq $8, %rsp
+    POPA
+    popf
+    pushq %rdi
+    call v3_handle_vmx_exit
+
+    andq %rax, %rax
+    jnz .Lvmm_failure
+
+v3_vmx_vmresume:
+    pop %rdi
+    pushf
+    PUSHA
+    pushq %rdi
+    restore_registers(%rdi);
+
+    vmresume
+
+    addq $8, %rsp
+    jz .Lfail_valid
+    jc .Lfail_invalid
+    jmp .Lreturn
+
+.align 8
+.globl v3_vmx_vmlaunch
+// vm_regs = %rdi
+v3_vmx_vmlaunch:
+    cli
+    pushf
+    PUSHA
+    pushq %rdi
+    
+    movq %rsp, %rax
+    movq $VMCS_HOST_RSP, %rbx
+    vmwrite %rax, %rbx
+    jz .Lfail_valid
+    jc .Lfail_invalid
+
+    movq $v3_vmx_exit_handler, %rax
+    movq $VMCS_HOST_RIP, %rbx
+    vmwrite %rax, %rbx
+    jz .Lfail_valid
+    jc .Lfail_invalid
+
+    restore_registers(%rdi);
+
+    vmlaunch
+    jz .Lfail_valid
+    jc .Lfail_invalid
+    jmp .Lreturn
+
+.Lfail_valid:
+    addq $8, %rsp
+    POPA
+    movq $VMX_FAIL_VALID, %rax
+    jmp .Lreturn
+
+.Lfail_invalid:
+    addq $8, %rsp
+    POPA
+    movq $VMX_FAIL_INVALID, %rax
+    jmp .Lreturn
+
+.Lvmm_failure:
+    addq $8, %rsp
+    POPA
+    movq $VMM_FAILURE, %rax
+    jmp .Lreturn
+
+.Lreturn:
+    sti
+    popf
+    ret
+    
 #else
 
-#define r(reg) %e##reg
-  
+#define save_resgisters(location)      \
+       pushl   %eax;                   \
+       movl    location, %eax;         \
+       movl    %edi, (%eax);           \
+       movl    %esi, 8(%eax);          \
+       movl    %ebp, 16(%eax);         \
+       movl    $0, 24(%eax);           \
+       movl    %ebx, 32(%eax);         \
+       movl    %edx, 40(%eax);         \
+       movl    %ecx, 48(%eax);         \
+       pushl   %ebx;                   \
+       movl    8(%esp), %ebx;          \
+       movl    %ebx, 56(%eax);         \
+       popl    %ebx;                   \
+       popl    %eax;                   
+       
+
+#define restore_registers(location) \
+       pushl   %eax;                   \
+       movl    location, %eax;         \
+       movl    (%eax), %edi;           \
+       movl    8(%eax), %esi;          \
+       movl    16(%eax), %ebp;         \
+       movl    32(%eax), %ebx;         \
+       movl    40(%eax), %edx;         \
+       movl    48(%eax), %ecx;         \
+       popl    %eax;
 #define PUSHA    \
     push %eax;   \
     push %ebx;   \
     pop %ebx;    \
     pop %eax;
 
-#endif
-
 .align 8
 .globl v3_vmx_exit_handler
 v3_vmx_exit_handler:
-    PUSHA
-    call v3_vmx_handle_exit
+    save_registers(4(%esp))
+    addl $8, %rsp
     POPA
+    popf
+    pushl %rdi
+    call v3_handle_vmx_exit
+
+    andl %eax, %eax
+    jnz .Lvmm_failure
 
 v3_vmx_vmresume:
+    popl %edi
+    pushf
+    PUSHA
+    pushl %edi
+    restore_registers(%rdi)
+
     vmresume
-    sti
+
+    addl $8, %esp
     jz .Lfail_valid
     jc .Lfail_invalid
     jmp .Lreturn
 
+.align 8
 .globl v3_vmx_vmlaunch
+// vm_regs = %edi
 v3_vmx_vmlaunch:
-    cli 
+    cli
     pushf
     PUSHA
+    pushl %edi
 
-    mov r(sp), r(ax)
-    mov $VMCS_HOST_RSP, r(bx)
-    vmwrite r(bx), r(ax)
+    movl %esp, %eax
+    movl $VMCS_HOST_RSP, %ebx
+    vmwrite %eax, %ebx
     jz .Lfail_valid
     jc .Lfail_invalid
 
+    movl $v3_vmx_exit_handler, %eax
+    movl $VMCS_HOST_RIP, %ebx
+    vmwrite %eax, %ebx
+    jz .Lfail_valid
+    jc .Lfail_invalid
+
+    restore_registers(%edi)
+
     vmlaunch
-    sti
     jz .Lfail_valid
     jc .Lfail_invalid
     jmp .Lreturn
 
 .Lfail_valid:
-    mov $VMX_FAIL_VALID, r(ax)
+    addl $8, %esp
+    POPA
+    movl $VMX_FAIL_VALID, %eax
     jmp .Lreturn
 
 .Lfail_invalid:
-    mov $VMX_FAIL_INVALID, r(ax)
+    addq $8, %esp
+    POPA
+    movl $MVX_FAIL_INVALID, %eax
     jmp .Lreturn
 
-.Lreturn:
+.Lvmm_failure:
+    addq $8, %esp
     POPA
+    movl $VMM_FAILURE, %eax
+    jmp .Lreturn
+
+.Lreturn:
+    sti
     popf
     ret
 
-
-
+#endif