Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way.
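
For example, a release branch can be tracked with the same pattern. The branch name below is only illustrative; list the branches that actually exist on the remote first:

  cd palacios
  # List the remote branches to find the real release branch names
  git branch -r
  # Create and switch to a local tracking branch (Release-1.0 is a placeholder name)
  git checkout --track -b Release-1.0 origin/Release-1.0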


major VMX update
Jack Lange [Thu, 3 Dec 2009 22:39:16 +0000 (16:39 -0600)]
ported to new configuration system
reworked to allow nested entries

19 files changed:
palacios/include/palacios/vm_guest.h
palacios/include/palacios/vmcb.h
palacios/include/palacios/vmcs.h
palacios/include/palacios/vmx.h
palacios/include/palacios/vmx_assist.h
palacios/include/palacios/vmx_ctrl_regs.h
palacios/include/palacios/vmx_handler.h
palacios/src/palacios/svm.c
palacios/src/palacios/vm_guest.c
palacios/src/palacios/vmcs.c
palacios/src/palacios/vmm.c
palacios/src/palacios/vmm_config.c
palacios/src/palacios/vmm_config_class.h
palacios/src/palacios/vmx.c
palacios/src/palacios/vmx_assist.c
palacios/src/palacios/vmx_ctrl_regs.c
palacios/src/palacios/vmx_handler.c
palacios/src/palacios/vmx_lowlevel.S
palacios/src/palacios/vmx_lowlevel.asm [deleted file]

index 6506658..38e2a71 100644
@@ -105,6 +105,7 @@ struct v3_segment {
     uint_t long_mode      : 1;
     uint_t db             : 1;
     uint_t granularity    : 1;
+    uint_t unusable       : 1;
 } __attribute__((packed));
 
 
@@ -234,7 +235,7 @@ int v3_translate_segment(struct guest_info * info, uint16_t selector, struct v3_
 
 void v3_print_guest_state(struct guest_info * info);
 
-void v3_print_segments(struct guest_info * info);
+void v3_print_segments(struct v3_segments * segs);
 void v3_print_ctrl_regs(struct guest_info * info);
 void v3_print_GPRs(struct guest_info * info);
 
index 94e8d88..5effea4 100644
@@ -274,7 +274,7 @@ typedef struct VMCB_Control_Area vmcb_ctrl_t;
 
 
 struct vmcb_selector {
-    ushort_t selector;
+    uint16_t selector;
 
     /* These attributes are basically a direct map of the attribute fields of a segment desc.
      * The segment limit in the middle is removed and the fields are fused together
@@ -295,8 +295,8 @@ struct vmcb_selector {
        }  __attribute__((packed)) fields;
     }  __attribute__((packed)) attrib;
 
-    uint_t  limit;
-    ullong_t base;
+    uint32_t  limit;
+    uint64_t base;
 }  __attribute__((packed));
 
 
index 79efe4e..2a7e42e 100644
 /* Control register exit masks */
 #define   CR4_VMXE      0x00002000
 
-int v3_load_vmcs_guest_state(struct guest_info * info);
-int v3_update_vmcs_guest_state(struct guest_info * info);
-int v3_update_vmcs_host_state(struct guest_info * info);
-int v3_update_vmcs_ctrl_fields(struct guest_info * info);
+
 
 
 typedef enum {
@@ -243,9 +240,13 @@ struct vmx_exception_bitmap {
 
 /* Segment Selector Access Rights (32 bits) */
 /* INTEL Manual: 20-4 vol 3B */
-struct vmcs_segment_access {
+struct vmcs_segment {
+    uint16_t selector;
+    uint32_t limit;
+    uint64_t base;
+
     union {
-       uint32_t value;
+       uint32_t val;
        struct {
            uint32_t    type        : 4;
            uint32_t    desc_type   : 1; 
@@ -259,8 +260,8 @@ struct vmcs_segment_access {
            uint32_t    unusable    : 1; 
            uint32_t    rsvd2       : 15;
        } __attribute__((packed));
-    } __attribute__((packed));
-}__attribute__((packed));
+    } __attribute__((packed)) access;
+};
 
 
 struct vmcs_interrupt_state {
@@ -287,6 +288,19 @@ const char * v3_vmcs_field_to_str(vmcs_field_t field);
 void v3_print_vmcs();
 
 
+int v3_vmx_save_vmcs(struct guest_info * info);
+int v3_vmx_restore_vmcs(struct guest_info * info);
+
+
+int v3_update_vmcs_host_state(struct guest_info * info);
+int v3_update_vmcs_ctrl_fields(struct guest_info * info);
+
+
+int v3_read_vmcs_segments(struct v3_segments * segs);
+int v3_write_vmcs_segments(struct v3_segments * segs);
+void v3_vmxseg_to_seg(struct vmcs_segment * vmcs_seg, struct v3_segment * seg);
+void v3_seg_to_vmxseg(struct v3_segment * seg, struct vmcs_segment * vmcs_seg);
+
 #endif // ! __V3VEE__
 
 
index a34cd46..70e877f 100644
@@ -214,6 +214,10 @@ struct vmcs_host_state {
     struct v3_segment  tr;
 };
 
+
+
+
+
 struct vmx_data {
     vmx_state_t state;
     struct vmcs_host_state host_state;
@@ -222,6 +226,9 @@ struct vmx_data {
 
     uint8_t ia32e_avail;
 
+    v3_reg_t guest_cr4; /// corresponds to the CR4 Read shadow
+
+
     /* VMX Control Fields */
     struct vmx_pin_ctrls pin_ctrls;
     struct vmx_pri_proc_ctrls pri_proc_ctrls;
@@ -232,7 +239,9 @@ struct vmx_data {
 
 int v3_is_vmx_capable();
 void v3_init_vmx_cpu(int cpu_id);
+int v3_start_vmx_guest(struct guest_info* info);
 
+int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class);
 
 #endif // ! __V3VEE__
 
index b367cd3..d9fc31d 100644
@@ -55,9 +55,16 @@ union vmcs_arbytes {
             g         : 1,
             null_bit  : 1,
             reserved2 : 15;
-    } fields;
+    } __attribute__((packed)) fields;
     unsigned int bytes;
-};
+} __attribute__((packed));
+
+struct vmx_assist_segment {
+    uint32_t sel;
+    uint32_t limit;
+    uint32_t base;
+    union vmcs_arbytes arbytes;
+} __attribute__((packed));
 
 /*
  * World switch state
@@ -69,45 +76,25 @@ struct vmx_assist_context {
     uint32_t  cr0;
     uint32_t  cr3;        /* page table directory */
     uint32_t  cr4;
+
     uint32_t  idtr_limit; /* idt */
     uint32_t  idtr_base;
+
     uint32_t  gdtr_limit; /* gdt */
     uint32_t  gdtr_base;
-    uint32_t  cs_sel;     /* cs selector */
-    uint32_t  cs_limit;
-    uint32_t  cs_base;
-    union vmcs_arbytes cs_arbytes;
-    uint32_t  ds_sel;     /* ds selector */
-    uint32_t  ds_limit;
-    uint32_t  ds_base;
-    union vmcs_arbytes ds_arbytes;
-    uint32_t  es_sel;     /* es selector */
-    uint32_t  es_limit;
-    uint32_t  es_base;
-    union vmcs_arbytes es_arbytes;
-    uint32_t  ss_sel;     /* ss selector */
-    uint32_t  ss_limit;
-    uint32_t  ss_base;
-    union vmcs_arbytes ss_arbytes;
-    uint32_t  fs_sel;     /* fs selector */
-    uint32_t  fs_limit;
-    uint32_t  fs_base;
-    union vmcs_arbytes fs_arbytes;
-    uint32_t  gs_sel;     /* gs selector */
-    uint32_t  gs_limit;
-    uint32_t  gs_base;
-    union vmcs_arbytes gs_arbytes;
-    uint32_t  tr_sel;     /* task selector */
-    uint32_t  tr_limit;
-    uint32_t  tr_base;
-    union vmcs_arbytes tr_arbytes;
-    uint32_t  ldtr_sel;   /* ldtr selector */
-    uint32_t  ldtr_limit;
-    uint32_t  ldtr_base;
-    union vmcs_arbytes ldtr_arbytes;
+
+    struct vmx_assist_segment cs;
+    struct vmx_assist_segment ds;
+    struct vmx_assist_segment es;
+    struct vmx_assist_segment ss;
+    struct vmx_assist_segment fs;
+    struct vmx_assist_segment gs;
+    struct vmx_assist_segment tr;
+    struct vmx_assist_segment ldtr;
+
 
     unsigned char rm_irqbase[2];
-};
+} __attribute__((packed));
 
 typedef struct vmx_assist_context vmx_assist_context_t;
 
index 88b24f4..4a8f0d8 100644
@@ -1,7 +1,38 @@
+/* 
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National 
+ * Science Foundation and the Department of Energy.  
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico.  You can find out more at 
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
+ * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
+ * All rights reserved.
+ *
+ * Author: Andy Gocke <agocke@gmail.com>
+ *
+ * This is free software.  You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
 
-#include <palacios/vm_guest.h>
-#include <palacios/vmm_ctrl_regs.h>
+#ifndef __VMX_CTRL_REGS_H__
+#define __VMX_CTRL_REGS_H__
 
-int v3_vmx_handle_cr0_access(struct guest_info * info);
-int v3_vmx_handle_cr3_access(struct guest_info * info);
+#ifdef __V3VEE__
 
+#include <palacios/vmx_handler.h>
+
+struct guest_info;
+
+int v3_vmx_handle_cr0_access(struct guest_info * info, 
+                            struct vmx_exit_cr_qual * cr_qual, 
+                            struct vmx_exit_info * exit_info);
+int v3_vmx_handle_cr3_access(struct guest_info * info, 
+                            struct vmx_exit_cr_qual * cr_qual);
+
+
+#endif
+
+#endif
index 1152b5c..1fdf98c 100644
@@ -24,6 +24,7 @@
 
 #include <palacios/vm_guest.h>
 
+
 /******************************************/
 /* VMX Intercept Exit Codes               */
 /******************************************/
@@ -197,7 +198,27 @@ struct vmx_exit_idt_vec_info {
     } __attribute__ ((packed));
 } __attribute__ ((packed));
 
-int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
+
+
+
+struct vmx_exit_info {
+    uint32_t instr_len;
+    uint32_t instr_info;
+
+    uint32_t exit_reason;
+    addr_t exit_qual;
+
+    uint32_t int_info;
+    uint32_t int_err;
+
+    addr_t guest_linear_addr;
+};
+
+
+
+
+
+int v3_handle_vmx_exit(struct guest_info * info, struct vmx_exit_info * exit_info);
 const char * v3_vmx_exit_code_to_str(vmx_exit_t exit);
 
 #endif
index 09fe744..7dcd68d 100644
@@ -284,7 +284,7 @@ int v3_init_svm_vmcb(struct guest_info * info, v3_vm_class_t vm_class) {
 
 
 
-static int update_irq_state_atomic(struct guest_info * info) {
+static int update_irq_exit_state(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
 
     if ((info->intr_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
@@ -317,7 +317,7 @@ static int update_irq_state_atomic(struct guest_info * info) {
 }
 
 
-static int update_irq_state(struct guest_info * info) {
+static int update_irq_entry_state(struct guest_info * info) {
     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
 
     if (v3_excp_pending(info)) {
@@ -435,6 +435,16 @@ int v3_svm_enter(struct guest_info * info) {
     guest_state->rax = info->vm_regs.rax;
     guest_state->rip = info->rip;
     guest_state->rsp = info->vm_regs.rsp;
+
+#ifdef CONFIG_SYMBIOTIC
+    if (info->sym_state.sym_call_active == 0) {
+       update_irq_entry_state(info);
+    }
+#else 
+    update_irq_entry_state(info);
+#endif
+
+
     /* ** */
 
     /*
@@ -497,10 +507,10 @@ int v3_svm_enter(struct guest_info * info) {
 
 #ifdef CONFIG_SYMBIOTIC
     if (info->sym_state.sym_call_active == 0) {
-       update_irq_state_atomic(info);
+       update_irq_exit_state(info);
     }
 #else
-    update_irq_state_atomic(info);
+    update_irq_exit_state(info);
 #endif
 
 
@@ -517,13 +527,6 @@ int v3_svm_enter(struct guest_info * info) {
        return -1;
     }
 
-#ifdef CONFIG_SYMBIOTIC
-    if (info->sym_state.sym_call_active == 0) {
-       update_irq_state(info);
-    }
-#else 
-    update_irq_state(info);
-#endif
 
     return 0;
 }
index 44673cb..de0cb21 100644
@@ -160,8 +160,7 @@ const uchar_t * v3_mem_mode_to_str(v3_mem_mode_t mode) {
 }
 
 
-void v3_print_segments(struct guest_info * info) {
-    struct v3_segments * segs = &(info->segments);
+void v3_print_segments(struct v3_segments * segs) {
     int i = 0;
     struct v3_segment * seg_ptr;
 
@@ -267,7 +266,7 @@ void v3_print_guest_state(struct guest_info * info) {
 
     V3_Print("NumExits: %u\n", (uint32_t)info->num_exits);
 
-    v3_print_segments(info);
+    v3_print_segments(&(info->segments));
     v3_print_ctrl_regs(info);
 
     if (info->shdw_pg_mode == SHADOW_PAGING) {
index e7f9570..9156e23 100644
 #include <palacios/vmm_ctrl_regs.h>
 #include <palacios/vmm_lowlevel.h>
 
-static void inline translate_v3_seg_to_access(struct v3_segment * v3_seg,  
-                                             struct vmcs_segment_access * access)
-{
-    access->type = v3_seg->type;
-    access->desc_type = v3_seg->system;
-    access->dpl = v3_seg->dpl;
-    access->present = v3_seg->present;
-    access->avail = v3_seg->avail;
-    access->long_mode = v3_seg->long_mode;
-    access->db = v3_seg->db;
-    access->granularity = v3_seg->granularity;
-}
 
-static void inline translate_access_to_v3_seg(struct vmcs_segment_access * access, 
-                                             struct v3_segment * v3_seg)
-{
-    v3_seg->type = access->type;
-    v3_seg->system = access->desc_type;
-    v3_seg->dpl = access->dpl;
-    v3_seg->present = access->present;
-    v3_seg->avail = access->avail;
-    v3_seg->long_mode = access->long_mode;
-    v3_seg->db = access->db;
-    v3_seg->granularity = access->granularity;
-}
 
 
-static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
-{
+
+static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
     int ret = 0;
     ret = vmcs_write(field, val);
 
@@ -65,8 +41,7 @@ static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
     return 0;
 }
 
-static int inline check_vmcs_read(vmcs_field_t field, void * val)
-{
+static int inline check_vmcs_read(vmcs_field_t field, void * val) {
     int ret = 0;
     ret = vmcs_read(field, val);
 
@@ -77,123 +52,143 @@ static int inline check_vmcs_read(vmcs_field_t field, void * val)
     return ret;
 }
 
-// static const char * v3_vmcs_field_to_str(vmcs_field_t field);
 
-//extern char * exception_names;
-//
-// Ignores "HIGH" addresses - 32 bit only for now
-//
-
-int v3_update_vmcs_guest_state(struct guest_info * info)
-{
-    int vmx_ret = 0;
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, info->rip);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);
-    
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
-    vmx_ret |= check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, info->dbg_regs.dr7);
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
-    if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
-        vmx_ret |= check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
-    }
 
 
-    /*** Write VMCS Segments ***/
-    struct vmcs_segment_access access;
+typedef enum { ES = 0, 
+              CS = 2,
+              SS = 4,
+              DS = 6, 
+              FS = 8, 
+              GS = 10, 
+              LDTR = 12, 
+              TR = 14, 
+              GDTR = 16, 
+              IDTR = 18} vmcs_seg_offsets_t;
 
-    memset(&access, 0, sizeof(access));
+typedef enum {BASE = VMCS_GUEST_ES_BASE,
+             LIMIT = VMCS_GUEST_ES_LIMIT, 
+             ACCESS = VMCS_GUEST_ES_ACCESS, 
+             SELECTOR = VMCS_GUEST_ES_SELECTOR } vmcs_seg_bases_t;
 
-    /* CS Segment */
-    translate_v3_seg_to_access(&(info->segments.cs), &access);
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
+static int v3_read_vmcs_segment(struct v3_segment * seg, vmcs_seg_offsets_t seg_type) {
+    vmcs_field_t selector = VMCS_GUEST_ES_SELECTOR + seg_type;
+    vmcs_field_t base = VMCS_GUEST_ES_BASE + seg_type;
+    vmcs_field_t limit = VMCS_GUEST_ES_LIMIT + seg_type;
+    vmcs_field_t access = VMCS_GUEST_ES_ACCESS + seg_type;
+    struct vmcs_segment vmcs_seg;
 
-    /* SS Segment */
-    memset(&access, 0, sizeof(access));
-    translate_v3_seg_to_access(&(info->segments.ss), &access);
+    memset(&vmcs_seg, 0, sizeof(struct vmcs_segment));
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
+    check_vmcs_read(limit, &(vmcs_seg.limit));
+    check_vmcs_read(base, &(vmcs_seg.base));
 
-    /* DS Segment */
-    memset(&access, 0, sizeof(access));
-    translate_v3_seg_to_access(&(info->segments.ds), &access);
+    if ((seg_type != GDTR) && (seg_type != IDTR)) {
+       check_vmcs_read(selector, &(vmcs_seg.selector));
+       check_vmcs_read(access, &(vmcs_seg.access.val)); 
+    }
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
+    v3_vmxseg_to_seg(&vmcs_seg, seg);
 
+    return 0;
+}
 
-    /* ES Segment */
-    memset(&access, 0, sizeof(access));
-    translate_v3_seg_to_access(&(info->segments.es), &access);
+static int v3_write_vmcs_segment(struct v3_segment * seg, vmcs_seg_offsets_t seg_type) {
+    vmcs_field_t selector = VMCS_GUEST_ES_SELECTOR + seg_type;
+    vmcs_field_t base = VMCS_GUEST_ES_BASE + seg_type;
+    vmcs_field_t limit = VMCS_GUEST_ES_LIMIT + seg_type;
+    vmcs_field_t access = VMCS_GUEST_ES_ACCESS + seg_type;
+    struct vmcs_segment vmcs_seg;
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
+    v3_seg_to_vmxseg(seg, &vmcs_seg);
 
-    /* FS Segment */
-    memset(&access, 0, sizeof(access));
-    translate_v3_seg_to_access(&(info->segments.fs), &access);
+    check_vmcs_write(limit, vmcs_seg.limit);
+    check_vmcs_write(base, vmcs_seg.base);
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
+    if ((seg_type != GDTR) && (seg_type != IDTR)) {
+       check_vmcs_write(access, vmcs_seg.access.val); 
+       check_vmcs_write(selector, vmcs_seg.selector);
+    }
 
-    /* GS Segment */
-    memset(&access, 0, sizeof(access));
-    translate_v3_seg_to_access(&(info->segments.gs), &access);
+    return 0;
+}
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
+int v3_read_vmcs_segments(struct v3_segments * segs) {
+    v3_read_vmcs_segment(&(segs->cs), CS);
+    v3_read_vmcs_segment(&(segs->ds), DS);
+    v3_read_vmcs_segment(&(segs->es), ES);
+    v3_read_vmcs_segment(&(segs->fs), FS);
+    v3_read_vmcs_segment(&(segs->gs), GS);
+    v3_read_vmcs_segment(&(segs->ss), SS);
+    v3_read_vmcs_segment(&(segs->ldtr), LDTR);
+    v3_read_vmcs_segment(&(segs->gdtr), GDTR);
+    v3_read_vmcs_segment(&(segs->idtr), IDTR);
+    v3_read_vmcs_segment(&(segs->tr), TR);
 
-    /* LDTR segment */
-    memset(&access, 0, sizeof(access));
-    translate_v3_seg_to_access(&(info->segments.ldtr), &access);
+    return 0;
+}
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);
+int v3_write_vmcs_segments(struct v3_segments * segs) {
+    v3_write_vmcs_segment(&(segs->cs), CS);
+    v3_write_vmcs_segment(&(segs->ds), DS);
+    v3_write_vmcs_segment(&(segs->es), ES);
+    v3_write_vmcs_segment(&(segs->fs), FS);
+    v3_write_vmcs_segment(&(segs->gs), GS);
+    v3_write_vmcs_segment(&(segs->ss), SS);
+    v3_write_vmcs_segment(&(segs->ldtr), LDTR);
+    v3_write_vmcs_segment(&(segs->gdtr), GDTR);
+    v3_write_vmcs_segment(&(segs->idtr), IDTR);
+    v3_write_vmcs_segment(&(segs->tr), TR);
 
-    /* TR Segment */
-    memset(&access, 0, sizeof(access));
-    translate_v3_seg_to_access(&(info->segments.tr), &access);
+    return 0;
+}
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);
 
-    /* GDTR Segment */
+void v3_vmxseg_to_seg(struct vmcs_segment * vmcs_seg, struct v3_segment * seg) {
+    memset(seg, 0, sizeof(struct v3_segment));
 
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);
+    seg->selector = vmcs_seg->selector;
+    seg->limit = vmcs_seg->limit;
+    seg->base = vmcs_seg->base;
 
-    /* IDTR Segment*/
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
-    vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);
+    seg->type = vmcs_seg->access.type;
+    seg->system = vmcs_seg->access.desc_type;
+    seg->dpl = vmcs_seg->access.dpl;
+    seg->present = vmcs_seg->access.present;
+    seg->avail = vmcs_seg->access.avail;
+    seg->long_mode = vmcs_seg->access.long_mode;
+    seg->db = vmcs_seg->access.db;
+    seg->granularity = vmcs_seg->access.granularity;
+    seg->unusable = vmcs_seg->access.unusable;
 
-    return vmx_ret;
+}
 
+void v3_seg_to_vmxseg(struct v3_segment * seg, struct vmcs_segment * vmcs_seg) {
+    memset(vmcs_seg, 0, sizeof(struct vmcs_segment));
+
+    vmcs_seg->selector = seg->selector;
+    vmcs_seg->limit = seg->limit;
+    vmcs_seg->base = seg->base;
+
+    vmcs_seg->access.type = seg->type;
+    vmcs_seg->access.desc_type = seg->system;
+    vmcs_seg->access.dpl = seg->dpl;
+    vmcs_seg->access.present = seg->present;
+    vmcs_seg->access.avail = seg->avail;
+    vmcs_seg->access.long_mode = seg->long_mode;
+    vmcs_seg->access.db = seg->db;
+    vmcs_seg->access.granularity = seg->granularity;
+    vmcs_seg->access.unusable = seg->unusable;
 }
 
+
+
+
 int v3_update_vmcs_ctrl_fields(struct guest_info * info) {
     int vmx_ret = 0;
     struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
@@ -211,6 +206,64 @@ int v3_update_vmcs_ctrl_fields(struct guest_info * info) {
     return vmx_ret;
 }
 
+
+
+
+
+
+int v3_vmx_save_vmcs(struct guest_info * info) {
+    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
+    int error = 0;
+
+    check_vmcs_read(VMCS_GUEST_RIP, &(info->rip));
+    check_vmcs_read(VMCS_GUEST_RSP, &(info->vm_regs.rsp));
+
+    check_vmcs_read(VMCS_GUEST_CR0, &(info->ctrl_regs.cr0));
+    check_vmcs_read(VMCS_CR0_READ_SHDW, &(info->shdw_pg_state.guest_cr0));
+    check_vmcs_read(VMCS_GUEST_CR3, &(info->ctrl_regs.cr3));
+    check_vmcs_read(VMCS_GUEST_CR4, &(info->ctrl_regs.cr4));
+    check_vmcs_read(VMCS_CR4_READ_SHDW, &(vmx_info->guest_cr4));
+    check_vmcs_read(VMCS_GUEST_DR7, &(info->dbg_regs.dr7));
+
+    check_vmcs_read(VMCS_GUEST_RFLAGS, &(info->ctrl_regs.rflags));
+    if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
+        check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
+    }
+
+    error =  v3_read_vmcs_segments(&(info->segments));
+
+    return error;
+}
+
+
+int v3_vmx_restore_vmcs(struct guest_info * info) {
+    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
+    int error = 0;
+
+    check_vmcs_write(VMCS_GUEST_RIP, info->rip);
+    check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);
+
+    check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
+    check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
+    check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
+    check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);
+    check_vmcs_write(VMCS_CR4_READ_SHDW, vmx_info->guest_cr4);
+    check_vmcs_write(VMCS_GUEST_DR7, info->dbg_regs.dr7);
+
+    check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
+
+    if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
+        check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
+    }
+
+    error = v3_write_vmcs_segments(&(info->segments));
+
+    return error;
+
+}
+
+
+
 int v3_update_vmcs_host_state(struct guest_info * info) {
     int vmx_ret = 0;
     addr_t tmp;
@@ -315,104 +368,9 @@ int v3_update_vmcs_host_state(struct guest_info * info) {
 }
 
 
-int v3_load_vmcs_guest_state(struct guest_info * info)
-{
 
-    int error = 0;
-
-    check_vmcs_read(VMCS_GUEST_RIP, &(info->rip));
-    check_vmcs_read(VMCS_GUEST_RSP, &(info->vm_regs.rsp));
-
-    check_vmcs_read(VMCS_GUEST_CR0, &(info->ctrl_regs.cr0));
-    check_vmcs_read(VMCS_CR0_READ_SHDW, &(info->shdw_pg_state.guest_cr0));
-    check_vmcs_read(VMCS_GUEST_CR3, &(info->ctrl_regs.cr3));
-    check_vmcs_read(VMCS_GUEST_CR4, &(info->ctrl_regs.cr4));
-    check_vmcs_read(VMCS_GUEST_DR7, &(info->dbg_regs.dr7));
-
-    check_vmcs_read(VMCS_GUEST_RFLAGS, &(info->ctrl_regs.rflags));
-    if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
-        check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
-    }
-
-    // JRL: Add error checking
-
-    struct vmcs_segment_access access;
-    memset(&access, 0, sizeof(access));
-
-    /* CS Segment */
-    check_vmcs_read(VMCS_GUEST_CS_BASE, &(info->segments.cs.base));
-    check_vmcs_read(VMCS_GUEST_CS_SELECTOR, &(info->segments.cs.selector));
-    check_vmcs_read(VMCS_GUEST_CS_LIMIT, &(info->segments.cs.limit));
-    check_vmcs_read(VMCS_GUEST_CS_ACCESS, &(access.value));
 
-    translate_access_to_v3_seg(&access, &(info->segments.cs));
 
-    /* SS Segment */
-    check_vmcs_read(VMCS_GUEST_SS_BASE, &(info->segments.ss.base));
-    check_vmcs_read(VMCS_GUEST_SS_SELECTOR, &(info->segments.ss.selector));
-    check_vmcs_read(VMCS_GUEST_SS_LIMIT, &(info->segments.ss.limit));
-    check_vmcs_read(VMCS_GUEST_SS_ACCESS, &(access.value));
-
-    translate_access_to_v3_seg(&access, &(info->segments.ss));
-
-    /* DS Segment */
-    check_vmcs_read(VMCS_GUEST_DS_BASE, &(info->segments.ds.base));
-    check_vmcs_read(VMCS_GUEST_DS_SELECTOR, &(info->segments.ds.selector));
-    check_vmcs_read(VMCS_GUEST_DS_LIMIT, &(info->segments.ds.limit));
-    check_vmcs_read(VMCS_GUEST_DS_ACCESS, &(access.value));
-
-    translate_access_to_v3_seg(&access, &(info->segments.ds));
-
-    /* ES Segment */
-    check_vmcs_read(VMCS_GUEST_ES_BASE, &(info->segments.es.base));
-    check_vmcs_read(VMCS_GUEST_ES_SELECTOR, &(info->segments.es.selector));
-    check_vmcs_read(VMCS_GUEST_ES_LIMIT, &(info->segments.es.limit));
-    check_vmcs_read(VMCS_GUEST_ES_ACCESS, &(access.value));
-
-    translate_access_to_v3_seg(&access, &(info->segments.es));
-
-    /* FS Segment */
-    check_vmcs_read(VMCS_GUEST_FS_BASE, &(info->segments.fs.base));
-    check_vmcs_read(VMCS_GUEST_FS_SELECTOR, &(info->segments.fs.selector));
-    check_vmcs_read(VMCS_GUEST_FS_LIMIT, &(info->segments.fs.limit));
-    check_vmcs_read(VMCS_GUEST_FS_ACCESS, &(access.value));
-
-    translate_access_to_v3_seg(&access, &(info->segments.fs));
-
-    /* GS Segment */
-    check_vmcs_read(VMCS_GUEST_GS_BASE, &(info->segments.gs.base));
-    check_vmcs_read(VMCS_GUEST_GS_SELECTOR, &(info->segments.gs.selector));
-    check_vmcs_read(VMCS_GUEST_GS_LIMIT, &(info->segments.gs.limit));
-    check_vmcs_read(VMCS_GUEST_GS_ACCESS, &(access.value));
-
-    translate_access_to_v3_seg(&access, &(info->segments.gs));
-
-    /* LDTR Segment */
-    check_vmcs_read(VMCS_GUEST_LDTR_BASE, &(info->segments.ldtr.base));
-    check_vmcs_read(VMCS_GUEST_LDTR_SELECTOR, &(info->segments.ldtr.selector));
-    check_vmcs_read(VMCS_GUEST_LDTR_LIMIT, &(info->segments.ldtr.limit));
-    check_vmcs_read(VMCS_GUEST_LDTR_ACCESS, &(access.value));
-
-    translate_access_to_v3_seg(&access, &(info->segments.ldtr));
-
-    /* TR Segment */
-    check_vmcs_read(VMCS_GUEST_TR_BASE, &(info->segments.tr.base));
-    check_vmcs_read(VMCS_GUEST_TR_SELECTOR, &(info->segments.tr.selector));
-    check_vmcs_read(VMCS_GUEST_TR_LIMIT, &(info->segments.tr.limit));
-    check_vmcs_read(VMCS_GUEST_TR_ACCESS, &(access.value));
-
-    translate_access_to_v3_seg(&access, &(info->segments.tr));
-
-    /* GDTR Segment */
-    check_vmcs_read(VMCS_GUEST_GDTR_BASE, &(info->segments.gdtr.base));
-    check_vmcs_read(VMCS_GUEST_GDTR_LIMIT, &(info->segments.gdtr.limit));
-    
-    /* IDTR Segment */
-    check_vmcs_read(VMCS_GUEST_IDTR_BASE, &(info->segments.idtr.base));
-    check_vmcs_read(VMCS_GUEST_IDTR_LIMIT, &(info->segments.idtr.limit));
-    
-    return error;
-}
 
 static inline void print_vmcs_field(vmcs_field_t vmcs_index) {
     int len = v3_vmcs_get_field_len(vmcs_index);
@@ -433,21 +391,13 @@ static inline void print_vmcs_field(vmcs_field_t vmcs_index) {
 }
 
 
+static void print_vmcs_segments() {
+    struct v3_segments segs; 
 
-static void print_guest_state()
-{
-    PrintDebug("VMCS_GUEST_STATE\n");
-    print_vmcs_field(VMCS_GUEST_RIP);
-    print_vmcs_field(VMCS_GUEST_RSP);
-    print_vmcs_field(VMCS_GUEST_RFLAGS);
-    print_vmcs_field(VMCS_GUEST_CR0);
-    print_vmcs_field(VMCS_GUEST_CR3);
-    print_vmcs_field(VMCS_GUEST_CR4);
-    print_vmcs_field(VMCS_GUEST_DR7);
+    v3_read_vmcs_segments(&segs);
+    v3_print_segments(&segs);
 
 
-    PrintDebug("\n");
-
     PrintDebug("   ==> CS\n");
     print_vmcs_field(VMCS_GUEST_CS_SELECTOR);
     print_vmcs_field(VMCS_GUEST_CS_BASE);
@@ -504,6 +454,28 @@ static void print_guest_state()
     print_vmcs_field(VMCS_GUEST_IDTR_BASE);
     print_vmcs_field(VMCS_GUEST_IDTR_LIMIT);
 
+
+}
+
+
+
+
+static void print_guest_state()
+{
+    PrintDebug("VMCS_GUEST_STATE\n");
+    print_vmcs_field(VMCS_GUEST_RIP);
+    print_vmcs_field(VMCS_GUEST_RSP);
+    print_vmcs_field(VMCS_GUEST_RFLAGS);
+    print_vmcs_field(VMCS_GUEST_CR0);
+    print_vmcs_field(VMCS_GUEST_CR3);
+    print_vmcs_field(VMCS_GUEST_CR4);
+    print_vmcs_field(VMCS_GUEST_DR7);
+
+
+    PrintDebug("\n");
+
+    print_vmcs_segments();
+
     PrintDebug("\n");
 
     print_vmcs_field(VMCS_GUEST_DBG_CTL);
index 0a53a7f..90118eb 100644
@@ -135,7 +135,7 @@ int v3_start_vm(struct guest_info * info, unsigned int cpu_mask) {
            return v3_start_svm_guest(info);
            break;
 #endif
-#if CONFIG_VMX && 0
+#if CONFIG_VMX
        case V3_VMX_CPU:
        case V3_VMX_EPT_CPU:
            return v3_start_vmx_guest(info);
index 627bb84..81a37d3 100644
@@ -277,7 +277,15 @@ static int pre_config_guest(struct guest_info * info, struct v3_config * config_
 
     info->yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;
     
-
+    if (info->vm_class == V3_PC_VM) {
+       if (pre_config_pc(info, config_ptr) == -1) {
+           PrintError("PC Post configuration failure\n");
+           return -1;
+       }
+    } else {
+       PrintError("Invalid VM Class\n");
+       return -1;
+    }
 
     return 0;
 }
index 36fee9b..b2edaee 100644
@@ -18,7 +18,7 @@
  */
 
 
-static int post_config_pc(struct guest_info * info, struct v3_config * config_ptr) {
+static int pre_config_pc(struct guest_info * info, struct v3_config * config_ptr) {
 
 
     info->cpu_mode = REAL;
@@ -34,6 +34,10 @@ static int post_config_pc(struct guest_info * info, struct v3_config * config_pt
     info->vm_regs.rcx = 0;
     info->vm_regs.rax = 0;
 
+    return 0;
+}
+
+static int post_config_pc(struct guest_info * info, struct v3_config * config_ptr) {
 
 #define VGABIOS_START 0x000c0000
 #define ROMBIOS_START 0x000f0000
index 90ff060..29f1ccf 100644
@@ -22,6 +22,7 @@
 
 #include <palacios/vmx.h>
 #include <palacios/vmm.h>
+#include <palacios/vmx_handler.h>
 #include <palacios/vmcs.h>
 #include <palacios/vmx_lowlevel.h>
 #include <palacios/vmm_lowlevel.h>
 #include <palacios/vmx_io.h>
 #include <palacios/vmx_msr.h>
 
-static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = {0};
+static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
 
 
-extern int v3_vmx_exit_handler();
-extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
+
+extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
+extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
 
 static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
     int ret = 0;
@@ -51,6 +53,18 @@ static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
     return 0;
 }
 
+static int inline check_vmcs_read(vmcs_field_t field, void * val) {
+    int ret = 0;
+
+    ret = vmcs_read(field, val);
+
+    if (ret != VMX_SUCCESS) {
+        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
+    }
+
+    return ret;
+}
+
 #if 0
 // For the 32 bit reserved bit fields 
 // MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
@@ -115,37 +129,13 @@ static addr_t allocate_vmcs() {
 }
 
 
-static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
-    struct vmx_data * vmx_info = NULL;
-    int vmx_ret = 0;
-
-    v3_pre_config_guest(info, config_ptr);
-
-    vmx_info = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
-
-    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_info);
-
-    PrintDebug("Allocating VMCS\n");
-    vmx_info->vmcs_ptr_phys = allocate_vmcs();
-
-    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_info->vmcs_ptr_phys));
-
-    info->vmm_data = vmx_info;
 
-    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
-    
-    // TODO: Fix vmcs fields so they're 32-bit
 
-    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_info->vmcs_ptr_phys);
-    vmx_ret = vmcs_clear(vmx_info->vmcs_ptr_phys);
-
-    if (vmx_ret != VMX_SUCCESS) {
-        PrintError("VMCLEAR failed\n");
-        return -1;
-    }
+static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state) {
+    int vmx_ret = 0;
 
     PrintDebug("Loading VMCS\n");
-    vmx_ret = vmcs_load(vmx_info->vmcs_ptr_phys);
+    vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
 
     if (vmx_ret != VMX_SUCCESS) {
         PrintError("VMPTRLD failed\n");
@@ -171,7 +161,7 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
                         : "memory"
                         );
     gdtr_base = tmp_seg.base;
-    vmx_info->host_state.gdtr.base = gdtr_base;
+    vmx_state->host_state.gdtr.base = gdtr_base;
 
     __asm__ __volatile__(
                         "sidt (%0);"
@@ -179,7 +169,7 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
                         : "q"(&tmp_seg)
                         : "memory"
                         );
-    vmx_info->host_state.idtr.base = tmp_seg.base;
+    vmx_state->host_state.idtr.base = tmp_seg.base;
 
     __asm__ __volatile__(
                         "str (%0);"
@@ -187,7 +177,7 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
                         : "q"(&tmp_seg)
                         : "memory"
                         );
-    vmx_info->host_state.tr.selector = tmp_seg.selector;
+    vmx_state->host_state.tr.selector = tmp_seg.selector;
 
     /* The GDTR *index* is bits 3-15 of the selector. */
     struct tss_descriptor * desc = NULL;
@@ -203,7 +193,7 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
 #endif
                    );
 
-    vmx_info->host_state.tr.base = tmp_seg.base;
+    vmx_state->host_state.tr.base = tmp_seg.base;
 
   
 
@@ -217,18 +207,18 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
     v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
 
     /* Add external interrupts, NMI exiting, and virtual NMI */
-    vmx_info->pin_ctrls.value =  tmp_msr.lo;
-    vmx_info->pin_ctrls.nmi_exit = 1;
-    vmx_info->pin_ctrls.ext_int_exit = 1;
+    vmx_state->pin_ctrls.value =  tmp_msr.lo;
+    vmx_state->pin_ctrls.nmi_exit = 1;
+    vmx_state->pin_ctrls.ext_int_exit = 1;
 
     v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
 
-    vmx_info->pri_proc_ctrls.value = tmp_msr.lo;
-    vmx_info->pri_proc_ctrls.use_io_bitmap = 1;
-    vmx_info->pri_proc_ctrls.hlt_exit = 1;
-    vmx_info->pri_proc_ctrls.invlpg_exit = 1;
-    vmx_info->pri_proc_ctrls.use_msr_bitmap = 1;
-    vmx_info->pri_proc_ctrls.pause_exit = 1;
+    vmx_state->pri_proc_ctrls.value = tmp_msr.lo;
+    vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
+    vmx_state->pri_proc_ctrls.hlt_exit = 1;
+    vmx_state->pri_proc_ctrls.invlpg_exit = 1;
+    vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
+    vmx_state->pri_proc_ctrls.pause_exit = 1;
 
     vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->io_map.arch_data));
     vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR, 
@@ -237,15 +227,15 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
     vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->msr_map.arch_data));
 
     v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmx_info->exit_ctrls.value = tmp_msr.lo;
-    vmx_info->exit_ctrls.host_64_on = 1;
+    vmx_state->exit_ctrls.value = tmp_msr.lo;
+    vmx_state->exit_ctrls.host_64_on = 1;
 
-    if ((vmx_info->exit_ctrls.save_efer == 1) || (vmx_info->exit_ctrls.ld_efer == 1)) {
-        vmx_info->ia32e_avail = 1;
+    if ((vmx_state->exit_ctrls.save_efer == 1) || (vmx_state->exit_ctrls.ld_efer == 1)) {
+        vmx_state->ia32e_avail = 1;
     }
 
     v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
-    vmx_info->entry_ctrls.value = tmp_msr.lo;
+    vmx_state->entry_ctrls.value = tmp_msr.lo;
 
     {
        struct vmx_exception_bitmap excp_bmap;
@@ -296,11 +286,11 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
 
         info->ctrl_regs.cr3 = info->direct_map_pt;
 
-        // vmx_info->pinbased_ctrls |= NMI_EXIT;
+        // vmx_state->pinbased_ctrls |= NMI_EXIT;
 
         /* Add CR exits */
-        vmx_info->pri_proc_ctrls.cr3_ld_exit = 1;
-        vmx_info->pri_proc_ctrls.cr3_str_exit = 1;
+        vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
+        vmx_state->pri_proc_ctrls.cr3_str_exit = 1;
     }
 
     // Setup segment registers
@@ -403,6 +393,7 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
 
     vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
     
+
     if (v3_update_vmcs_ctrl_fields(info)) {
         PrintError("Could not write control fields!\n");
         return -1;
@@ -414,43 +405,309 @@ static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config
     }
 
 
-    if (v3_update_vmcs_guest_state(info) != VMX_SUCCESS) {
-        PrintError("Writing guest state failed!\n");
-        return -1;
+    vmx_state->state = VMXASSIST_DISABLED;
+
+    return 0;
+}
+
+int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class) {
+    struct vmx_data * vmx_state = NULL;
+    int vmx_ret = 0;
+    
+    vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
+
+    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);
+
+    PrintDebug("Allocating VMCS\n");
+    vmx_state->vmcs_ptr_phys = allocate_vmcs();
+
+    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));
+
+    info->vmm_data = vmx_state;
+
+    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
+    
+    // TODO: Fix vmcs fields so they're 32-bit
+
+    PrintDebug("Clearing VMCS: %p\n", (void *)vmx_state->vmcs_ptr_phys);
+    vmx_ret = vmcs_clear(vmx_state->vmcs_ptr_phys);
+
+    if (vmx_ret != VMX_SUCCESS) {
+        PrintError("VMCLEAR failed\n");
+        return -1; 
+    }
+
+    if (vm_class == V3_PC_VM) {
+       PrintDebug("Initializing VMCS\n");
+       init_vmcs_bios(info, vmx_state);
+    } else {
+       PrintError("Invalid VM Class\n");
+       return -1;
+    }
+
+    return 0;
+}
+
+static int update_irq_exit_state(struct guest_info * info) {
+    struct vmx_exit_idt_vec_info idt_vec_info;
+
+    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
+
+    if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
+#ifdef CONFIG_DEBUG_INTERRUPTS
+        PrintDebug("Calling v3_injecting_intr\n");
+#endif
+        info->intr_state.irq_started = 0;
+        v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
     }
 
-    v3_print_vmcs();
+    return 0;
+}
+
+static int update_irq_entry_state(struct guest_info * info) {
+    struct vmx_exit_idt_vec_info idt_vec_info;
+    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
+
+    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
+
+    /* Check for pending exceptions to inject */
+    if (v3_excp_pending(info)) {
+        struct vmx_entry_int_info int_info;
+        int_info.value = 0;
+
+        // In VMX, almost every exception is hardware
+        // Software exceptions are pretty much only for breakpoint or overflow
+        int_info.type = 3;
+        int_info.vector = v3_get_excp_number(info);
+
+        if (info->excp_state.excp_error_code_valid) {
+            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
+            int_info.error_code = 1;
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+            PrintDebug("Injecting exception %d with error code %x\n", 
+                    int_info.vector, info->excp_state.excp_error_code);
+#endif
+        }
+
+        int_info.valid = 1;
+#ifdef CONFIG_DEBUG_INTERRUPTS
+        PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)info->rip);
+#endif
+        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);
+
+        v3_injecting_excp(info, int_info.vector);
 
-    vmx_info->state = VMXASSIST_DISABLED;
+    } else if (((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) {
+       
+        if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+            PrintDebug("IRQ pending from previous injection\n");
+#endif
+
+            // Copy the IDT vectoring info over to reinject the old interrupt
+            if (idt_vec_info.error_code == 1) {
+                uint32_t err_code = 0;
+
+                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
+                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
+            }
+
+            idt_vec_info.undef = 0;
+            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);
+
+        } else {
+            struct vmx_entry_int_info ent_int;
+            ent_int.value = 0;
+
+            switch (v3_intr_pending(info)) {
+                case V3_EXTERNAL_IRQ: {
+                    info->intr_state.irq_vector = v3_get_intr(info); 
+                    ent_int.vector = info->intr_state.irq_vector;
+                    ent_int.type = 0;
+                    ent_int.error_code = 0;
+                    ent_int.valid = 1;
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+                    PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n", 
+                              info->intr_state.irq_vector, 
+                              (uint32_t)info->num_exits, 
+                              (void *)info->rip);
+#endif
+
+                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
+                    info->intr_state.irq_started = 1;
+
+                    break;
+                }
+                case V3_NMI:
+                    PrintDebug("Injecting NMI\n");
+
+                    ent_int.type = 2;
+                    ent_int.vector = 2;
+                    ent_int.valid = 1;
+                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
+
+                    break;
+                case V3_SOFTWARE_INTR:
+                    PrintDebug("Injecting software interrupt\n");
+                    ent_int.type = 4;
+
+                    ent_int.valid = 1;
+                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
+
+                   break;
+                case V3_VIRTUAL_IRQ:
+                    // Not sure what to do here, Intel doesn't have virtual IRQs
+                    // May be the same as external interrupts/IRQs
+
+                   break;
+                case V3_INVALID_INTR:
+                default:
+                    break;
+            }
+        }
+    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
+        // Enable INTR window exiting so we know when IF=1
+        uint32_t instr_len;
+
+        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+        PrintDebug("Enabling Interrupt-Window exiting: %d\n", instr_len);
+#endif
+
+        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
+        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
+    }
 
-    v3_post_config_guest(info, config_ptr);
 
     return 0;
 }
 
 
-static int start_vmx_guest(struct guest_info* info) {
-    uint32_t error = 0;
+/* 
+ * CAUTION and DANGER!!! 
+ * 
+ * The VMCS CANNOT(!!) be accessed outside of the cli/sti calls inside this function
+ * When exectuing a symbiotic call, the VMCS WILL be overwritten, so any dependencies 
+ * on its contents will cause things to break. The contents at the time of the exit WILL 
+ * change before the exit handler is executed.
+ */
+int v3_vmx_enter(struct guest_info * info) {
     int ret = 0;
+    uint64_t tmp_tsc = 0;
+    struct vmx_exit_info exit_info;
+
+    // Conditionally yield the CPU if the timeslice has expired
+    v3_yield_cond(info);
 
-    PrintDebug("Attempting VMLAUNCH\n");
 
-    info->run_state = VM_RUNNING;
+    // v3_print_guest_state(info);
+
+    // disable global interrupts for vm state transition
+    v3_disable_ints();
+
+    v3_vmx_restore_vmcs(info);
+
+
+#ifdef CONFIG_SYMBIOTIC
+    if (info->sym_state.sym_call_active == 0) {
+       update_irq_entry_state(info);
+    }
+#else 
+    update_irq_entry_state(info);
+#endif
+
 
     rdtscll(info->time_state.cached_host_tsc);
 
-    ret = v3_vmx_vmlaunch(&(info->vm_regs), info, &(info->ctrl_regs));
+    if (info->run_state == VM_STOPPED) {
+       info->run_state = VM_RUNNING;
+       ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
+    } else {
+       ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
+    }
+
+    //  PrintDebug("VMX Exit: ret=%d\n", ret);
 
     if (ret != VMX_SUCCESS) {
+       uint32_t error = 0;
+
         vmcs_read(VMCS_INSTR_ERR, &error);
-        PrintError("VMLAUNCH failed: %d\n", error);
+        PrintError("VMENTRY Error: %d\n", error);
+
+       return -1;
+    }
+
+    rdtscll(tmp_tsc);
+
+    info->num_exits++;
+
+    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
 
-        v3_print_vmcs();
+    /* Update guest state */
+    v3_vmx_save_vmcs(info);
+
+    info->mem_mode = v3_get_vm_mem_mode(info);
+    info->cpu_mode = v3_get_vm_cpu_mode(info);
+
+
+    check_vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info.instr_len));
+    check_vmcs_read(VMCS_EXIT_INSTR_INFO, &(exit_info.instr_info));
+    check_vmcs_read(VMCS_EXIT_REASON, &(exit_info.exit_reason));
+    check_vmcs_read(VMCS_EXIT_QUAL, &(exit_info.exit_qual));
+    check_vmcs_read(VMCS_EXIT_INT_INFO, &(exit_info.int_info));
+    check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
+    check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));
+
+    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);
+
+
+#ifdef CONFIG_SYMBIOTIC
+    if (info->sym_state.sym_call_active == 0) {
+       update_irq_exit_state(info);
     }
+#else
+    update_irq_exit_state(info);
+#endif
+
+    // reenable global interrupts after vm exit
+    v3_enable_ints();
+
+    // Conditionally yield the CPU if the timeslice has expired
+    v3_yield_cond(info);
+
+    if (v3_handle_vmx_exit(info, &exit_info) == -1) {
+       PrintError("Error in VMX exit handler\n");
+       return -1;
+    }
+
+    return 0;
+}
+
+
+int v3_start_vmx_guest(struct guest_info* info) {
+
 
-    PrintDebug("Returned from VMLAUNCH ret=%d\n", ret);
+    PrintDebug("Launching VMX guest\n");
 
-    return -1;
+    rdtscll(info->time_state.cached_host_tsc);
+
+
+    while (1) {
+       if (v3_vmx_enter(info) == -1) {
+           v3_print_vmcs();
+           return -1;
+       }
+
+       if ((info->num_exits % 5000) == 0) {
+           V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
+       }
+    }
+
+    return 0;
 }
 
 
@@ -536,7 +793,7 @@ void v3_init_vmx_cpu(int cpu_id) {
         PrintError("VMX initialization failure\n");
         return;
     }
-       
+    
 
     if (has_vmx_nested_paging() == 1) {
         v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
@@ -546,13 +803,3 @@ void v3_init_vmx_cpu(int cpu_id) {
 
 }
 
-
-void v3_init_vmx_hooks(struct v3_ctrl_ops * vm_ops) {
-
-    // Setup the VMX specific vmm operations
-    vm_ops->init_guest = &init_vmx_guest;
-    vm_ops->start_guest = &start_vmx_guest;
-    vm_ops->has_nested_paging = &has_vmx_nested_paging;
-
-}
-
index 75f2774..f926c42 100644
@@ -22,8 +22,8 @@
 #include <palacios/vm_guest_mem.h>
 #include <palacios/vmx.h>
 
-static int vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
-static int vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
+static void vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
+static void vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
 
 int v3_vmxassist_ctx_switch(struct guest_info * info) {
     struct vmx_assist_context * old_ctx = NULL;
@@ -57,25 +57,16 @@ int v3_vmxassist_ctx_switch(struct guest_info * info) {
     if (vmx_info->state == VMXASSIST_DISABLED) {
         
         /* Save the old Context */
-        if (vmx_save_world_ctx(info, old_ctx) != 0) {
-            PrintError("Could not save VMXASSIST world context\n");
-            return -1;
-        }
+       vmx_save_world_ctx(info, old_ctx);
 
         /* restore new context, vmxassist should launch the bios the first time */
-        if (vmx_restore_world_ctx(info, new_ctx) != 0) {
-            PrintError("VMXASSIST could not restore new context\n");
-            return -1;
-        }
+        vmx_restore_world_ctx(info, new_ctx);
 
         vmx_info->state = VMXASSIST_ENABLED;
 
     } else if (vmx_info->state == VMXASSIST_ENABLED) {
         /* restore old context */
-        if (vmx_restore_world_ctx(info, old_ctx) != 0) {
-            PrintError("VMXASSIST could not restore old context\n");
-            return -1;
-        }
+       vmx_restore_world_ctx(info, old_ctx);
 
         vmx_info->state = VMXASSIST_DISABLED;
     }
@@ -83,129 +74,94 @@ int v3_vmxassist_ctx_switch(struct guest_info * info) {
     return 0;
 }
 
-        
-int vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx) {
-    int error = 0;
 
-    PrintDebug("Writing from RIP: 0x%p\n", (void *)info->rip);
+static void save_segment(struct v3_segment * seg, struct vmx_assist_segment * vmx_assist_seg) {
+    struct vmcs_segment tmp_seg;
+
+    memset(&tmp_seg, 0, sizeof(struct vmcs_segment));
+
+    v3_seg_to_vmxseg(seg, &tmp_seg);
+
+    vmx_assist_seg->sel = tmp_seg.selector;
+    vmx_assist_seg->limit = tmp_seg.limit;
+    vmx_assist_seg->base = tmp_seg.base;
+    vmx_assist_seg->arbytes.bytes = tmp_seg.access.val;
+}
+
+
+static void load_segment(struct vmx_assist_segment * vmx_assist_seg, struct v3_segment * seg)  {
+    struct vmcs_segment tmp_seg;
 
-    error |= vmcs_read(VMCS_GUEST_RIP, &(ctx->eip));
-    error |= vmcs_read(VMCS_GUEST_RSP, &(ctx->esp));
-    error |= vmcs_read(VMCS_GUEST_RFLAGS, &(ctx->eflags));
+    memset(&tmp_seg, 0, sizeof(struct vmcs_segment));
 
-    error |= vmcs_read(VMCS_CR0_READ_SHDW, &(ctx->cr0));
+    tmp_seg.selector = vmx_assist_seg->sel;
+    tmp_seg.limit = vmx_assist_seg->limit;
+    tmp_seg.base = vmx_assist_seg->base;
+    tmp_seg.access.val = vmx_assist_seg->arbytes.bytes;
+
+    v3_vmxseg_to_seg(&tmp_seg, seg);
+}
+
+static void vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx) {
+    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
+
+    PrintDebug("Writing from RIP: 0x%p\n", (void *)info->rip);
+    
+    ctx->eip = info->rip;
+    ctx->esp = info->vm_regs.rsp;
+    ctx->eflags = info->ctrl_regs.rflags;
+
+    ctx->cr0 = info->shdw_pg_state.guest_cr0;
     ctx->cr3 = info->shdw_pg_state.guest_cr3;
-    error |= vmcs_read(VMCS_CR4_READ_SHDW, &(ctx->cr4));
-
-    error |= vmcs_read(VMCS_GUEST_IDTR_LIMIT, &(ctx->idtr_limit));
-    error |= vmcs_read(VMCS_GUEST_IDTR_BASE, &(ctx->idtr_base));
-
-    error |= vmcs_read(VMCS_GUEST_GDTR_LIMIT, &(ctx->gdtr_limit));
-    error |= vmcs_read(VMCS_GUEST_GDTR_BASE, &(ctx->gdtr_base));
-
-    error |= vmcs_read(VMCS_GUEST_CS_SELECTOR, &(ctx->cs_sel));
-    error |= vmcs_read(VMCS_GUEST_CS_LIMIT, &(ctx->cs_limit));
-    error |= vmcs_read(VMCS_GUEST_CS_BASE, &(ctx->cs_base));
-    error |= vmcs_read(VMCS_GUEST_CS_ACCESS, &(ctx->cs_arbytes.bytes));
-
-    error |= vmcs_read(VMCS_GUEST_DS_SELECTOR, &(ctx->ds_sel));
-    error |= vmcs_read(VMCS_GUEST_DS_LIMIT, &(ctx->ds_limit));
-    error |= vmcs_read(VMCS_GUEST_DS_BASE, &(ctx->ds_base));
-    error |= vmcs_read(VMCS_GUEST_DS_ACCESS, &(ctx->ds_arbytes.bytes));
-
-    error |= vmcs_read(VMCS_GUEST_ES_SELECTOR, &(ctx->es_sel));
-    error |= vmcs_read(VMCS_GUEST_ES_LIMIT, &(ctx->es_limit));
-    error |= vmcs_read(VMCS_GUEST_ES_BASE, &(ctx->es_base));
-    error |= vmcs_read(VMCS_GUEST_ES_ACCESS, &(ctx->es_arbytes.bytes));
-
-    error |= vmcs_read(VMCS_GUEST_SS_SELECTOR, &(ctx->ss_sel));
-    error |= vmcs_read(VMCS_GUEST_SS_LIMIT, &(ctx->ss_limit));
-    error |= vmcs_read(VMCS_GUEST_SS_BASE, &(ctx->ss_base));
-    error |= vmcs_read(VMCS_GUEST_SS_ACCESS, &(ctx->ss_arbytes.bytes));
-
-    error |= vmcs_read(VMCS_GUEST_FS_SELECTOR, &(ctx->fs_sel));
-    error |= vmcs_read(VMCS_GUEST_FS_LIMIT, &(ctx->fs_limit));
-    error |= vmcs_read(VMCS_GUEST_FS_BASE, &(ctx->fs_base));
-    error |= vmcs_read(VMCS_GUEST_FS_ACCESS, &(ctx->fs_arbytes.bytes));
-
-    error |= vmcs_read(VMCS_GUEST_GS_SELECTOR, &(ctx->gs_sel));
-    error |= vmcs_read(VMCS_GUEST_GS_LIMIT, &(ctx->gs_limit));
-    error |= vmcs_read(VMCS_GUEST_GS_BASE, &(ctx->gs_base));
-    error |= vmcs_read(VMCS_GUEST_GS_ACCESS, &(ctx->gs_arbytes.bytes));
-
-    error |= vmcs_read(VMCS_GUEST_TR_SELECTOR, &(ctx->tr_sel));
-    error |= vmcs_read(VMCS_GUEST_TR_LIMIT, &(ctx->tr_limit));
-    error |= vmcs_read(VMCS_GUEST_TR_BASE, &(ctx->tr_base));
-    error |= vmcs_read(VMCS_GUEST_TR_ACCESS, &(ctx->tr_arbytes.bytes));
-
-    error |= vmcs_read(VMCS_GUEST_LDTR_SELECTOR, &(ctx->ldtr_sel));
-    error |= vmcs_read(VMCS_GUEST_LDTR_LIMIT, &(ctx->ldtr_limit));
-    error |= vmcs_read(VMCS_GUEST_LDTR_BASE, &(ctx->ldtr_base));
-    error |= vmcs_read(VMCS_GUEST_LDTR_ACCESS, &(ctx->ldtr_arbytes.bytes));
-
-    return error;
+    ctx->cr4 = vmx_info->guest_cr4;
+
+    
+    save_segment(&(info->segments.cs), &(ctx->cs));
+    save_segment(&(info->segments.ds), &(ctx->ds));
+    save_segment(&(info->segments.es), &(ctx->es));
+    save_segment(&(info->segments.ss), &(ctx->ss));
+    save_segment(&(info->segments.fs), &(ctx->fs));
+    save_segment(&(info->segments.gs), &(ctx->gs));
+    save_segment(&(info->segments.tr), &(ctx->tr));
+    save_segment(&(info->segments.ldtr), &(ctx->ldtr));
+
+    // Odd segments 
+    ctx->idtr_limit = info->segments.idtr.limit;
+    ctx->idtr_base = info->segments.idtr.base;
+
+    ctx->gdtr_limit = info->segments.gdtr.limit;
+    ctx->gdtr_base = info->segments.gdtr.base;
 }
 
-int vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx) {
-    int error = 0;
+static void vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx) {
+    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
 
     PrintDebug("ctx rip: %p\n", (void *)(addr_t)ctx->eip);
+    
+    info->rip = ctx->eip;
+    info->vm_regs.rsp = ctx->esp;
+    info->ctrl_regs.rflags = ctx->eflags;
 
-    error |= vmcs_write(VMCS_GUEST_RIP, ctx->eip);
-    error |= vmcs_write(VMCS_GUEST_RSP, ctx->esp);
-    error |= vmcs_write(VMCS_GUEST_RFLAGS, ctx->eflags);
-
-    error |= vmcs_write(VMCS_CR0_READ_SHDW, ctx->cr0);
+    info->shdw_pg_state.guest_cr0 = ctx->cr0;
     info->shdw_pg_state.guest_cr3 = ctx->cr3;
-    error |= vmcs_write(VMCS_CR4_READ_SHDW, ctx->cr4);
-
-    error |= vmcs_write(VMCS_GUEST_IDTR_LIMIT, ctx->idtr_limit);
-    error |= vmcs_write(VMCS_GUEST_IDTR_BASE, ctx->idtr_base);
-
-    error |= vmcs_write(VMCS_GUEST_GDTR_LIMIT, ctx->gdtr_limit);
-    error |= vmcs_write(VMCS_GUEST_GDTR_BASE, ctx->gdtr_base);
-
-    error |= vmcs_write(VMCS_GUEST_CS_SELECTOR, ctx->cs_sel);
-    error |= vmcs_write(VMCS_GUEST_CS_LIMIT, ctx->cs_limit);
-    error |= vmcs_write(VMCS_GUEST_CS_BASE, ctx->cs_base);
-    error |= vmcs_write(VMCS_GUEST_CS_ACCESS, ctx->cs_arbytes.bytes);
-
-    error |= vmcs_write(VMCS_GUEST_DS_SELECTOR, ctx->ds_sel);
-    error |= vmcs_write(VMCS_GUEST_DS_LIMIT, ctx->ds_limit);
-    error |= vmcs_write(VMCS_GUEST_DS_BASE, ctx->ds_base);
-    error |= vmcs_write(VMCS_GUEST_DS_ACCESS, ctx->ds_arbytes.bytes);
-
-    error |= vmcs_write(VMCS_GUEST_ES_SELECTOR, ctx->es_sel);
-    error |= vmcs_write(VMCS_GUEST_ES_LIMIT, ctx->es_limit);
-    error |= vmcs_write(VMCS_GUEST_ES_BASE, ctx->es_base);
-    error |= vmcs_write(VMCS_GUEST_ES_ACCESS, ctx->es_arbytes.bytes);
-
-    error |= vmcs_write(VMCS_GUEST_SS_SELECTOR, ctx->ss_sel);
-    error |= vmcs_write(VMCS_GUEST_SS_LIMIT, ctx->ss_limit);
-    error |= vmcs_write(VMCS_GUEST_SS_BASE, ctx->ss_base);
-    error |= vmcs_write(VMCS_GUEST_SS_ACCESS, ctx->ss_arbytes.bytes);
-
-    error |= vmcs_write(VMCS_GUEST_FS_SELECTOR, ctx->fs_sel);
-    error |= vmcs_write(VMCS_GUEST_FS_LIMIT, ctx->fs_limit);
-    error |= vmcs_write(VMCS_GUEST_FS_BASE, ctx->fs_base);
-    error |= vmcs_write(VMCS_GUEST_FS_ACCESS, ctx->fs_arbytes.bytes);
-
-    error |= vmcs_write(VMCS_GUEST_GS_SELECTOR, ctx->gs_sel);
-    error |= vmcs_write(VMCS_GUEST_GS_LIMIT, ctx->gs_limit);
-    error |= vmcs_write(VMCS_GUEST_GS_BASE, ctx->gs_base);
-    error |= vmcs_write(VMCS_GUEST_GS_ACCESS, ctx->gs_arbytes.bytes);
-
-    error |= vmcs_write(VMCS_GUEST_TR_SELECTOR, ctx->tr_sel);
-    error |= vmcs_write(VMCS_GUEST_TR_LIMIT, ctx->tr_limit);
-    error |= vmcs_write(VMCS_GUEST_TR_BASE, ctx->tr_base);
-    error |= vmcs_write(VMCS_GUEST_TR_ACCESS, ctx->tr_arbytes.bytes);
-
-    error |= vmcs_write(VMCS_GUEST_LDTR_SELECTOR, ctx->ldtr_sel);
-    error |= vmcs_write(VMCS_GUEST_LDTR_LIMIT, ctx->ldtr_limit);
-    error |= vmcs_write(VMCS_GUEST_LDTR_BASE, ctx->ldtr_base);
-    error |= vmcs_write(VMCS_GUEST_LDTR_ACCESS, ctx->ldtr_arbytes.bytes);
-
-    return error;
+    vmx_info->guest_cr4 = ctx->cr4;
+
+    load_segment(&(ctx->cs), &(info->segments.cs));
+    load_segment(&(ctx->ds), &(info->segments.ds));
+    load_segment(&(ctx->es), &(info->segments.es));
+    load_segment(&(ctx->ss), &(info->segments.ss));
+    load_segment(&(ctx->fs), &(info->segments.fs));
+    load_segment(&(ctx->gs), &(info->segments.gs));
+    load_segment(&(ctx->tr), &(info->segments.tr));
+    load_segment(&(ctx->ldtr), &(info->segments.ldtr));
+
+    // IDTR and GDTR are not full segments; only base/limit are copied
+    info->segments.idtr.limit = ctx->idtr_limit;
+    info->segments.idtr.base = ctx->idtr_base;
+
+    info->segments.gdtr.limit = ctx->gdtr_limit;
+    info->segments.gdtr.base = ctx->gdtr_base;
+
 }
 
 
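The reworked vmx_save_world_ctx/vmx_restore_world_ctx above no longer read or write the VMCS directly; they convert between the cached struct v3_segment state in guest_info and the flat per-segment layout of the vmxassist context through save_segment()/load_segment() helpers (not shown in this excerpt). A minimal sketch of that conversion, with both struct layouts assumed for illustration rather than taken from the Palacios headers:

#include <stdint.h>
#include <string.h>

/* Assumed cached form, loosely modeled on struct v3_segment */
struct sw_segment {
    uint16_t selector;
    uint64_t base;
    uint32_t limit;
    uint32_t attrib;      /* packed access-rights bits */
};

/* Assumed vmxassist context form (selector/limit/base/arbytes per segment) */
struct ctx_segment {
    uint32_t sel;
    uint32_t limit;
    uint32_t base;
    uint32_t arbytes;
};

/* Copy a cached segment out to the context before entering vmxassist... */
static void save_segment_sketch(const struct sw_segment * seg, struct ctx_segment * out) {
    memset(out, 0, sizeof(*out));
    out->sel     = seg->selector;
    out->limit   = seg->limit;
    out->base    = (uint32_t)seg->base;   /* vmxassist runs in 32-bit protected mode */
    out->arbytes = seg->attrib;
}

/* ...and load it back into the cached state when vmxassist hands control back */
static void load_segment_sketch(const struct ctx_segment * in, struct sw_segment * seg) {
    seg->selector = (uint16_t)in->sel;
    seg->limit    = in->limit;
    seg->base     = in->base;
    seg->attrib   = in->arbytes;
}

Whatever the real field layout, the point of the change is that the cached copy in guest_info, not the VMCS, is now the source of truth at vmxassist context-switch time.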
diff --git a/palacios/src/palacios/vmx_ctrl_regs.c b/palacios/src/palacios/vmx_ctrl_regs.c
index 00fd7e9..c7bf6e3 100644 (file)
 #include <palacios/vmx_assist.h>
 #include <palacios/vm_guest_mem.h>
 #include <palacios/vmm_direct_paging.h>
-#include <palacios/vmx_handler.h>
+#include <palacios/vmm_ctrl_regs.h>
 
-static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual);
-static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val);
+static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
+static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
 
-int v3_vmx_handle_cr0_access(struct guest_info * info) {
-    struct vmx_exit_cr_qual cr_qual;
-    
-    vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
+int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {
 
-    if (cr_qual.access_type < 2) {
+    if (cr_qual->access_type < 2) {
         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
         
-        if (cr_qual.access_type == 0) {
+        if (cr_qual->access_type == 0) {
 
-            if (handle_mov_to_cr0(info, reg) != 0) {
+            if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
                 PrintError("Could not handle CR0 write\n");
                 return -1;
             }
@@ -55,31 +52,27 @@ int v3_vmx_handle_cr0_access(struct guest_info * info) {
         return 0;
     }
 
-    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual.access_type);
+    PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
     return -1;
 }
 
-int v3_vmx_handle_cr3_access(struct guest_info * info) {
-    struct vmx_exit_cr_qual cr_qual;
+int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
 
-    vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
-
-    if (cr_qual.access_type < 2) {
+    if (cr_qual->access_type < 2) {
         v3_reg_t * reg = get_reg_ptr(info, cr_qual);
 
-        if (cr_qual.access_type == 0) {
+        if (cr_qual->access_type == 0) {
             return handle_mov_to_cr3(info, reg);
         } else {
             return handle_mov_from_cr3(info, reg);
         }
     }
 
-    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual.access_type);
+    PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
     return -1;
 }
 
 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
-    int instr_len = 0;
 
     if (info->shdw_pg_mode == SHADOW_PAGING) {
 
@@ -88,6 +81,7 @@ static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
                   (void *)info->ctrl_regs.cr3,
                   (void *)info->shdw_pg_state.guest_cr3);
        */
+
         if (info->cpu_mode == LONG) {
             info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
         } else {
@@ -112,14 +106,12 @@ static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
     }
 
 
-    vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-    info->rip += instr_len;
 
     return 0;
 }
 
 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
-    int instr_len = 0;
+
 
     if (info->shdw_pg_mode == SHADOW_PAGING) {
 
@@ -137,38 +129,33 @@ static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
     }
 
 
-    vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-    info->rip += instr_len;
-
     return 0;
 }
 
-static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0) {
+static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
     struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
     struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
     struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
     struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
     uint_t paging_transition = 0;
-    int instr_len = 0;
 
     /*
-    PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
-              (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
+      PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
+      (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
     */
 
     if (new_shdw_cr0->pe != shdw_cr0->pe) {
        /*
-        PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
-        PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
-        PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
+         PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
+         PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
+         PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
        */
+
         if (v3_vmxassist_ctx_switch(info) != 0) {
             PrintError("Unable to execute VMXASSIST context switch!\n");
             return -1;
         }
-
-        v3_load_vmcs_guest_state(info);
-
+       
         if (vmx_info->state == VMXASSIST_ENABLED) {
             PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)info->rip);
         } else {
@@ -176,66 +163,66 @@ static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0) {
                       (void *)info->rip);
         }
 
-        // vmx assist sets the new cr values itself
-        return 0;
-    }
-
-    if (new_shdw_cr0->pg != shdw_cr0->pg) {
-        paging_transition = 1;
-    }
-    // The shadow always reflects the new value
-    *shdw_cr0 = *new_shdw_cr0;
-
-    // We don't care about most of the flags, so lets go for it 
-    // and set them to the guest values
-    *guest_cr0 = *shdw_cr0;
-
-    // Except PG, PE, and NE, which are always set
-    guest_cr0->pe = 1;
-    guest_cr0->pg = 1;
-    guest_cr0->ne = 1;
+       // PE transitions hand control to or from vmxassist, which sets RIP itself, so clear instr_len to keep the exit handler from advancing past the new entry point
+       exit_info->instr_len = 0;
 
-    if (paging_transition) {
-        // Paging transition
+       //      v3_vmx_restore_vmcs(info);
+       //      v3_print_vmcs(info);
 
-        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
-            struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
-
-            if (guest_efer->lme == 1) {
-               //     PrintDebug("Enabling long mode\n");
-
-                guest_efer->lma = 1;
-                guest_efer->lme = 1;
-
-                vmx_info->entry_ctrls.guest_ia32e = 1;
-            }
-
-           //            PrintDebug("Activating Shadow Page tables\n");
-
-            if (v3_activate_shadow_pt(info) == -1) {
-                PrintError("Failed to activate shadow page tables\n");
-                return -1;
-            }
+    } else {
 
-        } else if (v3_activate_passthrough_pt(info) == -1) {
-            PrintError("Failed to activate passthrough page tables\n");
-            return -1;
-        }
+       if (new_shdw_cr0->pg != shdw_cr0->pg) {
+           paging_transition = 1;
+       }
+       
+       // The shadow always reflects the new value
+       *shdw_cr0 = *new_shdw_cr0;
+       
+       // We don't care about most of the flags, so lets go for it 
+       // and set them to the guest values
+       *guest_cr0 = *shdw_cr0;
+       
+       // Except PG, PE, and NE, which are always set
+       guest_cr0->pe = 1;
+       guest_cr0->pg = 1;
+       guest_cr0->ne = 1;
+       
+       if (paging_transition) {
+           // Paging transition
+           
+           if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
+               struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
+               
+               if (guest_efer->lme == 1) {
+                   //     PrintDebug("Enabling long mode\n");
+                   
+                   guest_efer->lma = 1;
+                   guest_efer->lme = 1;
+                   
+                   vmx_info->entry_ctrls.guest_ia32e = 1;
+               }
+               
+               //            PrintDebug("Activating Shadow Page tables\n");
+               
+               if (v3_activate_shadow_pt(info) == -1) {
+                   PrintError("Failed to activate shadow page tables\n");
+                   return -1;
+               }
+               
+           } else if (v3_activate_passthrough_pt(info) == -1) {
+               PrintError("Failed to activate passthrough page tables\n");
+               return -1;
+           }
+       }
     }
-   
-    // PE loads its own RIP, otherwise we need to skip ahead an instruction
 
-    vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-    info->rip += instr_len;
-   
     return 0;
 }
 
-static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual) {
+static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
     v3_reg_t * reg = NULL;
 
-    switch (cr_qual.gpr) {
+    switch (cr_qual->gpr) {
        case 0:
            reg = &(info->vm_regs.rax);
            break;
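The CR handlers above now take the exit qualification pre-decoded as a struct vmx_exit_cr_qual pointer (plus, for CR0, the whole exit_info) instead of issuing their own vmcs_read(VMCS_EXIT_QUAL, ...) and VMCS_EXIT_INSTR_LEN reads. The bitfield being decoded follows the architectural layout for control-register-access exits (Intel SDM Vol. 3B); a sketch using the field names that appear in the code above, with the remaining fields named illustratively:

#include <stdint.h>

struct vmx_exit_cr_qual_sketch {
    uint64_t cr_id        : 4;   /* control register number (0, 3, 4, 8) */
    uint64_t access_type  : 2;   /* 0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW */
    uint64_t lmsw_op_type : 1;   /* 0 = register operand, 1 = memory operand */
    uint64_t rsvd1        : 1;
    uint64_t gpr          : 4;   /* GPR used by MOV CR (0 = RAX, 1 = RCX, ...) */
    uint64_t rsvd2        : 4;
    uint64_t lmsw_src     : 16;  /* LMSW source data */
    uint64_t rsvd3        : 32;
} __attribute__((packed));

Passing exit_info into handle_mov_to_cr0() is what lets a PE transition zero instr_len, so the single info->rip += exit_info->instr_len in the exit handler neither double-advances the guest RIP nor skips past the vmxassist entry point.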
diff --git a/palacios/src/palacios/vmx_handler.c b/palacios/src/palacios/vmx_handler.c
index 1da5abd..3fa4f98 100644 (file)
 #endif
 
 
-static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
-    int ret = 0;
-
-    ret = vmcs_write(field, val);
-
-    if (ret != VMX_SUCCESS) {
-        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
-    }
-
-    return ret;
-}
-
-static int inline check_vmcs_read(vmcs_field_t field, void * val) {
-    int ret = 0;
-
-    ret = vmcs_read(field, val);
-
-    if (ret != VMX_SUCCESS) {
-        PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
-    }
-
-    return ret;
-}
-
-static int inline handle_cr_access(struct guest_info * info, ulong_t exit_qual) {
-    struct vmx_exit_cr_qual * cr_qual = (struct vmx_exit_cr_qual *)&exit_qual;
-
-    // PrintDebug("Control register: %d\n", cr_qual->access_type);
-    switch(cr_qual->cr_id) {
-        case 0:
-           //PrintDebug("Handling CR0 Access\n");
-            return v3_vmx_handle_cr0_access(info);
-        case 3:
-           //PrintDebug("Handling CR3 Access\n");
-            return v3_vmx_handle_cr3_access(info);
-        default:
-            PrintError("Unhandled CR access: %d\n", cr_qual->cr_id);
-            return -1;
-    }
-    
-    return -1;
-}
 
 
 /* At this point the GPRs are already copied into the guest_info state */
-int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs) {
-    uint64_t tmp_tsc = 0;
-    uint32_t exit_reason = 0;
-    addr_t exit_qual = 0;
+int v3_handle_vmx_exit(struct guest_info * info, struct vmx_exit_info * exit_info) {
     struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
-    struct vmx_exit_idt_vec_info idt_vec_info;
-
-    rdtscll(tmp_tsc);
-    v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
-
-    v3_enable_ints();
-
-    check_vmcs_read(VMCS_EXIT_REASON, &exit_reason);
-    check_vmcs_read(VMCS_EXIT_QUAL, &exit_qual);
-
-    //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_reason, exit_qual);
-
-    /* Update guest state */
-    v3_load_vmcs_guest_state(info);
-
-    // Load execution controls
-    check_vmcs_read(VMCS_PIN_CTRLS, &(vmx_info->pin_ctrls.value));
-    check_vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
-
-    if (vmx_info->pri_proc_ctrls.sec_ctrls) {
-        check_vmcs_read(VMCS_SEC_PROC_CTRLS, &(vmx_info->sec_proc_ctrls.value));
-    }
-
-    info->mem_mode = v3_get_vm_mem_mode(info);
-    info->cpu_mode = v3_get_vm_cpu_mode(info);
-
-    // Check if we got interrupted while delivering interrupt
-    // Variable will be used later if this is true
-
-    check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
-
-    if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
-        PrintDebug("Calling v3_injecting_intr\n");
-#endif
-        info->intr_state.irq_started = 0;
-        v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
-    }
-
-    info->num_exits++;
-
-
-
-    if ((info->num_exits % 5000) == 0) {
-       PrintDebug("VMX Exit %d\n", (uint32_t)info->num_exits);
-    }
 
+    /*
+      PrintError("Handling VMEXIT: %s (%u), %lu (0x%lx)\n", 
+      v3_vmx_exit_code_to_str(exit_info->exit_reason),
+      exit_info->exit_reason, 
+      exit_info->exit_qual, exit_info->exit_qual);
+      
+      v3_print_vmcs();
+    */
 #ifdef CONFIG_TELEMETRY
     if (info->enable_telemetry) {
        v3_telemetry_start_exit(info);
     }
 #endif
 
-    switch (exit_reason) {
+    switch (exit_info->exit_reason) {
         case VMEXIT_INFO_EXCEPTION_OR_NMI: {
-            uint32_t int_info;
-            pf_error_t error_code;
+            pf_error_t error_code = *(pf_error_t *)&(exit_info->int_err);
 
-            check_vmcs_read(VMCS_EXIT_INT_INFO, &int_info);
-            check_vmcs_read(VMCS_EXIT_INT_ERR, &error_code);
 
             // JRL: Change "0x0e" to a macro value
-            if ((uint8_t)int_info == 0x0e) {
+            if ((uint8_t)exit_info->int_info == 0x0e) {
 #ifdef CONFIG_DEBUG_SHADOW_PAGING
-                PrintDebug("Page Fault at %p error_code=%x\n", (void *)exit_qual, *(uint32_t *)&error_code);
+                PrintDebug("Page Fault at %p error_code=%x\n", (void *)exit_info->exit_qual, *(uint32_t *)&error_code);
 #endif
 
                 if (info->shdw_pg_mode == SHADOW_PAGING) {
-                    if (v3_handle_shadow_pagefault(info, (addr_t)exit_qual, error_code) == -1) {
+                    if (v3_handle_shadow_pagefault(info, (addr_t)exit_info->exit_qual, error_code) == -1) {
                         PrintError("Error handling shadow page fault\n");
                         return -1;
                     }
@@ -164,7 +78,7 @@ int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v
                     return -1;
                 }
             } else {
-                PrintError("Unknown exception: 0x%x\n", (uint8_t)int_info);
+                PrintError("Unknown exception: 0x%x\n", (uint8_t)exit_info->int_info);
                 v3_print_GPRs(info);
                 return -1;
             }
@@ -215,7 +129,7 @@ int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v
            }
            break;
         case VMEXIT_IO_INSTR: {
-           struct vmx_exit_io_qual * io_qual = (struct vmx_exit_io_qual *)&exit_qual;
+           struct vmx_exit_io_qual * io_qual = (struct vmx_exit_io_qual *)&(exit_info->exit_qual);
 
             if (io_qual->dir == 0) {
                 if (io_qual->string) {
@@ -244,13 +158,34 @@ int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v
             }
             break;
        }
-        case VMEXIT_CR_REG_ACCESSES:
-            if (handle_cr_access(info, exit_qual) != 0) {
-                PrintError("Error handling CR access\n");
-                return -1;
-            }
+        case VMEXIT_CR_REG_ACCESSES: {
+           struct vmx_exit_cr_qual * cr_qual = (struct vmx_exit_cr_qual *)&(exit_info->exit_qual);
+           
+           // PrintDebug("Control register: %d\n", cr_qual->access_type);
+           switch(cr_qual->cr_id) {
+               case 0:
+                   //PrintDebug("Handling CR0 Access\n");
+                   if (v3_vmx_handle_cr0_access(info, cr_qual, exit_info) == -1) {
+                       PrintError("Error in CR0 access handler\n");
+                       return -1;
+                   }
+                   break;
+               case 3:
+                   //PrintDebug("Handling CR3 Access\n");
+                   if (v3_vmx_handle_cr3_access(info, cr_qual) == -1) {
+                       PrintError("Error in CR3 access handler\n");
+                       return -1;
+                   }
+                   break;
+               default:
+                   PrintError("Unhandled CR access: %d\n", cr_qual->cr_id);
+                   return -1;
+           }
+           
+           info->rip += exit_info->instr_len;
 
-            break;
+           break;
+       }
         case VMEXIT_HLT:
             PrintDebug("Guest halted\n");
 
@@ -270,8 +205,9 @@ int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v
             break;
         case VMEXIT_INTR_WINDOW:
 
+           vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
             vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
-            check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
+            vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
 
 #ifdef CONFIG_DEBUG_INTERRUPTS
             PrintDebug("Interrupts available again! (RIP=%llx)\n", info->rip);
@@ -280,142 +216,18 @@ int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v
             break;
         default:
             PrintError("Unhandled VMEXIT: %s (%u), %lu (0x%lx)\n", 
-                      v3_vmx_exit_code_to_str(exit_reason),
-                      exit_reason, exit_qual, exit_qual);
+                      v3_vmx_exit_code_to_str(exit_info->exit_reason),
+                      exit_info->exit_reason, 
+                      exit_info->exit_qual, exit_info->exit_qual);
             return -1;
     }
 
 #ifdef CONFIG_TELEMETRY
     if (info->enable_telemetry) {
-        v3_telemetry_end_exit(info, exit_reason);
+        v3_telemetry_end_exit(info, exit_info->exit_reason);
     }
 #endif
 
-
-    /* Check for pending exceptions to inject */
-    if (v3_excp_pending(info)) {
-        struct vmx_entry_int_info int_info;
-        int_info.value = 0;
-
-        // In VMX, almost every exception is hardware
-        // Software exceptions are pretty much only for breakpoint or overflow
-        int_info.type = 3;
-        int_info.vector = v3_get_excp_number(info);
-
-        if (info->excp_state.excp_error_code_valid) {
-            check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
-            int_info.error_code = 1;
-
-#ifdef CONFIG_DEBUG_INTERRUPTS
-            PrintDebug("Injecting exception %d with error code %x\n", 
-                    int_info.vector, info->excp_state.excp_error_code);
-#endif
-        }
-
-        int_info.valid = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
-        PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)info->rip);
-#endif
-        check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);
-
-        v3_injecting_excp(info, int_info.vector);
-
-    } else if (((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) {
-       
-        if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
-
-#ifdef CONFIG_DEBUG_INTERRUPTS
-            PrintDebug("IRQ pending from previous injection\n");
-#endif
-
-            // Copy the IDT vectoring info over to reinject the old interrupt
-            if (idt_vec_info.error_code == 1) {
-                uint32_t err_code = 0;
-
-                check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
-                check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
-            }
-
-            idt_vec_info.undef = 0;
-            check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);
-
-        } else {
-            struct vmx_entry_int_info ent_int;
-            ent_int.value = 0;
-
-            switch (v3_intr_pending(info)) {
-                case V3_EXTERNAL_IRQ: {
-                    info->intr_state.irq_vector = v3_get_intr(info); 
-                    ent_int.vector = info->intr_state.irq_vector;
-                    ent_int.type = 0;
-                    ent_int.error_code = 0;
-                    ent_int.valid = 1;
-
-#ifdef CONFIG_DEBUG_INTERRUPTS
-                    PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n", 
-                              info->intr_state.irq_vector, 
-                              (uint32_t)info->num_exits, 
-                              (void *)info->rip);
-#endif
-
-                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
-                    info->intr_state.irq_started = 1;
-
-                    break;
-                }
-                case V3_NMI:
-                    PrintDebug("Injecting NMI\n");
-
-                    ent_int.type = 2;
-                    ent_int.vector = 2;
-                    ent_int.valid = 1;
-                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
-
-                    break;
-                case V3_SOFTWARE_INTR:
-                    PrintDebug("Injecting software interrupt\n");
-                    ent_int.type = 4;
-
-                    ent_int.valid = 1;
-                    check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
-
-                   break;
-                case V3_VIRTUAL_IRQ:
-                    // Not sure what to do here, Intel doesn't have virtual IRQs
-                    // May be the same as external interrupts/IRQs
-
-                   break;
-                case V3_INVALID_INTR:
-                default:
-                    break;
-            }
-        }
-    } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
-        // Enable INTR window exiting so we know when IF=1
-        uint32_t instr_len;
-
-        check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-
-#ifdef CONFIG_DEBUG_INTERRUPTS
-        PrintDebug("Enabling Interrupt-Window exiting: %d\n", instr_len);
-#endif
-
-        vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
-        check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
-    }
-
-    check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
-    check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
-    check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);
-    check_vmcs_write(VMCS_GUEST_RIP, info->rip);
-    check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);
-
-    check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
-
-    v3_disable_ints();
-
-    rdtscll(info->time_state.cached_host_tsc);
-
     return 0;
 }
 
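v3_handle_vmx_exit() now takes a single struct vmx_exit_info argument populated by the low-level exit path, replacing the per-exit check_vmcs_read() calls that used to live here. Its definition is not shown in this section; inferred from the fields the handler uses, it plausibly looks like the sketch below (field names as used above, everything else an assumption), together with a hypothetical once-per-exit population step built on the vmcs_read() and VMCS_* names already present in this commit:

/* Field set inferred from usage in the handler above; the real definition
 * in vmx_handler.h may contain more. addr_t and vmcs_read() are the
 * project's own names, reused here. */
struct vmx_exit_info_sketch {
    uint32_t exit_reason;   /* VMCS_EXIT_REASON */
    addr_t   exit_qual;     /* VMCS_EXIT_QUAL */
    uint32_t int_info;      /* VMCS_EXIT_INT_INFO */
    uint32_t int_err;       /* VMCS_EXIT_INT_ERR */
    uint32_t instr_len;     /* VMCS_EXIT_INSTR_LEN */
};

/* Hypothetical population step, run once per exit before the handler is called */
static int populate_exit_info_sketch(struct vmx_exit_info_sketch * exit_info) {
    int ret = 0;

    ret |= vmcs_read(VMCS_EXIT_REASON,    &(exit_info->exit_reason));
    ret |= vmcs_read(VMCS_EXIT_QUAL,      &(exit_info->exit_qual));
    ret |= vmcs_read(VMCS_EXIT_INT_INFO,  &(exit_info->int_info));
    ret |= vmcs_read(VMCS_EXIT_INT_ERR,   &(exit_info->int_err));
    ret |= vmcs_read(VMCS_EXIT_INSTR_LEN, &(exit_info->instr_len));

    return ret;
}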
diff --git a/palacios/src/palacios/vmx_lowlevel.S b/palacios/src/palacios/vmx_lowlevel.S
index 4d0e82c..aaae943 100644 (file)
        mov     104(%rax), %r13;        \
        mov     112(%rax), %r14;        \
        mov     120(%rax), %r15;        \
-    pushq %rbx;              \
-    movq 56(%rax), %rbx;     \
-    movq %rbx, %rax;         \
-    popq %rbx;;
+       pushq %rbx;                     \
+       movq 56(%rax), %rbx;            \
+       movq %rbx, %rax;                \
+       popq %rbx;
 
-#define save_ctrl_regs(location)    \
+#define save_ctrl_regs(location)  \
     pushq %rax;              \
     pushq %rbx;              \
     movq location, %rax;     \
     pop %rbx;    \
     pop %rax;    
 
-.align 8
-.globl v3_vmx_exit_handler
-v3_vmx_exit_handler:
-    // the save_* argument is a macro expansion; it has to jump past any pushes in the macro
-    // stack: vm_regs ptr, ctrl_regs_ptr
-    // save registers macro stack: vm_regs ptr, ctrl_regs ptr, pushed rax
-    // save_ctrl_regs macro stack: vm_regs ptr, ctrl_regs_ptr, pushed rax, pushed rbx
-    // Both macros jump past 2 saved values to reach their pointers, so both are 16(rsp)
-    save_registers(16(%rsp));
-    save_ctrl_regs(16(%rsp));
-    addq $16, %rsp
-    POPA
-    popf
-    pushq %rdi
-    pushq %rsi
-    pushq %rdx
-    call v3_handle_vmx_exit
-
-    andq %rax, %rax
-    jnz .Lvmm_failure
 
-v3_vmx_vmresume:
-    pop %rdx
-    pop %rsi
-    pop %rdi
-    pushf
-    PUSHA
-    pushq %rdi
-    pushq %rdx
-    restore_ctrl_regs(%rdx);
+#define PRE_LAUNCH(return_target)      \
+    pushf;                             \
+    PUSHA;                             \
+    pushq %rdi;                                \
+    pushq %rdx;                                \
+                                       \
+    movq %rsp, %rax;                   \
+    movq $VMCS_HOST_RSP, %rbx;         \
+    vmwrite %rax, %rbx;                        \
+    jz .Lfail_valid;                   \
+    jc .Lfail_invalid;                 \
+                                       \
+    movq return_target, %rax;          \
+    movq $VMCS_HOST_RIP, %rbx;         \
+    vmwrite %rax, %rbx;                        \
+    jz .Lfail_valid;                   \
+    jc .Lfail_invalid;                 \
+                                       \
+    restore_ctrl_regs(%rdx);           \
     restore_registers(%rdi);
 
-    vmresume
 
-    jz .Lfail_valid
-    jc .Lfail_invalid
-    addq $16, %rsp
-    jmp .Lreturn
+
 
 .align 8
-.globl v3_vmx_vmlaunch
+.globl v3_vmx_resume
 // vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx
-v3_vmx_vmlaunch:
-    pushf
-    PUSHA
-    pushq %rdi
-    pushq %rdx
-    
-    movq %rsp, %rax
-    movq $VMCS_HOST_RSP, %rbx
-    vmwrite %rax, %rbx
-    jz .Lfail_valid
-    jc .Lfail_invalid
+v3_vmx_resume:
+
+    PRE_LAUNCH($vmx_resume_ret);
 
-    movq $v3_vmx_exit_handler, %rax
-    movq $VMCS_HOST_RIP, %rbx
-    vmwrite %rax, %rbx
+    vmresume
+
+vmx_resume_ret:
     jz .Lfail_valid
     jc .Lfail_invalid
+    jmp .Lnormal_exit
 
-    restore_ctrl_regs(%rdx);
-    restore_registers(%rdi);
+
+.align 8
+.globl v3_vmx_launch
+// vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx
+v3_vmx_launch:
+
+    PRE_LAUNCH($vmx_launch_ret);
 
     vmlaunch
+
+vmx_launch_ret:
     jz .Lfail_valid
     jc .Lfail_invalid
-    jmp .Lreturn
+    jmp .Lnormal_exit
+
+
+
 
 .Lfail_valid:
     addq $16, %rsp
@@ -196,8 +185,18 @@ v3_vmx_vmlaunch:
     movq $VMM_FAILURE, %rax
     jmp .Lreturn
 
+
+.Lnormal_exit:
+    save_registers(16(%rsp));
+    save_ctrl_regs(16(%rsp));
+    addq $16, %rsp
+    POPA
+    popf
+    xorq %rax, %rax
+    jmp .Lreturn
+
+
 .Lreturn:
-    sti
     ret
     
 #else
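The new low-level entry points replace the single v3_vmx_vmlaunch/v3_vmx_exit_handler pair: v3_vmx_launch and v3_vmx_resume both run the shared PRE_LAUNCH macro (which records HOST_RSP and a per-entry HOST_RIP return target), issue vmlaunch or vmresume, and fall into .Lnormal_exit, which restores host state and returns 0; the failure paths return VMM_FAILURE. Going by the register comment in the assembly (vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx), the C side presumably declares and calls them roughly as follows; this is a hedged sketch, not the actual prototypes from the VMX headers:

/* Assumed prototypes matching the System V argument registers noted in the
 * assembly; 0 on a normal VM exit, non-zero on failure. */
int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info,
                  struct v3_ctrl_regs * ctrl_regs);
int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info,
                  struct v3_ctrl_regs * ctrl_regs);

/* Sketch: the first entry into the guest uses vmlaunch, later entries vmresume */
static int enter_guest_sketch(struct guest_info * info, int already_launched) {
    if (!already_launched) {
        return v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
    }

    return v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
}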
diff --git a/palacios/src/palacios/vmx_lowlevel.asm b/palacios/src/palacios/vmx_lowlevel.asm
deleted file mode 100644 (file)
index 94f7432..0000000
+++ /dev/null
@@ -1,821 +0,0 @@
-; -*- fundamental -*- 
-;;
-;; This file is part of the Palacios Virtual Machine Monitor developed
-;; by the V3VEE Project with funding from the United States National 
-;; Science Foundation and the Department of Energy.  
-;;
-;; The V3VEE Project is a joint project between Northwestern University
-;; and the University of New Mexico.  You can find out more at 
-;; http://www.v3vee.org
-;;
-;; Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
-;; Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
-;; All rights reserved.
-;;
-;; Author: Jack Lange <jarusl@cs.northwestern.edu>
-;;
-;; This is free software.  You are permitted to use,
-;; redistribute, and modify it as specified in the file "V3VEE_LICENSE".
-;;
-
-%ifndef VMX_ASM
-%define VMX_ASM
-
-
-%include "defs.asm"
-%include "symbol.asm"
-
-
-%include "vmcs_fields.asm"
-
-VMX_SUCCESS equ        0x00000000
-VMX_FAIL_INVALID equ 0x00000001
-VMX_FAIL_VALID equ 0x00000002
-VMM_ERROR      equ 0x00000003
-
-[BITS 32]
-
-IMPORT Do_VMM
-
-
-; VMX Functions
-EXPORT VMCS_READ
-EXPORT VMCS_WRITE
-EXPORT VMCS_CLEAR
-EXPORT VMCS_LOAD
-EXPORT VMCS_STORE
-EXPORT Enable_VMX
-EXPORT Disable_VMX
-EXPORT Launch_VM
-EXPORT VMCS_LAUNCH
-EXPORT VMCS_RESUME
-EXPORT RunVMM
-EXPORT SAFE_VM_LAUNCH
-EXPORT Init_VMCS_HostState
-EXPORT Init_VMCS_GuestState
-       
-
-;
-; VMCS_LAUNCH
-;
-align 8
-VMCS_LAUNCH:
-       vmlaunch
-       jz      .error_code
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       ret
-
-
-
-;
-; VMCS_RESUME
-;
-align 8
-VMCS_RESUME:
-       vmresume
-       jz      .error_code
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       ret
-
-align 8
-SAFE_VM_LAUNCH:
-       pushf
-       pusha
-       mov     eax, HOST_RSP
-       vmwrite eax, esp
-       jz      .esp_err
-       jc      .esp_err
-       jmp     .vm_cont
-
-.esp_err
-       popa
-       jz      .error_code
-       jc      .error
-.vm_cont
-       vmlaunch
-       popa
-       jz      .error_code
-       jc      .error  
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       popf
-       ret
-
-
-;
-; RunVMM
-;
-align 8
-RunVMM:
-       pusha
-       call    Do_VMM
-       and     eax, eax
-       jnz     .vmm_error
-       jmp     .vm_cont
-
-.vmm_error
-       popa
-       popa
-       mov     eax, VMM_ERROR
-       jmp     .return
-
-.vm_cont
-       popa
-       vmresume
-       popa    ; we only get here if there is an error in the vmresume
-               ; we restore the host state and return an error code
-
-       jz      .error_code
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       popf
-       ret
-
-
-
-
-;
-; Setup_VMCS_GuestState
-; Copy all of the Guest registers into the guest state of a vmcs 
-;
-
-align 8
-InitGuestSelectors:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-       push    ebx
-
-       mov     ebx, VMCS_GUEST_ES_SELECTOR
-       mov     eax, es
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_GUEST_CS_SELECTOR
-       mov     eax, cs
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_GUEST_SS_SELECTOR
-       mov     eax, ss
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_GUEST_DS_SELECTOR
-       mov     eax, ds
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_GUEST_FS_SELECTOR
-       mov     eax, fs
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_GUEST_GS_SELECTOR
-       mov     eax, gs
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       str     [esp]
-       mov     eax, [esp]
-       mov     ebx, VMCS_GUEST_TR_SELECTOR
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       pop     ebx
-       pop     ebx
-       pop     ebp
-       ret
-ret
-
-align 8
-InitGuestDescRegs:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-       sub     esp, 6
-
-
-       sgdt    [esp]
-       mov     eax, [esp]
-       and     eax, 0xffff
-       mov     ebx, GUEST_GDTR_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, [esp+2]
-       mov     ebx, GUEST_GDTR_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-
-       sidt    [esp]
-       mov     eax, [esp]
-       and     eax, 0xffff
-       mov     ebx, GUEST_IDTR_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, [esp+2]
-       mov     ebx, GUEST_IDTR_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-
-       sldt    [esp]
-       mov     eax, [esp]      
-       mov     ebx, GUEST_LDTR_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-
-       mov     eax, 0x00000000
-       mov     ebx, GUEST_LDTR_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code     
-       jc      .error
-
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-
-       add     esp, 6
-       pop     ebx
-       pop     ebp
-       ret
-
-
-
-
-
-align 8
-InitGuestSegBases:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-
-
-       mov     eax, dword 0
-       mov     ebx, GUEST_ES_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, dword 0
-       mov     ebx, GUEST_CS_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, dword 0
-       mov     ebx, GUEST_SS_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, dword 0
-       mov     ebx, GUEST_DS_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, dword 0
-       mov     ebx, GUEST_FS_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, dword 0
-       mov     ebx, GUEST_GS_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-;      mov     eax, dword 0
-       mov     eax, 0x000220a0
-       mov     ebx, GUEST_TR_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-
-       pop     ebx
-       pop     ebp
-       ret
-
-align 8
-InitGuestSegsAccess:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-
-       mov     eax, 1100000010010011b
-       mov     ebx, GUEST_ES_ACCESS
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-
-
-       mov     eax, 1100000010011001b
-;      mov     eax, 0x0000c099
-       mov     ebx, GUEST_CS_ACCESS
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-;      mov     eax, 1100000010010111b
-       mov     eax, 1100000010010011b
-       mov     ebx, GUEST_SS_ACCESS
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, 1100000010010011b
-       mov     ebx, GUEST_DS_ACCESS
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-
-       mov     eax, 1100000010010011b
-       mov     ebx, GUEST_FS_ACCESS
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-
-       mov     eax, 1100000010010011b
-       mov     ebx, GUEST_GS_ACCESS
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, 0x10000
-       mov     ebx, GUEST_LDTR_ACCESS
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, 01000000010001011b
-       mov     ebx, GUEST_TR_ACCESS
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-; 
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       pop     ebx
-       pop     ebp
-       ret
-
-;; Do seg limit
-align 8
-InitGuestSegsLimits:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-
-       
-;      mov     eax, 0xffffffff
-       mov     eax, 0xffffffff
-       mov     ebx, GUEST_ES_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code     
-       jc      .error
-
-;      mov     eax, 0xffffffff
-       mov     eax, 0xffffffff
-       mov     ebx, GUEST_CS_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code     
-       jc      .error
-
-;      mov     eax, 0xffffffff
-       mov     eax, 0xffffffff
-       mov     ebx, GUEST_SS_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code     
-       jc      .error
-
-;      mov     eax, 0xffffffff
-       mov     eax, 0xffffffff
-       mov     ebx, GUEST_DS_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code     
-       jc      .error
-
-;      mov     eax, 0xffffffff
-       mov     eax, 0xffffffff
-       mov     ebx, GUEST_FS_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code     
-       jc      .error
-
-;      mov     eax, 0xffffffff
-       mov     eax, 0xffffffff
-       mov     ebx, GUEST_GS_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code     
-       jc      .error
-
-;      mov     eax, 0xffffffff
-       mov     eax, 0x68fff
-       mov     ebx, GUEST_TR_LIMIT
-       vmwrite ebx, eax
-       jz      .error_code     
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       pop     ebx
-       pop     ebp
-       ret
-
-
-align 8
-Init_VMCS_GuestState:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-
-       mov     ebx, GUEST_CR3
-       mov     eax, cr3
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       call    InitGuestSelectors
-       and     eax, 0xffffffff
-       jz      .selDone
-       jmp     .return
-.selDone
-
-       call    InitGuestDescRegs
-       and     eax, 0xffffffff
-       jz      .descRegsDone
-       jmp     .return
-.descRegsDone
-
-       call    InitGuestSegBases
-       and     eax, 0xffffffff
-       jz      .descSegBasesDone
-       jmp     .return
-.descSegBasesDone
-
-
-       call    InitGuestSegsLimits
-       and     eax, 0xffffffff
-       jz      .segsLimitsDone
-       jmp     .return
-.segsLimitsDone
-
-       call    InitGuestSegsAccess
-       and     eax, 0xffffffff
-       jz      .segsAccessDone
-       jmp     .return
-.segsAccessDone
-
-       mov     ebx, GUEST_RSP
-       mov     eax, esp
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, GUEST_RFLAGS
-       mov     eax, dword 0x00000002
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, GUEST_DR7
-       mov     eax, dword 0x00000400
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       pop     ebx
-       pop     ebp
-       ret
-
-;
-; Setup_VMCS_HostState
-; Copy all of the host registers into the host state of a vmcs 
-;
-
-align 8
-InitHostSelectors:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-       push    ebx
-
-       mov     ebx, VMCS_HOST_ES_SELECTOR
-       mov     eax, es
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_HOST_CS_SELECTOR
-       mov     eax, cs
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_HOST_SS_SELECTOR
-       mov     eax, ss
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_HOST_DS_SELECTOR
-       mov     eax, ds
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_HOST_FS_SELECTOR
-       mov     eax, fs
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     ebx, VMCS_HOST_GS_SELECTOR
-       mov     eax, gs
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       str     [esp]
-       mov     eax, [esp]
-       mov     ebx, VMCS_HOST_TR_SELECTOR
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       pop     ebx
-       pop     ebx
-       pop     ebp
-       ret
-ret
-
-
-
-
-
-align 8
-InitHostBaseRegs:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-       sub     esp, 6
-
-       sgdt    [esp]
-       mov     eax, [esp+2]
-       mov     ebx, HOST_GDTR_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       sidt    [esp]
-       mov     eax, [esp+2]
-       mov     ebx, HOST_IDTR_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-
-       mov     eax, dword 0
-       mov     ebx, HOST_FS_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, dword 0
-       mov     ebx, HOST_GS_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, dword 0
-       mov     ebx, HOST_TR_BASE
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-
-       add     esp, 6
-       pop     ebx
-       pop     ebp
-       ret
-
-
-align 8
-Init_VMCS_HostState:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-       
-       mov     ebx, HOST_CR3
-       mov     eax, cr3
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-
-       mov     ebx, HOST_RSP
-       mov     eax, esp
-       vmwrite ebx, eax
-       jz      .error_code
-       jc      .error
-
-;      push    esp
-       call    InitHostSelectors
-       and     eax, 0xffffffff
-       jz      .selDone
-       jmp     .return
-.selDone
-;      push    esp
-       call    InitHostBaseRegs
-       and     eax, 0xffffffff
-       jz      .baseRegsDone
-       jmp     .return
-.baseRegsDone
-
-
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       mov     eax, VMX_FAIL_INVALID
-       jmp     .return
-.error_code
-       mov     eax, VMX_FAIL_VALID
-.return
-       pop     ebx
-       pop     ebp
-       ret
-
-;
-; Launch_VM - inits a vmcs with an ip and launches it
-; [eip = ebp + 8], [vmcs = ebp + 12]
-; int Launch_VM(ullont_t VMCS, uint_t eip);
-;
-align 8
-Launch_VM:
-       push    ebp
-       mov     ebp, esp
-       push    ebx
-       mov     ebx, dword 0
-       vmclear [ebp+8]
-       jz      .error_code
-       jc      .error
-       add     ebx, dword 1
-       vmptrld [ebp+8]
-       jz      .error_code
-       jc      .error
-       mov     eax, dword 0x0000681E
-       add     ebx, dword 1
-       vmwrite eax, [ebp+16]
-       jz      .error_code
-       jc      .error
-       add     ebx, dword 1
-       vmlaunch
-       jz      .error_code
-       jc      .error
-       mov     eax, VMX_SUCCESS
-       jmp     .return
-.error
-       shl     ebx, 4
-       mov     eax, VMX_FAIL_INVALID
-       or      eax, ebx
-       jmp     .return
-.error_code
-       shl     ebx, 4
-       mov     eax, VMX_FAIL_VALID
-       or      eax, ebx
-       mov     ebx, dword 0x00004400
-       vmread  eax, ebx
-.return
-       pop     ebx
-       pop     ebp
-
-       ret
-
-
-%endif
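One convention runs through both the new .S code and the deleted 32-bit .asm stubs: after every VMX instruction, jz catches VMfailValid (ZF set; an error number is then readable from the VM-instruction-error VMCS field, encoding 0x4400, as Launch_VM does above) and jc catches VMfailInvalid (CF set, no current VMCS); neither flag set means success. A self-contained sketch of that flag-to-status mapping, using the VMX_SUCCESS/VMX_FAIL_* values defined in the deleted file:

#include <stdint.h>

#define VMX_SUCCESS       0x00000000
#define VMX_FAIL_INVALID  0x00000001   /* CF=1: VMfailInvalid (no current VMCS) */
#define VMX_FAIL_VALID    0x00000002   /* ZF=1: VMfailValid (error in VMCS field 0x4400) */

#define RFLAGS_CF  (1UL << 0)
#define RFLAGS_ZF  (1UL << 6)

/* flags is assumed to be RFLAGS captured (e.g. via pushf/pop) immediately
 * after the vmlaunch/vmresume/vmwrite, before anything clobbers it. */
static int vmx_check_flags_sketch(uint64_t flags) {
    if (flags & RFLAGS_CF) {
        return VMX_FAIL_INVALID;
    }

    if (flags & RFLAGS_ZF) {
        return VMX_FAIL_VALID;
    }

    return VMX_SUCCESS;
}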