int v3_init_svm_msr_map(struct guest_info * info);
-int v3_handle_msr_write(struct guest_info * info);
-
-int v3_handle_msr_read(struct guest_info * info);
-
-
-
#endif // ! __V3VEE__
uint64_t yield_cycle_period;
uint64_t yield_start_cycle;
+ uint64_t num_exits;
#ifdef CONFIG_TELEMETRY
uint_t enable_telemetry;
#include <palacios/vmm_types.h>
- /* Pin Based VM Execution Controls */
- /* INTEL MANUAL: 20-10 vol 3B */
-#define EXT_INTR_EXIT 0x00000001
-#define NMI_EXIT 0x00000008
-#define VIRTUAL_NMIS 0x00000020
-/* Processor Based VM Execution Controls */
-/* INTEL MANUAL: 20-11 vol. 3B */
-#define INTR_WIN_EXIT 0x00000004
-#define USE_TSC_OFFSET 0x00000008
-#define HLT_EXIT 0x00000080
-#define INVLPG_EXIT 0x00000200
-#define MWAIT_EXIT 0x00000400
-#define RDPMC_EXIT 0x00000800
-#define RDTSC_EXIT 0x00001000
-#define CR3_LOAD_EXIT 0x00008000
-#define CR3_STORE_EXIT 0x00010000
-#define CR8_LOAD_EXIT 0x00080000
-#define CR8_STORE_EXIT 0x00100000
-#define USE_TPR_SHADOW 0x00200000
-#define NMI_WINDOW_EXIT 0x00400000
-#define MOVDR_EXIT 0x00800000
-#define UNCOND_IO_EXIT 0x01000000
-#define USE_IO_BITMAPS 0x02000000
-#define USE_MSR_BITMAPS 0x10000000
-#define MONITOR_EXIT 0x20000000
-#define PAUSE_EXIT 0x40000000
-#define ACTIVE_SEC_CTRLS 0x80000000
+#include <palacios/vm_guest.h>
+
/* VM-Exit Controls */
/* INTEL MANUAL: 20-16 vol. 3B */
#define HOST_ADDR_SPACE_SIZE 0x00000200
#define ACK_IRQ_ON_EXIT 0x00008000
/* Control register exit masks */
-#define CR0_PE 0x00000001
-#define CR0_PG 0x80000000
#define CR4_VMXE 0x00002000
+int v3_load_vmcs_guest_state(struct guest_info * info);
+int v3_update_vmcs_guest_state(struct guest_info * info);
+int v3_update_vmcs_host_state(struct guest_info * info);
+int v3_update_vmcs_ctrl_fields(struct guest_info * info);
+
+
typedef enum {
VMCS_GUEST_ES_SELECTOR = 0x00000800,
VMCS_GUEST_CS_SELECTOR = 0x00000802,
VMCS_LINK_PTR_HIGH = 0x00002801,
VMCS_GUEST_DBG_CTL = 0x00002802,
VMCS_GUEST_DBG_CTL_HIGH = 0x00002803,
+ VMCS_GUEST_EFER = 0x00002805,
+ VMCS_GUEST_EFER_HIGH = 0x00002807,
VMCS_GUEST_PERF_GLOBAL_CTRL = 0x00002808,
VMCS_GUEST_PERF_GLOBAL_CTRL_HIGH = 0x00002809,
VMCS_IDT_VECTOR_INFO = 0x00004408,
VMCS_IDT_VECTOR_ERR = 0x0000440A,
VMCS_EXIT_INSTR_LEN = 0x0000440C,
- VMCS_VMX_INSTR_INFO = 0x0000440E,
+ VMCS_EXIT_INSTR_INFO = 0x0000440E,
/* 32 bit Guest state fields */
VMCS_GUEST_ES_LIMIT = 0x00004800,
VMCS_GUEST_CS_LIMIT = 0x00004802,
VMCS_HOST_RIP = 0x00006C16,
} vmcs_field_t;
-int v3_vmcs_get_field_len(vmcs_field_t field);
-const char* v3_vmcs_field_to_str(vmcs_field_t field);
-void v3_print_vmcs();
-
-
-
-/* Exit Vector Info */
-struct VMExitIntInfo {
- uint32_t nr : 8; // IRQ number, exception vector, NMI = 2
- uint32_t type : 3; // (0: ext. IRQ , 2: NMI , 3: hw exception , 6: sw exception
- uint32_t errorCode : 1; // 1: error Code present
- uint32_t iret : 1; // something to do with NMIs and IRETs (Intel 3B, sec. 23.2.2)
- uint32_t rsvd : 18; // always 0
- uint32_t valid : 1; // always 1 if valid
-} __attribute__((packed));
-
-
-/* End Exit Vector Info */
-
struct vmx_exception_bitmap {
union {
uint32_t value;
- struct {
- uint_t de : 1; // (0) divide by zero
- uint_t db : 1; // (1) Debug
- uint_t nmi : 1; // (2) Non-maskable interrupt
- uint_t bp : 1; // (3) Breakpoint
- uint_t of : 1; // (4) Overflow
- uint_t br : 1; // (5) Bound-Range
- uint_t ud : 1; // (6) Invalid-Opcode
- uint_t nm : 1; // (7) Device-not-available
- uint_t df : 1; // (8) Double Fault
- uint_t ex9 : 1;
- uint_t ts : 1; // (10) Invalid TSS
- uint_t np : 1; // (11) Segment-not-present
- uint_t ss : 1; // (12) Stack
- uint_t gp : 1; // (13) General Protection Fault
- uint_t pf : 1; // (14) Page fault
- uint_t ex15 : 1;
- uint_t mf : 1; // (15) Floating point exception
- uint_t ac : 1; // (16) Alignment-check
- uint_t mc : 1; // (17) Machine Check
- uint_t xf : 1; // (18) SIMD floating-point
- uint_t ex20 : 1;
- uint_t ex21 : 1;
- uint_t ex22 : 1;
- uint_t ex23 : 1;
- uint_t ex24 : 1;
- uint_t ex25 : 1;
- uint_t ex26 : 1;
- uint_t ex27 : 1;
- uint_t ex28 : 1;
- uint_t ex29 : 1;
- uint_t sx : 1; // (30) Security Exception
- uint_t ex31 : 1;
- } __attribute__ ((packed));
+ struct {
+ uint_t de : 1; // (0) divide by zero
+ uint_t db : 1; // (1) Debug
+ uint_t nmi : 1; // (2) Non-maskable interrupt
+ uint_t bp : 1; // (3) Breakpoint
+ uint_t of : 1; // (4) Overflow
+ uint_t br : 1; // (5) Bound-Range
+ uint_t ud : 1; // (6) Invalid-Opcode
+ uint_t nm : 1; // (7) Device-not-available
+ uint_t df : 1; // (8) Double Fault
+ uint_t ex9 : 1;
+ uint_t ts : 1; // (10) Invalid TSS
+ uint_t np : 1; // (11) Segment-not-present
+ uint_t ss : 1; // (12) Stack
+ uint_t gp : 1; // (13) General Protection Fault
+ uint_t pf : 1; // (14) Page fault
+ uint_t ex15 : 1;
+        uint_t mf : 1; // (16) x87 Floating point exception
+        uint_t ac : 1; // (17) Alignment-check
+        uint_t mc : 1; // (18) Machine Check
+        uint_t xf : 1; // (19) SIMD floating-point
+ uint_t ex20 : 1;
+ uint_t ex21 : 1;
+ uint_t ex22 : 1;
+ uint_t ex23 : 1;
+ uint_t ex24 : 1;
+ uint_t ex25 : 1;
+ uint_t ex26 : 1;
+ uint_t ex27 : 1;
+ uint_t ex28 : 1;
+ uint_t ex29 : 1;
+ uint_t sx : 1; // (30) Security Exception
+ uint_t ex31 : 1;
+ } __attribute__ ((packed));
} __attribute__ ((packed));
} __attribute__((packed));
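+
+/* A sketch of intended use (this mirrors the guest init code in vmx.c):
+ * zero the bitmap, then set one bit per exception vector to intercept,
+ * e.g. to intercept page faults only:
+ *     struct vmx_exception_bitmap excp_bmap;
+ *     excp_bmap.value = 0;
+ *     excp_bmap.pf = 1;
+ *     vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
+ */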
/* Segment Selector Access Rights (32 bits) */
/* INTEL Manual: 20-4 vol 3B */
-
-
struct vmcs_segment_access {
union {
uint32_t value;
} __attribute__((packed));
-//uint_t VMCSRead(uint_t tag, void * val);
+
+int v3_vmcs_get_field_len(vmcs_field_t field);
+
+const char * v3_vmcs_field_to_str(vmcs_field_t field);
+
+void v3_print_vmcs();
#endif // ! __V3VEE__
* All rights reserved.
*
* Author: Peter Dinda <pdinda@northwestern.edu>
+ * Author: Andy Gocke <agocke@gmail.com>
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
-#ifndef __SVM_HALT_H
-#define __SVM_HALT_H
+#ifndef __VMM_HALT_H
+#define __VMM_HALT_H
#ifdef __V3VEE__
#include <palacios/vm_guest.h>
-#include <palacios/vmcb.h>
#include <palacios/vmm.h>
-int v3_handle_svm_halt(struct guest_info * info);
+int v3_handle_halt(struct guest_info * info);
#endif // ! __V3VEE__
static void __inline__ v3_cpuid(uint_t target, addr_t * eax, addr_t * ebx, addr_t * ecx, addr_t * edx) {
__asm__ __volatile__ (
- "pushl %%ebx\n\t"
"cpuid\n\t"
- "movl %%ebx, %%esi\n\t"
- "popl %%ebx\n\t"
: "=a" (*eax), "=S" (*ebx), "=c" (*ecx), "=d" (*edx)
- : "a" (target)
+ : "0" (target), "2" (*ecx)
);
return;
}
static void __inline__ v3_cpuid(uint_t target, addr_t * eax, addr_t * ebx, addr_t * ecx, addr_t * edx) {
__asm__ __volatile__ (
- "pushq %%rbx\n\t"
"cpuid\n\t"
- "movq %%rbx, %%rsi\n\t"
- "popq %%rbx\n\t"
- : "=a" (*eax), "=S" (*ebx), "=c" (*ecx), "=d" (*edx)
- : "a" (target)
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+ : "0" (target), "2" (*ecx)
);
return;
}
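+
+/* Note on the constraints: "0" (target) ties the leaf input to output
+ * operand 0 (EAX) and "2" (*ecx) ties the subleaf input to output operand
+ * 2 (ECX), so callers preload *ecx when querying leaves that take a
+ * subleaf index. */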
void v3_print_msr_map(struct guest_info * info);
+int v3_handle_msr_write(struct guest_info * info);
+
+int v3_handle_msr_read(struct guest_info * info);
+
#endif // ! __V3VEE__
*/
-#ifndef __VMX_H
-#define __VMX_H
+#ifndef __VMX_H__
+#define __VMX_H__
#ifdef __V3VEE__
#define CPUID_1_ECX_VTXFLAG 0x00000020
+struct vmx_pin_ctrls {
+ union {
+ uint32_t value;
+ struct {
+ uint_t ext_int_exit : 1;
+ uint_t rsvd1 : 2;
+ uint_t nmi_exit : 1;
+ uint_t rsvd2 : 1;
+ uint_t virt_nmi : 1;
+ uint_t active_preempt_timer : 1;
+ uint_t rsvd3 : 25;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
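+
+/* A sketch of how these control unions are used (see init_vmx_guest below):
+ *     vmx_info->pin_ctrls.value = tmp_msr.lo; // required-1 settings from the capability MSR
+ *     vmx_info->pin_ctrls.nmi_exit = 1;       // then request individual exits by name
+ */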
+
+
+struct vmx_pri_proc_ctrls {
+ union {
+ uint32_t value;
+ struct {
+ uint_t rsvd1 : 2;
+ uint_t int_wndw_exit : 1;
+ uint_t tsc_offset : 1;
+ uint_t rsvd2 : 3;
+ uint_t hlt_exit : 1;
+ uint_t rsvd3 : 1;
+ uint_t invlpg_exit : 1;
+ uint_t mwait_exit : 1;
+ uint_t rdpmc_exit : 1;
+ uint_t rdtsc_exit : 1;
+ uint_t rsvd4 : 2;
+ uint_t cr3_ld_exit : 1;
+ uint_t cr3_str_exit : 1;
+ uint_t rsvd5 : 2;
+ uint_t cr8_ld_exit : 1;
+ uint_t cr8_str_exit : 1;
+ uint_t tpr_shdw : 1;
+ uint_t nmi_wndw_exit : 1;
+ uint_t mov_dr_exit : 1;
+ uint_t uncon_io_exit : 1;
+ uint_t use_io_bitmap : 1;
+ uint_t rsvd6 : 1;
+ uint_t monitor_trap : 1;
+ uint_t use_msr_bitmap : 1;
+ uint_t monitor_exit : 1;
+ uint_t pause_exit : 1;
+ uint_t sec_ctrls : 1;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
+
+struct vmx_sec_proc_ctrls {
+ union {
+ uint32_t value;
+ struct {
+            uint_t virt_apic_acc : 1;
+            uint_t enable_ept : 1;
+            uint_t desc_table_exit : 1;
+            uint_t enable_rdtscp : 1;
+            uint_t virt_x2apic : 1;
+            uint_t enable_vpid : 1;
+            uint_t wbinvd_exit : 1;
+            uint_t unrstrct_guest : 1;
+            uint_t rsvd1 : 2;
+            uint_t pause_loop_exit : 1;
+            uint_t rsvd2 : 21;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
+
+struct vmx_exit_ctrls {
+ union {
+ uint32_t value;
+ struct {
+ uint_t rsvd1 : 2;
+ uint_t save_dbg_ctrls : 1;
+ uint_t rsvd2 : 6;
+ uint_t host_64_on : 1;
+ uint_t rsvd3 : 2;
+ uint_t ld_perf_glbl_ctrl : 1;
+ uint_t rsvd4 : 2;
+ uint_t ack_int_on_exit : 1;
+ uint_t rsvd5 : 2;
+ uint_t save_pat : 1;
+ uint_t ld_pat : 1;
+ uint_t save_efer : 1;
+ uint_t ld_efer : 1;
+ uint_t save_preempt_timer : 1;
+ uint_t rsvd6 : 9;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
+
+struct vmx_entry_ctrls {
+ union {
+ uint32_t value;
+ struct {
+ uint_t rsvd1 : 2;
+ uint_t ld_dbg_ctrls : 1;
+ uint_t rsvd2 : 6;
+ uint_t guest_ia32e : 1;
+ uint_t smm_entry : 1;
+ uint_t no_dual_monitor : 1;
+ uint_t rsvd3 : 1;
+ uint_t ld_perf_glbl_ctrl : 1;
+ uint_t ld_pat : 1;
+ uint_t ld_efer : 1;
+ uint_t rsvd4 : 16;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
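+
+/* Each control word above has a companion capability MSR (e.g.
+ * VMX_PINBASED_CTLS_MSR): its low 32 bits give the settings that must be 1
+ * and its high 32 bits the settings that may be 1. The init code seeds each
+ * .value from the MSR's low half before turning on optional bits. */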
struct vmx_basic_msr {
uint32_t revision;
} vmx_state_t;
struct tss_descriptor {
- union {
- ulong_t value;
- struct {
- uint16_t limit1;
- uint16_t base1;
- uint_t base2 : 8;
- /* In IA32, type follows the form 10B1b, where B is the busy flag */
- uint_t type : 4;
- uint_t zero1 : 1;
- uint_t dpl : 2;
- uint_t present : 1;
- uint_t limit2 : 4;
- uint_t available : 1;
- uint_t zero2 : 1;
- uint_t zero3 : 1;
- uint_t granularity : 1;
- uint_t base3 : 8;
+ uint16_t limit1;
+ uint16_t base1;
+ uint_t base2 : 8;
+ /* In IA32, type follows the form 10B1b, where B is the busy flag */
+ uint_t type : 4;
+ uint_t zero1 : 1;
+ uint_t dpl : 2;
+ uint_t present : 1;
+ uint_t limit2 : 4;
+ uint_t available : 1;
+ uint_t zero2 : 1;
+ uint_t zero3 : 1;
+ uint_t granularity : 1;
+ uint_t base3 : 8;
#ifdef __V3_64BIT__
- uint32_t base4;
- uint_t rsvd1 : 8;
- uint_t zero4 : 5;
- uint_t rsvd2 : 19;
+ uint32_t base4;
+ uint_t rsvd1 : 8;
+ uint_t zero4 : 5;
+ uint_t rsvd2 : 19;
#endif
- } __attribute__((packed));
- } __attribute__((packed));
}__attribute__((packed));
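+
+/* Used when capturing host state: STR only yields the TR selector, so the
+ * host TSS base has to be assembled by hand from base1/base2/base3 (and
+ * base4 on 64 bit) of the descriptor located through the GDT. */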
struct vmcs_host_state {
struct vmx_data {
vmx_state_t state;
- addr_t vmcs_ptr_phys;
struct vmcs_host_state host_state;
- /* VMX Control Fields */
- uint32_t pinbased_ctrls;
- uint32_t pri_procbased_ctrls;
- uint32_t sec_procbased_ctrls;
- uint32_t exit_ctrls;
- uint32_t entry_ctrls;
-};
+ addr_t vmcs_ptr_phys;
-enum InstructionType { VM_UNKNOWN_INST, VM_MOV_TO_CR0 } ;
+ uint8_t ia32e_avail;
-struct Instruction {
- enum InstructionType type;
- uint_t address;
- uint_t size;
- uint_t input1;
- uint_t input2;
- uint_t output;
+ /* VMX Control Fields */
+ struct vmx_pin_ctrls pin_ctrls;
+ struct vmx_pri_proc_ctrls pri_proc_ctrls;
+ struct vmx_sec_proc_ctrls sec_proc_ctrls;
+ struct vmx_exit_ctrls exit_ctrls;
+ struct vmx_entry_ctrls entry_ctrls;
};
-
-
-
int v3_is_vmx_capable();
-void v3_init_vmx(struct v3_ctrl_ops* vm_ops);
-int v3_update_vmcs_guest_state(struct guest_info * info);
-int v3_update_vmcs_ctrl_fields(struct guest_info * info);
-int v3_update_vmcs_host_state(struct guest_info * info);
+void v3_init_vmx(struct v3_ctrl_ops * vm_ops);
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>
-int v3_vmx_handle_cr0_write(struct guest_info * info, v3_reg_t new_val);
+int v3_vmx_handle_cr0_access(struct guest_info * info);
+int v3_vmx_handle_cr3_access(struct guest_info * info);
+
} vmx_exit_t;
/* VMCS Exit QUALIFICATIONs */
-struct vmexit_io_qual {
- uint32_t access_size : 3; // (0: 1 Byte ;; 1: 2 Bytes ;; 3: 4 Bytes)
- uint32_t dir : 1; // (0: Out ;; 1: In)
- uint32_t string : 1; // (0: not string ;; 1: string)
- uint32_t rep : 1; // (0: not REP ;; 1: REP)
- uint32_t op_enc : 1; // (0: DX ;; 1: immediate)
- uint32_t rsvd : 9; // Set to 0
- uint32_t port : 16; // IO Port Number
+struct vmx_exit_io_qual {
+ union {
+ uint32_t value;
+ struct {
+ uint32_t access_size : 3; // (0: 1 Byte ;; 1: 2 Bytes ;; 3: 4 Bytes)
+ uint32_t dir : 1; // (0: Out ;; 1: In)
+ uint32_t string : 1; // (0: not string ;; 1: string)
+ uint32_t rep : 1; // (0: not REP ;; 1: REP)
+ uint32_t op_enc : 1; // (0: DX ;; 1: immediate)
+ uint32_t rsvd : 9; // Set to 0
+ uint32_t port : 16; // IO Port Number
+ } __attribute__((packed));
+ } __attribute__((packed));
} __attribute__((packed));
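+
+/* A sketch of decoding an IO exit (fields as defined above; the raw value
+ * comes from the exit-qualification VMCS field):
+ *     struct vmx_exit_io_qual io_qual;
+ *     io_qual.value = (uint32_t)exit_qual;
+ *     uint_t bytes = io_qual.access_size + 1;  // 0 -> 1, 1 -> 2, 3 -> 4
+ *     if (io_qual.dir == 1) { // handle IN from io_qual.port
+ *     } else {                // handle OUT to io_qual.port
+ *     }
+ */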
+struct vmx_exit_io_instr_info {
+ union {
+ uint32_t value;
+ struct {
+ uint32_t undef1 : 7;
+ uint32_t addr_size : 3;
+ uint32_t undef2 : 5;
+ uint32_t seg_reg : 3;
+ uint32_t undef3 : 14;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
+
+/* Exit Interrupt Vector Info */
+struct vmx_exit_int_info {
+ union {
+ uint32_t value;
+ struct {
+ uint32_t vector : 8; // IRQ number, exception vector, NMI = 2
+            uint32_t type : 3; // (0: ext. IRQ , 2: NMI , 3: hw exception , 6: sw exception)
+            uint32_t error_code : 1; // 1: error code present
+            uint32_t nmi_unblock : 1; // 1: exit came from an IRET that unblocked NMIs (Intel 3B, sec. 23.2.2)
+ uint32_t rsvd : 18; // always 0
+ uint32_t valid : 1; // always 1 if valid
+ } __attribute__ ((packed));
+ } __attribute__ ((packed));
+} __attribute__((packed));
+
+/* VMX entry interrupt format */
+struct vmx_entry_int_info {
+ union {
+ uint32_t value;
+ struct {
+ uint32_t vector : 8; // IRQ/exception vector number
+            uint32_t type : 3; // (0: ext. IRQ, 2: NMI, 3: hw excp, 4: sw int, 5: priv. sw excp, 6: sw excp, 7: other)
+ uint32_t error_code : 1; // 1: deliver error code
+ uint32_t rsvd : 19;
+ uint32_t valid : 1; // 1: valid
+ } __attribute__ ((packed));
+ } __attribute__ ((packed));
+} __attribute__ ((packed));
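+
+/* A sketch of injecting an event on VM entry (the VMCS field name here is
+ * hypothetical; it is not part of this excerpt):
+ *     struct vmx_entry_int_info ent;
+ *     ent.value = 0;
+ *     ent.vector = 32;  // external IRQ vector
+ *     ent.type = 0;     // external interrupt
+ *     ent.valid = 1;
+ *     vmcs_write(VMCS_ENTRY_INT_INFO, ent.value);
+ */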
+
+
struct VMExitDBGQual {
uint32_t src : 2; // (0: CALL ; 1: IRET ; 2: JMP ; 3: Task gate in IDT)
} __attribute__((packed));
-struct vmexit_cr_qual {
- uint32_t cr_id : 4; // cr number (0 for CLTS and LMSW) (bit 3 always 0, on 32bit)
- uint32_t access_type : 2; // (0: MOV to CR ; 1: MOV from CR ; 2: CLTS ; 3: LMSW)
- uint32_t lmsw_op_type : 1; // (0: register ; 1: memory)
- uint32_t rsvd1 : 1; // reserved to 0
- uint32_t gpr : 4; // (0:RAX+[CLTS/LMSW], 1:RCX, 2:RDX, 3:RBX, 4:RSP, 5:RBP, 6:RSI, 6:RDI, 8-15:64bit regs)
- uint32_t rsvd2 : 4; // reserved to 0
- uint32_t lmsw_src : 16; // src data for lmsw
+struct vmx_exit_cr_qual {
+ union {
+ uint32_t value;
+ struct {
+ uint32_t cr_id : 4; // cr number (0 for CLTS and LMSW) (bit 3 always 0, on 32bit)
+ uint32_t access_type : 2; // (0: MOV to CR ; 1: MOV from CR ; 2: CLTS ; 3: LMSW)
+ uint32_t lmsw_op_type : 1; // (0: register ; 1: memory)
+ uint32_t rsvd1 : 1; // reserved to 0
+            uint32_t gpr : 4; // (0:RAX+[CLTS/LMSW], 1:RCX, 2:RDX, 3:RBX, 4:RSP, 5:RBP, 6:RSI, 7:RDI, 8-15:64bit regs)
+ uint32_t rsvd2 : 4; // reserved to 0
+ uint32_t lmsw_src : 16; // src data for lmsw
+ } __attribute__((packed));
+ } __attribute__((packed));
} __attribute__((packed));
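+
+/* A sketch of decoding a CR-access exit (raw value again from the
+ * exit-qualification VMCS field):
+ *     struct vmx_exit_cr_qual cr_qual;
+ *     cr_qual.value = (uint32_t)exit_qual;
+ *     if ((cr_qual.cr_id == 0) && (cr_qual.access_type == 0)) {
+ *         // MOV to CR0; cr_qual.gpr names the source register
+ *     }
+ */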
struct VMExitMovDRQual {
/* End Exit Qualifications */
-
-int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info);
+struct vmx_exit_idt_vec_info {
+ union {
+ uint32_t value;
+ struct {
+ uint32_t vector : 8;
+ uint32_t type : 3;
+ uint32_t error_code : 1;
+ uint32_t undef : 1;
+ uint32_t rsvd : 18;
+ uint32_t valid : 1;
+ } __attribute__ ((packed));
+ } __attribute__ ((packed));
+} __attribute__ ((packed));
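+
+/* If .valid is set after an exit, the exit interrupted an event that was
+ * being delivered through the IDT; the usual handling (a sketch, not shown
+ * here) is to re-inject that event through the entry interrupt info on the
+ * next VM entry. */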
+
+int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
+const char * v3_vmx_exit_code_to_str(vmx_exit_t exit);
#endif
/* The IRQ number is the number returned by pic_get_intr_number(), not the pin number */
static int pic_begin_irq(void * private_data, int irq) {
struct pic_internal * state = (struct pic_internal*)private_data;
-
+
if ((irq >= state->master_icw2) && (irq <= state->master_icw2 + 7)) {
- irq &= 0x7;
+ irq &= 0x7;
} else if ((irq >= state->slave_icw2) && (irq <= state->slave_icw2 + 7)) {
- irq &= 0x7;
- irq += 8;
+ irq &= 0x7;
+ irq += 8;
} else {
- // PrintError("8259 PIC: Could not find IRQ (0x%x) to Begin\n",irq);
- return -1;
+ // PrintError("8259 PIC: Could not find IRQ (0x%x) to Begin\n",irq);
+ return -1;
}
-
+
if (irq <= 7) {
- if (((state->master_irr & ~(state->master_imr)) >> irq) == 0x01) {
- state->master_isr |= (0x1 << irq);
+ if (((state->master_irr & ~(state->master_imr)) >> irq) == 0x01) {
+ state->master_isr |= (0x1 << irq);
- if (!(state->master_elcr & (0x1 << irq))) {
- state->master_irr &= ~(0x1 << irq);
- }
- }
+ if (!(state->master_elcr & (0x1 << irq))) {
+ state->master_irr &= ~(0x1 << irq);
+ }
+ }
} else {
- state->slave_isr |= (0x1 << (irq - 8));
+ state->slave_isr |= (0x1 << (irq - 8));
- if (!(state->slave_elcr & (0x1 << irq))) {
- state->slave_irr &= ~(0x1 << (irq - 8));
- }
+ if (!(state->slave_elcr & (0x1 << irq))) {
+ state->slave_irr &= ~(0x1 << (irq - 8));
+ }
}
return 0;
static int read_apic_msr(uint_t msr, v3_msr_t * dst, void * priv_data) {
struct vm_device * dev = (struct vm_device *)priv_data;
struct apic_state * apic = (struct apic_state *)dev->private_data;
- PrintError("READING APIC BASE ADDR: HI=%x LO=%x\n", apic->base_addr_msr.hi, apic->base_addr_msr.lo);
-
- return -1;
+ dst->value = apic->base_addr;
+ return 0;
}
/* Drive Commands */
static void ide_raise_irq(struct vm_device * dev, struct ide_channel * channel) {
if (channel->ctrl_reg.irq_disable == 0) {
- PrintDebug("Raising IDE Interrupt %d\n", channel->irq);
- channel->dma_status.int_gen = 1;
- v3_raise_irq(dev->vm, channel->irq);
+ PrintError("Raising IDE Interrupt %d\n", channel->irq);
+ channel->dma_status.int_gen = 1;
+ v3_raise_irq(dev->vm, channel->irq);
}
}
int intr_pin = pci_dev->config_header.intr_pin - 1;
int irq_index = (intr_pin + pci_dev->dev_num - 1) & 0x3;
- // PrintError("Raising PCI IRQ %d\n", piix3_cfg->pirq_rc[irq_index]);
+ PrintError("Raising PCI IRQ %d\n", piix3_cfg->pirq_rc[irq_index]);
v3_raise_irq(dev->vm, piix3_cfg->pirq_rc[irq_index]);
vmm_direct_paging.o \
vmm_emulator.o \
vmm_excp.o \
+ vmm_halt.o \
vmm_hashtable.o \
vmm_host_events.o \
vmm_hypercall.o \
obj-$(CONFIG_SVM) += svm.o \
- svm_halt.o \
svm_io.o \
svm_lowlevel.o \
svm_msr.o \
obj-$(CONFIG_TELEMETRY) += vmm_telemetry.o
obj-$(CONFIG_SOCKET) += vmm_socket.o
-obj-$(CONFIG_SYMBIOTIC_SWAP) += vmm_sym_swap.o
\ No newline at end of file
+obj-$(CONFIG_SYMBIOTIC_SWAP) += vmm_sym_swap.o
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/svm_io.h>
-#include <palacios/svm_halt.h>
+#include <palacios/vmm_halt.h>
#include <palacios/svm_pause.h>
#include <palacios/svm_wbinvd.h>
#include <palacios/vmm_intr.h>
#ifdef CONFIG_DEBUG_HALT
PrintDebug("Guest halted\n");
#endif
- if (v3_handle_svm_halt(info) == -1) {
+ if (v3_handle_halt(info) == -1) {
return -1;
}
break;
}
-
-int v3_handle_msr_write(struct guest_info * info) {
- uint_t msr_num = info->vm_regs.rcx;
- struct v3_msr msr_val;
- struct v3_msr_hook * hook = NULL;
-
- hook = v3_get_msr_hook(info, msr_num);
-
- if (!hook) {
- PrintError("Hook for MSR write %d not found\n", msr_num);
- return -1;
- }
-
- msr_val.value = 0;
- msr_val.lo = info->vm_regs.rax;
- msr_val.hi = info->vm_regs.rdx;
-
- if (hook->write(msr_num, msr_val, hook->priv_data) == -1) {
- PrintError("Error in MSR hook Write\n");
- return -1;
- }
-
- return 0;
-}
-
-
-
-int v3_handle_msr_read(struct guest_info * info) {
- uint_t msr_num = info->vm_regs.rcx;
- struct v3_msr msr_val;
- struct v3_msr_hook * hook = NULL;
-
- hook = v3_get_msr_hook(info, msr_num);
-
- if (!hook) {
- PrintError("Hook for MSR read %d not found\n", msr_num);
- return -1;
- }
-
- msr_val.value = 0;
-
- if (hook->read(msr_num, &msr_val, hook->priv_data) == -1) {
- PrintError("Error in MSR hook Read\n");
- return -1;
- }
-
- info->vm_regs.rax = msr_val.lo;
- info->vm_regs.rdx = msr_val.hi;
-
- return 0;
-}
#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm.h>
+#include <palacios/vmx.h>
+#include <palacios/vm_guest_mem.h>
+#include <palacios/vmm_ctrl_regs.h>
+#include <palacios/vmm_lowlevel.h>
+static void inline translate_v3_seg_to_access(struct v3_segment * v3_seg,
+ struct vmcs_segment_access * access)
+{
+ access->type = v3_seg->type;
+ access->desc_type = v3_seg->system;
+ access->dpl = v3_seg->dpl;
+ access->present = v3_seg->present;
+ access->avail = v3_seg->avail;
+ access->long_mode = v3_seg->long_mode;
+ access->db = v3_seg->db;
+ access->granularity = v3_seg->granularity;
+}
+
+static void inline translate_access_to_v3_seg(struct vmcs_segment_access * access,
+ struct v3_segment * v3_seg)
+{
+ v3_seg->type = access->type;
+ v3_seg->system = access->desc_type;
+ v3_seg->dpl = access->dpl;
+ v3_seg->present = access->present;
+ v3_seg->avail = access->avail;
+ v3_seg->long_mode = access->long_mode;
+ v3_seg->db = access->db;
+ v3_seg->granularity = access->granularity;
+}
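+
+/* Only the access-rights bits pass through these two helpers; the base,
+ * limit, and selector of each segment are moved as separate VMCS fields
+ * by the update/load routines below. */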
+
+
+static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
+{
+ int ret = 0;
+ ret = vmcs_write(field, val);
+
+ if (ret != VMX_SUCCESS) {
+ PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int inline check_vmcs_read(vmcs_field_t field, void * val)
+{
+ int ret = 0;
+ ret = vmcs_read(field, val);
+
+ if (ret != VMX_SUCCESS) {
+ PrintError("VMREAD error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
+ }
+
+ return ret;
+}
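+
+/* The update routines below OR the check_vmcs_write() results together and
+ * report once at the end, so any single failed VMWRITE marks the whole
+ * state update as failed. */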
// static const char * v3_vmcs_field_to_str(vmcs_field_t field);
// Ignores "HIGH" addresses - 32 bit only for now
//
+int v3_update_vmcs_guest_state(struct guest_info * info)
+{
+ int vmx_ret = 0;
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, info->rip);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);
+
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
+ vmx_ret |= check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, info->dbg_regs.dr7);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
+ if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
+ }
+
+
+ /*** Write VMCS Segments ***/
+ struct vmcs_segment_access access;
+
+ memset(&access, 0, sizeof(access));
+
+ /* CS Segment */
+ translate_v3_seg_to_access(&(info->segments.cs), &access);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
+
+ /* SS Segment */
+ memset(&access, 0, sizeof(access));
+ translate_v3_seg_to_access(&(info->segments.ss), &access);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
+
+ /* DS Segment */
+ memset(&access, 0, sizeof(access));
+ translate_v3_seg_to_access(&(info->segments.ds), &access);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
+
+
+ /* ES Segment */
+ memset(&access, 0, sizeof(access));
+ translate_v3_seg_to_access(&(info->segments.es), &access);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
+
+ /* FS Segment */
+ memset(&access, 0, sizeof(access));
+ translate_v3_seg_to_access(&(info->segments.fs), &access);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
+
+ /* GS Segment */
+ memset(&access, 0, sizeof(access));
+ translate_v3_seg_to_access(&(info->segments.gs), &access);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
+
+ /* LDTR segment */
+ memset(&access, 0, sizeof(access));
+ translate_v3_seg_to_access(&(info->segments.ldtr), &access);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);
+
+ /* TR Segment */
+ memset(&access, 0, sizeof(access));
+ translate_v3_seg_to_access(&(info->segments.tr), &access);
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);
+
+ /* GDTR Segment */
+
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);
+
+ /* IDTR Segment*/
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);
+
+ return vmx_ret;
+
+}
+
+int v3_update_vmcs_ctrl_fields(struct guest_info * info) {
+ int vmx_ret = 0;
+ struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
+
+ vmx_ret |= check_vmcs_write(VMCS_PIN_CTRLS, arch_data->pin_ctrls.value);
+ vmx_ret |= check_vmcs_write(VMCS_PROC_CTRLS, arch_data->pri_proc_ctrls.value);
+
+ if (arch_data->pri_proc_ctrls.sec_ctrls) {
+ vmx_ret |= check_vmcs_write(VMCS_SEC_PROC_CTRLS, arch_data->sec_proc_ctrls.value);
+ }
+
+ vmx_ret |= check_vmcs_write(VMCS_EXIT_CTRLS, arch_data->exit_ctrls.value);
+ vmx_ret |= check_vmcs_write(VMCS_ENTRY_CTRLS, arch_data->entry_ctrls.value);
+
+ return vmx_ret;
+}
+
+int v3_update_vmcs_host_state(struct guest_info * info) {
+ int vmx_ret = 0;
+ addr_t tmp;
+ struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
+ struct v3_msr tmp_msr;
+
+ __asm__ __volatile__ ( "movq %%cr0, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);
+
+
+ __asm__ __volatile__ ( "movq %%cr3, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);
+
+
+ __asm__ __volatile__ ( "movq %%cr4, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
+
+
+
+ vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
+ vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
+ vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
+
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
+
+ // FS.BASE MSR
+ v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);
+
+ // GS.BASE MSR
+ v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);
+
+
+
+ __asm__ __volatile__ ( "movq %%cs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%ss, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%ds, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%es, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%fs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
+
+ __asm__ __volatile__ ( "movq %%gs, %0; "
+ : "=q"(tmp)
+ :
+ );
+ vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
+
+ vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
+
+
+#define SYSENTER_CS_MSR 0x00000174
+#define SYSENTER_ESP_MSR 0x00000175
+#define SYSENTER_EIP_MSR 0x00000176
+
+ // SYSENTER CS MSR
+ v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);
+
+ // SYSENTER_ESP MSR
+ v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);
+
+ // SYSENTER_EIP MSR
+ v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
+
+ return vmx_ret;
+}
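+
+/* These "host" fields are what the hardware loads on VM exit, so they must
+ * mirror the control registers, segments, and MSRs the VMM is actually
+ * running with when VMLAUNCH happens. */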
+
+
+int v3_load_vmcs_guest_state(struct guest_info * info)
+{
+
+ int error = 0;
+
+ check_vmcs_read(VMCS_GUEST_RIP, &(info->rip));
+ check_vmcs_read(VMCS_GUEST_RSP, &(info->vm_regs.rsp));
+
+ check_vmcs_read(VMCS_GUEST_CR0, &(info->ctrl_regs.cr0));
+ check_vmcs_read(VMCS_CR0_READ_SHDW, &(info->shdw_pg_state.guest_cr0));
+ check_vmcs_read(VMCS_GUEST_CR3, &(info->ctrl_regs.cr3));
+ check_vmcs_read(VMCS_GUEST_CR4, &(info->ctrl_regs.cr4));
+ check_vmcs_read(VMCS_GUEST_DR7, &(info->dbg_regs.dr7));
+
+ check_vmcs_read(VMCS_GUEST_RFLAGS, &(info->ctrl_regs.rflags));
+ if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
+ check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
+ }
+
+ // JRL: Add error checking
+
+ struct vmcs_segment_access access;
+ memset(&access, 0, sizeof(access));
+
+ /* CS Segment */
+ check_vmcs_read(VMCS_GUEST_CS_BASE, &(info->segments.cs.base));
+ check_vmcs_read(VMCS_GUEST_CS_SELECTOR, &(info->segments.cs.selector));
+ check_vmcs_read(VMCS_GUEST_CS_LIMIT, &(info->segments.cs.limit));
+ check_vmcs_read(VMCS_GUEST_CS_ACCESS, &(access.value));
+
+ translate_access_to_v3_seg(&access, &(info->segments.cs));
+
+ /* SS Segment */
+ check_vmcs_read(VMCS_GUEST_SS_BASE, &(info->segments.ss.base));
+ check_vmcs_read(VMCS_GUEST_SS_SELECTOR, &(info->segments.ss.selector));
+ check_vmcs_read(VMCS_GUEST_SS_LIMIT, &(info->segments.ss.limit));
+ check_vmcs_read(VMCS_GUEST_SS_ACCESS, &(access.value));
+
+ translate_access_to_v3_seg(&access, &(info->segments.ss));
+
+ /* DS Segment */
+ check_vmcs_read(VMCS_GUEST_DS_BASE, &(info->segments.ds.base));
+ check_vmcs_read(VMCS_GUEST_DS_SELECTOR, &(info->segments.ds.selector));
+ check_vmcs_read(VMCS_GUEST_DS_LIMIT, &(info->segments.ds.limit));
+ check_vmcs_read(VMCS_GUEST_DS_ACCESS, &(access.value));
+
+ translate_access_to_v3_seg(&access, &(info->segments.ds));
+
+ /* ES Segment */
+ check_vmcs_read(VMCS_GUEST_ES_BASE, &(info->segments.es.base));
+ check_vmcs_read(VMCS_GUEST_ES_SELECTOR, &(info->segments.es.selector));
+ check_vmcs_read(VMCS_GUEST_ES_LIMIT, &(info->segments.es.limit));
+ check_vmcs_read(VMCS_GUEST_ES_ACCESS, &(access.value));
+
+ translate_access_to_v3_seg(&access, &(info->segments.es));
+
+ /* FS Segment */
+ check_vmcs_read(VMCS_GUEST_FS_BASE, &(info->segments.fs.base));
+ check_vmcs_read(VMCS_GUEST_FS_SELECTOR, &(info->segments.fs.selector));
+ check_vmcs_read(VMCS_GUEST_FS_LIMIT, &(info->segments.fs.limit));
+ check_vmcs_read(VMCS_GUEST_FS_ACCESS, &(access.value));
+
+ translate_access_to_v3_seg(&access, &(info->segments.fs));
+
+ /* GS Segment */
+ check_vmcs_read(VMCS_GUEST_GS_BASE, &(info->segments.gs.base));
+ check_vmcs_read(VMCS_GUEST_GS_SELECTOR, &(info->segments.gs.selector));
+ check_vmcs_read(VMCS_GUEST_GS_LIMIT, &(info->segments.gs.limit));
+ check_vmcs_read(VMCS_GUEST_GS_ACCESS, &(access.value));
+
+ translate_access_to_v3_seg(&access, &(info->segments.gs));
+
+ /* LDTR Segment */
+ check_vmcs_read(VMCS_GUEST_LDTR_BASE, &(info->segments.ldtr.base));
+ check_vmcs_read(VMCS_GUEST_LDTR_SELECTOR, &(info->segments.ldtr.selector));
+ check_vmcs_read(VMCS_GUEST_LDTR_LIMIT, &(info->segments.ldtr.limit));
+ check_vmcs_read(VMCS_GUEST_LDTR_ACCESS, &(access.value));
+
+ translate_access_to_v3_seg(&access, &(info->segments.ldtr));
+
+ /* TR Segment */
+ check_vmcs_read(VMCS_GUEST_TR_BASE, &(info->segments.tr.base));
+ check_vmcs_read(VMCS_GUEST_TR_SELECTOR, &(info->segments.tr.selector));
+ check_vmcs_read(VMCS_GUEST_TR_LIMIT, &(info->segments.tr.limit));
+ check_vmcs_read(VMCS_GUEST_TR_ACCESS, &(access.value));
+
+ translate_access_to_v3_seg(&access, &(info->segments.tr));
+
+ /* GDTR Segment */
+ check_vmcs_read(VMCS_GUEST_GDTR_BASE, &(info->segments.gdtr.base));
+ check_vmcs_read(VMCS_GUEST_GDTR_LIMIT, &(info->segments.gdtr.limit));
+
+ /* IDTR Segment */
+ check_vmcs_read(VMCS_GUEST_IDTR_BASE, &(info->segments.idtr.base));
+ check_vmcs_read(VMCS_GUEST_IDTR_LIMIT, &(info->segments.idtr.limit));
+
+ return error;
+}
static inline void print_vmcs_field(vmcs_field_t vmcs_index) {
int len = v3_vmcs_get_field_len(vmcs_index);
print_vmcs_field(VMCS_EXIT_INSTR_LEN);
print_vmcs_field(VMCS_GUEST_LINEAR_ADDR);
- print_vmcs_field(VMCS_VMX_INSTR_INFO);
+ print_vmcs_field(VMCS_EXIT_INSTR_INFO);
print_vmcs_field(VMCS_IO_RCX);
print_vmcs_field(VMCS_IO_RSI);
case VMCS_IDT_VECTOR_INFO:
case VMCS_IDT_VECTOR_ERR:
case VMCS_EXIT_INSTR_LEN:
- case VMCS_VMX_INSTR_INFO:
+ case VMCS_EXIT_INSTR_INFO:
case VMCS_GUEST_ES_LIMIT:
case VMCS_GUEST_CS_LIMIT:
case VMCS_GUEST_SS_LIMIT:
static const char VMCS_IDT_VECTOR_INFO_STR[] = "IDT_VECTOR_INFO";
static const char VMCS_IDT_VECTOR_ERR_STR[] = "IDT_VECTOR_ERROR";
static const char VMCS_EXIT_INSTR_LEN_STR[] = "VM_EXIT_INSTR_LENGTH";
-static const char VMCS_VMX_INSTR_INFO_STR[] = "VMX_INSTR_INFO";
+static const char VMCS_EXIT_INSTR_INFO_STR[] = "VM_EXIT_INSTR_INFO";
static const char VMCS_GUEST_ES_LIMIT_STR[] = "GUEST_ES_LIMIT";
static const char VMCS_GUEST_CS_LIMIT_STR[] = "GUEST_CS_LIMIT";
static const char VMCS_GUEST_SS_LIMIT_STR[] = "GUEST_SS_LIMIT";
return VMCS_IDT_VECTOR_ERR_STR;
case VMCS_EXIT_INSTR_LEN:
return VMCS_EXIT_INSTR_LEN_STR;
- case VMCS_VMX_INSTR_INFO:
- return VMCS_VMX_INSTR_INFO_STR;
+ case VMCS_EXIT_INSTR_INFO:
+ return VMCS_EXIT_INSTR_INFO_STR;
case VMCS_GUEST_ES_LIMIT:
return VMCS_GUEST_ES_LIMIT_STR;
case VMCS_GUEST_CS_LIMIT:
dst->value = info->shdw_pg_state.guest_efer.value;
- info->rip += 2; // WRMSR/RDMSR are two byte operands
return 0;
}
// Enable/Disable Syscall
shadow_efer->sce = src.value & 0x1;
- info->rip += 2; // WRMSR/RDMSR are two byte operands
-
return 0;
}
pde[pde_index].present = 1;
pde[pde_index].writable = 1;
- pde[pde_index].user_page = 0;
+ pde[pde_index].user_page = 1;
pde[pde_index].pt_base_addr = PAGE_BASE_ADDR((addr_t)V3_PAddr(pte));
} else {
// Fix up the PTE entry
if (pte[pte_index].present == 0) {
- pte[pte_index].user_page = 0;
+ pte[pte_index].user_page = 1;
if (region->host_type == SHDW_REGION_ALLOCATED) {
// Full access
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
-#include <palacios/svm_halt.h>
+#include <palacios/vmm_halt.h>
#include <palacios/vmm_intr.h>
// This should trigger a #GP if cpl != 0, otherwise, yield to host
//
-int v3_handle_svm_halt(struct guest_info * info) {
+int v3_handle_halt(struct guest_info * info) {
if (info->cpl != 0) {
v3_raise_exception(info, GPF_EXCEPTION);
PrintDebug("CPU Yield Done (%d cycles)\n", gap);
- info->rip+=1;
+ info->rip += 1;
}
return 0;
msr_map->update_map = NULL;
}
+int v3_handle_msr_write(struct guest_info * info) {
+ uint_t msr_num = info->vm_regs.rcx;
+ struct v3_msr msr_val;
+ struct v3_msr_hook * hook = NULL;
+
+ hook = v3_get_msr_hook(info, msr_num);
+
+ if (!hook) {
+ PrintError("Hook for MSR write %d not found\n", msr_num);
+ return -1;
+ }
+
+ msr_val.value = 0;
+ msr_val.lo = info->vm_regs.rax;
+ msr_val.hi = info->vm_regs.rdx;
+
+ if (hook->write(msr_num, msr_val, hook->priv_data) == -1) {
+ PrintError("Error in MSR hook Write\n");
+ return -1;
+ }
+
+ info->rip += 2;
+
+ return 0;
+}
+
+
+int v3_handle_msr_read(struct guest_info * info) {
+ uint_t msr_num = info->vm_regs.rcx;
+ struct v3_msr msr_val;
+ struct v3_msr_hook * hook = NULL;
+
+ hook = v3_get_msr_hook(info, msr_num);
+
+ if (!hook) {
+ PrintError("Hook for MSR read %d not found\n", msr_num);
+ return -1;
+ }
+
+ msr_val.value = 0;
+
+ if (hook->read(msr_num, &msr_val, hook->priv_data) == -1) {
+ PrintError("Error in MSR hook Read\n");
+ return -1;
+ }
+
+ info->vm_regs.rax = msr_val.lo;
+ info->vm_regs.rdx = msr_val.hi;
+
+ info->rip += 2;
+ return 0;
+}
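+
+/* RDMSR/WRMSR move the 64-bit value through EDX:EAX, which is why msr_val
+ * is split across rdx/rax above; both are two-byte opcodes, so the handlers
+ * can advance rip by 2 without decoding the instruction. */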
int v3_hook_msr(struct guest_info * info, uint_t msr,
int (*read)(uint_t msr, struct v3_msr * dst, void * priv_data),
#include <palacios/vmx.h>
#include <palacios/vmm.h>
+#include <palacios/vmcs.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
static addr_t vmxon_ptr_phys;
extern int v3_vmx_exit_handler();
-extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs, struct guest_info * info);
+extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
-static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
-{
+static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
int ret = 0;
+
ret = vmcs_write(field,val);
if (ret != VMX_SUCCESS) {
return 0;
}
-static void inline translate_segment_access(struct v3_segment * v3_seg,
- struct vmcs_segment_access * access)
-{
- access->type = v3_seg->type;
- access->desc_type = v3_seg->system;
- access->dpl = v3_seg->dpl;
- access->present = v3_seg->present;
- access->avail = v3_seg->avail;
- access->long_mode = v3_seg->long_mode;
- access->db = v3_seg->db;
- access->granularity = v3_seg->granularity;
-}
-
-int v3_update_vmcs_ctrl_fields(struct guest_info * info) {
- int vmx_ret = 0;
- struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
-
- vmx_ret |= check_vmcs_write(VMCS_PIN_CTRLS, arch_data->pinbased_ctrls);
- vmx_ret |= check_vmcs_write(VMCS_PROC_CTRLS, arch_data->pri_procbased_ctrls);
-
- if(arch_data->pri_procbased_ctrls & ACTIVE_SEC_CTRLS) {
- vmx_ret |= check_vmcs_write(VMCS_SEC_PROC_CTRLS, arch_data->sec_procbased_ctrls);
- }
-
- vmx_ret |= check_vmcs_write(VMCS_EXIT_CTRLS, arch_data->exit_ctrls);
- vmx_ret |= check_vmcs_write(VMCS_ENTRY_CTRLS, arch_data->entry_ctrls);
-
- return vmx_ret;
-}
-
-int v3_update_vmcs_host_state(struct guest_info * info) {
- int vmx_ret = 0;
- addr_t tmp;
- struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
- struct v3_msr tmp_msr;
-
- __asm__ __volatile__ ( "movq %%cr0, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);
-
-
- __asm__ __volatile__ ( "movq %%cr3, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);
-
-
- __asm__ __volatile__ ( "movq %%cr4, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
-
-
-
- vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
- vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
- vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
-
-#define FS_BASE_MSR 0xc0000100
-#define GS_BASE_MSR 0xc0000101
-
- // FS.BASE MSR
- v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);
-
- // GS.BASE MSR
- v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);
-
-
-
- __asm__ __volatile__ ( "movq %%cs, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
-
- __asm__ __volatile__ ( "movq %%ss, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
-
- __asm__ __volatile__ ( "movq %%ds, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
-
- __asm__ __volatile__ ( "movq %%es, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
-
- __asm__ __volatile__ ( "movq %%fs, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
-
- __asm__ __volatile__ ( "movq %%gs, %0; "
- : "=q"(tmp)
- :
- );
- vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
-
- vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
-
-
-#define SYSENTER_CS_MSR 0x00000174
-#define SYSENTER_ESP_MSR 0x00000175
-#define SYSENTER_EIP_MSR 0x00000176
-
- // SYSENTER CS MSR
- v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);
-
- // SYSENTER_ESP MSR
- v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);
-
- // SYSENTER_EIP MSR
- v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
-
- return vmx_ret;
-}
-
-
-int v3_update_vmcs_guest_state(struct guest_info * info)
-{
- int vmx_ret = 0;
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, info->rip);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);
-
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
-
-
-
- /*** Write VMCS Segments ***/
- struct vmcs_segment_access access;
-
- memset(&access, 0, sizeof(access));
-
- /* CS Segment */
- translate_segment_access(&(info->segments.cs), &access);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
-
- /* SS Segment */
- memset(&access, 0, sizeof(access));
- translate_segment_access(&(info->segments.ss), &access);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
-
- /* DS Segment */
- memset(&access, 0, sizeof(access));
- translate_segment_access(&(info->segments.ds), &access);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
-
-
- /* ES Segment */
- memset(&access, 0, sizeof(access));
- translate_segment_access(&(info->segments.es), &access);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
-
- /* FS Segment */
- memset(&access, 0, sizeof(access));
- translate_segment_access(&(info->segments.fs), &access);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
-
- /* GS Segment */
- memset(&access, 0, sizeof(access));
- translate_segment_access(&(info->segments.gs), &access);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
-
- /* LDTR segment */
- memset(&access, 0, sizeof(access));
- translate_segment_access(&(info->segments.ldtr), &access);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);
-
- /* TR Segment */
- memset(&access, 0, sizeof(access));
- translate_segment_access(&(info->segments.tr), &access);
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);
-
- /* GDTR Segment */
-
- vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);
-
- /* IDTR Segment*/
- vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);
-
- return vmx_ret;
-
-}
-
-
-
-
#if 0
// For the 32 bit reserved bit fields
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
#endif
-static addr_t allocate_vmcs()
-{
+static addr_t allocate_vmcs() {
reg_ex_t msr;
- PrintDebug("Allocating page\n");
- struct vmcs_data * vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
+ struct vmcs_data * vmcs_page = NULL;
+ PrintDebug("Allocating page\n");
+ vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
memset(vmcs_page, 0, 4096);
v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
- v3_pre_config_guest(info, config_ptr);
+ struct vmx_data * vmx_info = NULL;
+ int vmx_ret = 0;
- struct vmx_data * vmx_data = NULL;
+ v3_pre_config_guest(info, config_ptr);
- vmx_data = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
+ vmx_info = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
- PrintDebug("vmx_data pointer: %p\n", (void *)vmx_data);
+ PrintDebug("vmx_data pointer: %p\n", (void *)vmx_info);
PrintDebug("Allocating VMCS\n");
- vmx_data->vmcs_ptr_phys = allocate_vmcs();
+ vmx_info->vmcs_ptr_phys = allocate_vmcs();
- PrintDebug("VMCS pointer: %p\n", (void *)(vmx_data->vmcs_ptr_phys));
+ PrintDebug("VMCS pointer: %p\n", (void *)(vmx_info->vmcs_ptr_phys));
- info->vmm_data = vmx_data;
+ info->vmm_data = vmx_info;
PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
// TODO: Fix vmcs fields so they're 32-bit
- int vmx_ret = 0;
- PrintDebug("Clearing VMCS: %p\n",(void*)vmx_data->vmcs_ptr_phys);
- vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);
+ PrintDebug("Clearing VMCS: %p\n", (void *)vmx_info->vmcs_ptr_phys);
+ vmx_ret = vmcs_clear(vmx_info->vmcs_ptr_phys);
if (vmx_ret != VMX_SUCCESS) {
PrintError("VMCLEAR failed\n");
}
PrintDebug("Loading VMCS\n");
- vmx_ret = vmcs_load(vmx_data->vmcs_ptr_phys);
+ vmx_ret = vmcs_load(vmx_info->vmcs_ptr_phys);
if (vmx_ret != VMX_SUCCESS) {
PrintError("VMPTRLD failed\n");
: "memory"
);
gdtr_base = tmp_seg.base;
- vmx_data->host_state.gdtr.base = gdtr_base;
+ vmx_info->host_state.gdtr.base = gdtr_base;
__asm__ __volatile__(
"sidt (%0);"
: "q"(&tmp_seg)
: "memory"
);
- vmx_data->host_state.idtr.base = tmp_seg.base;
+ vmx_info->host_state.idtr.base = tmp_seg.base;
__asm__ __volatile__(
"str (%0);"
: "q"(&tmp_seg)
: "memory"
);
- vmx_data->host_state.tr.selector = tmp_seg.selector;
+ vmx_info->host_state.tr.selector = tmp_seg.selector;
/* The GDTR *index* is bits 3-15 of the selector. */
- struct tss_descriptor * desc = (struct tss_descriptor *)
- (gdtr_base + 8*(tmp_seg.selector>>3));
+ struct tss_descriptor * desc = NULL;
+ desc = (struct tss_descriptor *)(gdtr_base + (8 * (tmp_seg.selector >> 3)));
- tmp_seg.base = (
- (desc->base1) |
+ tmp_seg.base = ((desc->base1) |
(desc->base2 << 16) |
(desc->base3 << 24) |
#ifdef __V3_64BIT__
#else
(0)
#endif
- );
+ );
- vmx_data->host_state.tr.base = tmp_seg.base;
+ vmx_info->host_state.tr.base = tmp_seg.base;
/********** Setup and VMX Control Fields from MSR ***********/
/* Setup IO map */
- (void) v3_init_vmx_io_map(info);
- (void) v3_init_vmx_msr_map(info);
+ v3_init_vmx_io_map(info);
+ v3_init_vmx_msr_map(info);
struct v3_msr tmp_msr;
- v3_get_msr(VMX_PINBASED_CTLS_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
- /* Add NMI exiting */
- vmx_data->pinbased_ctrls = tmp_msr.lo | NMI_EXIT;
+ v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+
+ /* Add external interrupts, NMI exiting, and virtual NMI */
+ vmx_info->pin_ctrls.value = tmp_msr.lo;
+ vmx_info->pin_ctrls.nmi_exit = 1;
+ vmx_info->pin_ctrls.ext_int_exit = 1;
v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- PrintDebug("MSR High: 0x%x\n", tmp_msr.hi);
- vmx_data->pri_procbased_ctrls = tmp_msr.lo | USE_IO_BITMAPS ;
+ vmx_info->pri_proc_ctrls.value = tmp_msr.lo;
+ vmx_info->pri_proc_ctrls.use_io_bitmap = 1;
+ vmx_info->pri_proc_ctrls.hlt_exit = 1;
+ vmx_info->pri_proc_ctrls.invlpg_exit = 1;
+ vmx_info->pri_proc_ctrls.use_msr_bitmap = 1;
+ vmx_info->pri_proc_ctrls.pause_exit = 1;
vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->io_map.arch_data));
vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->msr_map.arch_data));
v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_data->exit_ctrls = tmp_msr.lo ;
-
- v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_data->entry_ctrls = tmp_msr.lo;
+ vmx_info->exit_ctrls.value = tmp_msr.lo;
+ vmx_info->exit_ctrls.host_64_on = 1;
- struct vmx_exception_bitmap excp_bmap;
- excp_bmap.value = 0xffffffff;
- excp_bmap.gp = 0;
- vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
+ if ((vmx_info->exit_ctrls.save_efer == 1) || (vmx_info->exit_ctrls.ld_efer == 1)) {
+ vmx_info->ia32e_avail = 1;
+ }
+ v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_info->entry_ctrls.value = tmp_msr.lo;
+ {
+ struct vmx_exception_bitmap excp_bmap;
+ excp_bmap.value = 0;
+
+ excp_bmap.pf = 1;
+
+ vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
+ }
/******* Setup VMXAssist guest state ***********/
info->rip = 0xd0000;
/* Print Control MSRs */
v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
+
v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
#define GUEST_CR4 0x00002000
info->ctrl_regs.cr0 = GUEST_CR0;
info->ctrl_regs.cr4 = GUEST_CR4;
+
+ ((struct cr0_32 *)&(info->shdw_pg_state.guest_cr0))->pe = 1;
/* Setup paging */
- if(info->shdw_pg_mode == SHADOW_PAGING) {
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
- if(v3_init_passthrough_pts(info) == -1) {
+ if (v3_init_passthrough_pts(info) == -1) {
PrintError("Could not initialize passthrough page tables\n");
return -1;
}
+
+#define CR0_PE 0x00000001
+#define CR0_PG 0x80000000
- info->shdw_pg_state.guest_cr0 = CR0_PE;
- PrintDebug("Created\n");
vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG) );
- vmx_ret |= check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
info->ctrl_regs.cr3 = info->direct_map_pt;
- // vmx_data->pinbased_ctrls |= NMI_EXIT;
+ // vmx_info->pinbased_ctrls |= NMI_EXIT;
/* Add CR exits */
- vmx_data->pri_procbased_ctrls |= CR3_LOAD_EXIT
- | CR3_STORE_EXIT;
-
- vmx_data->exit_ctrls |= HOST_ADDR_SPACE_SIZE;
+ vmx_info->pri_proc_ctrls.cr3_ld_exit = 1;
+ vmx_info->pri_proc_ctrls.cr3_str_exit = 1;
}
- struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);
-
- int i;
- for(i=0; i < 10; i++)
+ // Setup segment registers
{
- seg_reg[i].selector = 3<<3;
- seg_reg[i].limit = 0xffff;
- seg_reg[i].base = 0x0;
- }
- info->segments.cs.selector = 2<<3;
-
- /* Set only the segment registers */
- for(i=0; i < 6; i++) {
- seg_reg[i].limit = 0xfffff;
- seg_reg[i].granularity = 1;
- seg_reg[i].type = 3;
- seg_reg[i].system = 1;
- seg_reg[i].dpl = 0;
- seg_reg[i].present = 1;
- seg_reg[i].db = 1;
- }
- info->segments.cs.type = 0xb;
+ struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);
+
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ seg_reg[i].selector = 3 << 3;
+ seg_reg[i].limit = 0xffff;
+ seg_reg[i].base = 0x0;
+ }
+
+ info->segments.cs.selector = 2<<3;
+
+ /* Set only the segment registers */
+ for (i = 0; i < 6; i++) {
+ seg_reg[i].limit = 0xfffff;
+ seg_reg[i].granularity = 1;
+ seg_reg[i].type = 3;
+ seg_reg[i].system = 1;
+ seg_reg[i].dpl = 0;
+ seg_reg[i].present = 1;
+ seg_reg[i].db = 1;
+ }
+
+ info->segments.cs.type = 0xb;
+
+ info->segments.ldtr.selector = 0x20;
+ info->segments.ldtr.type = 2;
+ info->segments.ldtr.system = 0;
+ info->segments.ldtr.present = 1;
+ info->segments.ldtr.granularity = 0;
- info->segments.ldtr.selector = 0x20;
- info->segments.ldtr.type = 2;
- info->segments.ldtr.system = 0;
- info->segments.ldtr.present = 1;
- info->segments.ldtr.granularity = 0;
-
- /************* Map in GDT and vmxassist *************/
+ /************* Map in GDT and vmxassist *************/
- uint64_t gdt[] __attribute__ ((aligned(32))) = {
- 0x0000000000000000ULL, /* 0x00: reserved */
- 0x0000830000000000ULL, /* 0x08: 32-bit TSS */
- //0x0000890000000000ULL, /* 0x08: 32-bit TSS */
- 0x00CF9b000000FFFFULL, /* 0x10: CS 32-bit */
- 0x00CF93000000FFFFULL, /* 0x18: DS 32-bit */
- 0x000082000000FFFFULL, /* 0x20: LDTR 32-bit */
- };
+ uint64_t gdt[] __attribute__ ((aligned(32))) = {
+ 0x0000000000000000ULL, /* 0x00: reserved */
+ 0x0000830000000000ULL, /* 0x08: 32-bit TSS */
+ //0x0000890000000000ULL, /* 0x08: 32-bit TSS */
+ 0x00CF9b000000FFFFULL, /* 0x10: CS 32-bit */
+ 0x00CF93000000FFFFULL, /* 0x18: DS 32-bit */
+ 0x000082000000FFFFULL, /* 0x20: LDTR 32-bit */
+ };
#define VMXASSIST_GDT 0x10000
- addr_t vmxassist_gdt = 0;
- if(guest_pa_to_host_va(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
- PrintError("Could not find VMXASSIST GDT destination\n");
- return -1;
- }
- memcpy((void*)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);
+ addr_t vmxassist_gdt = 0;
+
+ if (guest_pa_to_host_va(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
+ PrintError("Could not find VMXASSIST GDT destination\n");
+ return -1;
+ }
+
+ memcpy((void *)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);
- info->segments.gdtr.base = VMXASSIST_GDT;
+ info->segments.gdtr.base = VMXASSIST_GDT;
#define VMXASSIST_TSS 0x40000
- uint64_t vmxassist_tss = VMXASSIST_TSS;
- gdt[0x08 / sizeof(gdt[0])] |=
- ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
- ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
- ((vmxassist_tss & 0x0000FFFF) << (16)) |
- (8392 - 1);
-
- info->segments.tr.selector = 0x08;
- info->segments.tr.base = vmxassist_tss;
-
- //info->segments.tr.type = 0x9;
- info->segments.tr.type = 0x3;
- info->segments.tr.system = 0;
- info->segments.tr.present = 1;
- info->segments.tr.granularity = 0;
-
+ uint64_t vmxassist_tss = VMXASSIST_TSS;
+ gdt[0x08 / sizeof(gdt[0])] |=
+ ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
+ ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
+ ((vmxassist_tss & 0x0000FFFF) << (16)) |
+ (8392 - 1);
+
+ info->segments.tr.selector = 0x08;
+ info->segments.tr.base = vmxassist_tss;
+
+ //info->segments.tr.type = 0x9;
+ info->segments.tr.type = 0x3;
+ info->segments.tr.system = 0;
+ info->segments.tr.present = 1;
+ info->segments.tr.granularity = 0;
+ }
+ // setup VMXASSIST
+ {
#define VMXASSIST_START 0x000d0000
- extern uint8_t v3_vmxassist_start[];
- extern uint8_t v3_vmxassist_end[];
+ extern uint8_t v3_vmxassist_start[];
+ extern uint8_t v3_vmxassist_end[];
+ addr_t vmxassist_dst = 0;
+
+ if (guest_pa_to_host_va(info, VMXASSIST_START, &vmxassist_dst) == -1) {
+ PrintError("Could not find VMXASSIST destination\n");
+ return -1;
+ }
+
+ memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
+ }
- addr_t vmxassist_dst = 0;
- if(guest_pa_to_host_va(info, VMXASSIST_START, &vmxassist_dst) == -1) {
- PrintError("Could not find VMXASSIST destination\n");
- return -1;
- }
- memcpy((void*)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
-
/*** Write all the info to the VMCS ***/
#define DEBUGCTL_MSR 0x1d9
v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
- vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, 0x400);
+ info->dbg_regs.dr7 = 0x400;
vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
- if(v3_update_vmcs_ctrl_fields(info)) {
+ if (v3_update_vmcs_ctrl_fields(info)) {
PrintError("Could not write control fields!\n");
return -1;
}
- if(v3_update_vmcs_host_state(info)) {
+ if (v3_update_vmcs_host_state(info)) {
PrintError("Could not write host state\n");
return -1;
}
- if(v3_update_vmcs_guest_state(info) != VMX_SUCCESS) {
+ if (v3_update_vmcs_guest_state(info) != VMX_SUCCESS) {
PrintError("Writing guest state failed!\n");
return -1;
}
v3_print_vmcs();
- vmx_data->state = VMXASSIST_DISABLED;
+ vmx_info->state = VMXASSIST_DISABLED;
v3_post_config_guest(info, config_ptr);
PrintDebug("Attempting VMLAUNCH\n");
- ret = v3_vmx_vmlaunch(&(info->vm_regs), info);
+ info->run_state = VM_RUNNING;
+
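+ // Snapshot the host TSC right before entry so exit handling can account guest time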
+ rdtscll(info->time_state.cached_host_tsc);
+
+ ret = v3_vmx_vmlaunch(&(info->vm_regs), info, &(info->ctrl_regs));
+
if (ret != VMX_SUCCESS) {
vmcs_read(VMCS_INSTR_ERR, &error);
PrintError("VMLAUNCH failed: %d\n", error);
v3_print_vmcs();
-
}
- PrintDebug("Returned from VMLAUNCH ret=%d(0x%x)\n", ret, ret);
+
+ PrintDebug("Returned from VMLAUNCH ret=%d\n", ret);
return -1;
}
void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
extern v3_cpu_arch_t v3_cpu_type;
-
struct v3_msr tmp_msr;
- uint64_t ret=0;
+ uint64_t ret = 0;
v3_get_msr(VMX_CR4_FIXED0_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
: "%rbx"
);
- if((~ret & tmp_msr.value) == 0) {
+ if ((~ret & tmp_msr.value) == 0) {
__asm__ __volatile__ (
"movq %0, %%cr4;"
:
PrintError("Invalid CR4 Settings!\n");
return;
}
- __asm__ __volatile__ (
- "movq %%cr0, %%rbx; "
- "orq $0x00000020,%%rbx; "
- "movq %%rbx, %%cr0;"
- :
- :
- : "%rbx"
- );
- //
+
+ __asm__ __volatile__ (
+ "movq %%cr0, %%rbx; "
+ "orq $0x00000020,%%rbx; "
+ "movq %%rbx, %%cr0;"
+ :
+ :
+ : "%rbx"
+ );
+ //
// Should check and return Error here....
// Setup VMXON Region
vmxon_ptr_phys = allocate_vmcs();
- PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr_phys);
+
+ PrintDebug("VMXON pointer: 0x%p\n", (void *)vmxon_ptr_phys);
if (v3_enable_vmx(vmxon_ptr_phys) == VMX_SUCCESS) {
PrintDebug("VMX Enabled\n");
struct vmx_assist_context * old_ctx = NULL;
struct vmx_assist_context * new_ctx = NULL;
struct vmx_assist_header * hdr = NULL;
- vmx_state_t state = ((struct vmx_data *)info->vmm_data)->state;
+ struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
+
if (guest_pa_to_host_va(info, VMXASSIST_BASE, (addr_t *)&hdr) == -1) {
- PrintError("Could not translate address for vmxassist header\n");
- return -1;
+ PrintError("Could not translate address for vmxassist header\n");
+ return -1;
}
if (hdr->magic != VMXASSIST_MAGIC) {
- PrintError("VMXASSIT_MAGIC field is invalid\n");
+ PrintError("VMXASSIST_MAGIC field is invalid\n");
return -1;
}
if (guest_pa_to_host_va(info, (addr_t)(hdr->old_ctx_gpa), (addr_t *)&(old_ctx)) == -1) {
- PrintError("Could not translate address for VMXASSIST old context\n");
- return -1;
+ PrintError("Could not translate address for VMXASSIST old context\n");
+ return -1;
}
if (guest_pa_to_host_va(info, (addr_t)(hdr->new_ctx_gpa), (addr_t *)&(new_ctx)) == -1) {
- PrintError("Could not translate address for VMXASSIST new context\n");
- return -1;
+ PrintError("Could not translate address for VMXASSIST new context\n");
+ return -1;
}
-
- if (state == VMXASSIST_DISABLED) {
-
- /* Save the old Context */
+ if (vmx_info->state == VMXASSIST_DISABLED) {
+
+ /* Save the old Context */
if (vmx_save_world_ctx(info, old_ctx) != 0) {
- PrintError("Could not save VMXASSIST world context\n");
+ PrintError("Could not save VMXASSIST world context\n");
return -1;
- }
+ }
/* restore new context, vmxassist should launch the bios the first time */
if (vmx_restore_world_ctx(info, new_ctx) != 0) {
- PrintError("VMXASSIST could not restore new context\n");
+ PrintError("VMXASSIST could not restore new context\n");
return -1;
- }
+ }
- } else if (state == VMXASSIST_ENABLED) {
+ vmx_info->state = VMXASSIST_ENABLED;
+
+ } else if (vmx_info->state == VMXASSIST_ENABLED) {
/* restore old context */
if (vmx_restore_world_ctx(info, old_ctx) != 0) {
- PrintError("VMXASSIST could not restore old context\n");
+ PrintError("VMXASSIST could not restore old context\n");
return -1;
- }
+ }
+
+ vmx_info->state = VMXASSIST_DISABLED;
}
return 0;
#include <palacios/vmx.h>
#include <palacios/vmx_assist.h>
#include <palacios/vm_guest_mem.h>
+#include <palacios/vmm_direct_paging.h>
+#include <palacios/vmx_handler.h>
-static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t new_val);
+static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual);
+static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val);
+static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
+static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
-int v3_vmx_handle_cr0_write(struct guest_info * info, v3_reg_t new_val) {
- return handle_mov_to_cr0(info, new_val);
+int v3_vmx_handle_cr0_access(struct guest_info * info) {
+ struct vmx_exit_cr_qual cr_qual;
+
+ vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
+
+ if (cr_qual.access_type < 2) {
+ v3_reg_t * reg = get_reg_ptr(info, cr_qual);
+
+ if (cr_qual.access_type == 0) {
+
+ if (handle_mov_to_cr0(info, reg) != 0) {
+ PrintError("Could not handle CR0 write\n");
+ return -1;
+ }
+ } else {
+ // Mov from cr
+ PrintError("Mov From CR0 not handled\n");
+ return -1;
+ }
+
+ return 0;
+ }
+
+ PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual.access_type);
+ return -1;
+}
+
+int v3_vmx_handle_cr3_access(struct guest_info * info) {
+ struct vmx_exit_cr_qual cr_qual;
+
+ vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
+
+ if (cr_qual.access_type < 2) {
+ v3_reg_t * reg = get_reg_ptr(info, cr_qual);
+
+ if (cr_qual.access_type == 0) {
+ return handle_mov_to_cr3(info, reg);
+ } else {
+ return handle_mov_from_cr3(info, reg);
+ }
+ }
+
+ PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual.access_type);
+ return -1;
}
-static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t new_val) {
- PrintDebug("CR0 RIP: %p\n", (void *)info->rip);
+static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
+ int instr_len = 0;
+
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+
+ PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
+ (void *)info->ctrl_regs.cr3,
+ (void *)info->shdw_pg_state.guest_cr3);
+
+ if (info->cpu_mode == LONG) {
+ info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
+ } else {
+ info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
+ }
+
+
+ if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
+ if (v3_activate_shadow_pt(info) == -1) {
+ PrintError("Failed to activate 32 bit shadow page table\n");
+ return -1;
+ }
+ }
+
+ PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
+ (void *)info->ctrl_regs.cr3,
+ (void *)info->shdw_pg_state.guest_cr3);
+
+ } else if (info->shdw_pg_mode == NESTED_PAGING) {
+ PrintError("Nested paging not available in VMX right now!\n");
+ return -1;
+ }
+
+
+ vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
+ info->rip += instr_len;
+
+ return 0;
+}
+
+static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
+ int instr_len = 0;
+
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if ((v3_get_vm_cpu_mode(info) == LONG) ||
+ (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
+
+ *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
+ } else {
+ *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
+ }
+
+ } else {
+ PrintError("Unhandled paging mode\n");
+ return -1;
+ }
+
+
+ vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
+ info->rip += instr_len;
+
+ return 0;
+}
+
+static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0) {
struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
- struct cr0_32 * new_cr0 = (struct cr0_32 *)&new_val;
- struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
+ struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
+ struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
+ struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
+ uint_t paging_transition = 0;
+ int instr_len = 0;
- // PG and PE are always enabled for VMX
+ PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
+ (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
- // Check if this is a paging transition
- PrintDebug("Old CR0: 0x%x\n", *(uint32_t *)guest_cr0);
- PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shadow_cr0);
- PrintDebug("New CR0: 0x%x\n", *(uint32_t *)new_cr0);
-
- if ( new_cr0->pe ) {
+
+ if (new_shdw_cr0->pe != shdw_cr0->pe) {
+ PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
+ PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
+ PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
if (v3_vmxassist_ctx_switch(info) != 0) {
PrintError("Unable to execute VMXASSIST context switch!\n");
return -1;
}
- ((struct vmx_data *)info->vmm_data)->state = VMXASSIST_DISABLED;
+ v3_load_vmcs_guest_state(info);
- PrintDebug("New Shadow: 0x%x\n", *(uint32_t *)shadow_cr0);
- PrintDebug("mem_mode: %s\n", v3_mem_mode_to_str(v3_get_vm_mem_mode(info)));
+ if (vmx_info->state == VMXASSIST_ENABLED) {
+ PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)info->rip);
+ } else {
+ PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
+ (void *)info->rip);
+ }
+ // VMXASSIST sets the new CR values itself
return 0;
}
- return -1;
+ if (new_shdw_cr0->pg != shdw_cr0->pg) {
+ paging_transition = 1;
+ }
+
+ // The shadow always reflects the new value
+ *shdw_cr0 = *new_shdw_cr0;
+
+ // We don't care about most of the flags, so copy the new shadow
+ // values straight through to the real guest CR0
+ *guest_cr0 = *shdw_cr0;
+
+ // Except PG, PE, and NE, which are always set
+ guest_cr0->pe = 1;
+ guest_cr0->pg = 1;
+ guest_cr0->ne = 1;
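+ // The VMX CR0 fixed-bit MSRs force PE, PG, and NE to 1 in the real CR0;
+ // the guest's own view is kept in the CR0 read shadow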
+
+ if (paging_transition) {
+ // Paging transition
+
+ if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
+ struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
+
+ if (guest_efer->lme == 1) {
+ PrintDebug("Enabling long mode\n");
+
+ guest_efer->lma = 1;
+ guest_efer->lme = 1;
+
+ vmx_info->entry_ctrls.guest_ia32e = 1;
+ }
+
+ PrintDebug("Activating Shadow Page tables\n");
+
+ if (v3_activate_shadow_pt(info) == -1) {
+ PrintError("Failed to activate shadow page tables\n");
+ return -1;
+ }
+
+ } else if (v3_activate_passthrough_pt(info) == -1) {
+ PrintError("Failed to activate passthrough page tables\n");
+ return -1;
+ }
+ }
+
+ // A PE transition (handled above) returns early with RIP set by VMXASSIST;
+ // otherwise we need to skip ahead past the mov instruction
+
+ vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
+ info->rip += instr_len;
+
+ return 0;
+}
+
+static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual) {
+ v3_reg_t * reg = NULL;
+
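+ // The gpr field uses Intel's MOV-CR register encoding: 0=RAX, 1=RCX, ... 15=R15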
+ switch (cr_qual.gpr) {
+ case 0:
+ reg = &(info->vm_regs.rax);
+ break;
+ case 1:
+ reg = &(info->vm_regs.rcx);
+ break;
+ case 2:
+ reg = &(info->vm_regs.rdx);
+ break;
+ case 3:
+ reg = &(info->vm_regs.rbx);
+ break;
+ case 4:
+ reg = &(info->vm_regs.rsp);
+ break;
+ case 5:
+ reg = &(info->vm_regs.rbp);
+ break;
+ case 6:
+ reg = &(info->vm_regs.rsi);
+ break;
+ case 7:
+ reg = &(info->vm_regs.rdi);
+ break;
+ case 8:
+ reg = &(info->vm_regs.r8);
+ break;
+ case 9:
+ reg = &(info->vm_regs.r9);
+ break;
+ case 10:
+ reg = &(info->vm_regs.r10);
+ break;
+ case 11:
+ reg = &(info->vm_regs.r11);
+ break;
+ case 12:
+ reg = &(info->vm_regs.r11);
+ break;
+ case 13:
+ reg = &(info->vm_regs.r13);
+ break;
+ case 14:
+ reg = &(info->vm_regs.r14);
+ break;
+ case 15:
+ reg = &(info->vm_regs.r15);
+ break;
+ }
+
+ return reg;
}
+
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmx_ctrl_regs.h>
#include <palacios/vmx_assist.h>
+#include <palacios/vmm_halt.h>
+#ifdef CONFIG_TELEMETRY
+#include <palacios/vmm_telemetry.h>
+#endif
-static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
-{
+
+static int inline check_vmcs_write(vmcs_field_t field, addr_t val) {
int ret = 0;
+
ret = vmcs_write(field, val);
if (ret != VMX_SUCCESS) {
return ret;
}
-static int inline check_vmcs_read(vmcs_field_t field, void * val)
-{
+static int inline check_vmcs_read(vmcs_field_t field, void * val) {
int ret = 0;
+
ret = vmcs_read(field, val);
if (ret != VMX_SUCCESS) {
return ret;
}
-static void inline translate_access_to_v3_seg(struct vmcs_segment_access * access,
- struct v3_segment * v3_seg) {
- v3_seg->type = access->type;
- v3_seg->system = access->desc_type;
- v3_seg->dpl = access->dpl;
- v3_seg->present = access->present;
- v3_seg->avail = access->avail;
- v3_seg->long_mode = access->long_mode;
- v3_seg->db = access->db;
- v3_seg->granularity = access->granularity;
+static int inline handle_cr_access(struct guest_info * info, ulong_t exit_qual) {
+ struct vmx_exit_cr_qual * cr_qual = (struct vmx_exit_cr_qual *)&exit_qual;
+
+ // PrintDebug("Control register: %d\n", cr_qual->access_type);
+ switch(cr_qual->cr_id) {
+ case 0:
+ PrintDebug("Handling CR0 Access\n");
+ return v3_vmx_handle_cr0_access(info);
+ case 3:
+ PrintDebug("Handling CR3 Access\n");
+ return v3_vmx_handle_cr3_access(info);
+ default:
+ PrintError("Unhandled CR access: %d\n", cr_qual->cr_id);
+ return -1;
+ }
+
+ return -1;
}
-static int load_vmcs_guest_state(struct guest_info * info)
-{
-
- struct vmcs_segment_access access;
- int ret = 0;
-
- // JRL: Add error checking
- memset(&access, 0, sizeof(access));
-
- /* CS Segment */
- check_vmcs_read(VMCS_GUEST_CS_BASE, &(info->segments.cs.base));
- check_vmcs_read(VMCS_GUEST_CS_SELECTOR, &(info->segments.cs.selector));
- check_vmcs_read(VMCS_GUEST_CS_LIMIT, &(info->segments.cs.limit));
- check_vmcs_read(VMCS_GUEST_CS_ACCESS, &(access.value));
+/* At this point the GPRs are already copied into the guest_info state */
+int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs) {
+ uint64_t tmp_tsc = 0;
+ uint32_t exit_reason = 0;
+ addr_t exit_qual = 0;
+ struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
+ struct vmx_exit_idt_vec_info idt_vec_info;
- translate_access_to_v3_seg(&access, &(info->segments.cs));
+ rdtscll(tmp_tsc);
+ v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
- /* SS Segment */
- check_vmcs_read(VMCS_GUEST_SS_BASE, &(info->segments.ss.base));
- check_vmcs_read(VMCS_GUEST_SS_SELECTOR, &(info->segments.ss.selector));
- check_vmcs_read(VMCS_GUEST_SS_LIMIT, &(info->segments.ss.limit));
- check_vmcs_read(VMCS_GUEST_SS_ACCESS, &(access.value));
+ v3_enable_ints();
- translate_access_to_v3_seg(&access, &(info->segments.ss));
+ check_vmcs_read(VMCS_EXIT_REASON, &exit_reason);
+ check_vmcs_read(VMCS_EXIT_QUAL, &exit_qual);
- /* DS Segment */
- check_vmcs_read(VMCS_GUEST_DS_BASE, &(info->segments.ds.base));
- check_vmcs_read(VMCS_GUEST_DS_SELECTOR, &(info->segments.ds.selector));
- check_vmcs_read(VMCS_GUEST_DS_LIMIT, &(info->segments.ds.limit));
- check_vmcs_read(VMCS_GUEST_DS_ACCESS, &(access.value));
+ //PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_reason, exit_qual);
- translate_access_to_v3_seg(&access, &(info->segments.ds));
+ /* Update guest state */
+ v3_load_vmcs_guest_state(info);
- /* ES Segment */
- check_vmcs_read(VMCS_GUEST_ES_BASE, &(info->segments.es.base));
- check_vmcs_read(VMCS_GUEST_ES_SELECTOR, &(info->segments.es.selector));
- check_vmcs_read(VMCS_GUEST_ES_LIMIT, &(info->segments.es.limit));
- check_vmcs_read(VMCS_GUEST_ES_ACCESS, &(access.value));
+ // Load execution controls
+ check_vmcs_read(VMCS_PIN_CTRLS, &(vmx_info->pin_ctrls.value));
+ check_vmcs_read(VMCS_PROC_CTRLS, &(vmx_info->pri_proc_ctrls.value));
- translate_access_to_v3_seg(&access, &(info->segments.es));
+ if (vmx_info->pri_proc_ctrls.sec_ctrls) {
+ check_vmcs_read(VMCS_SEC_PROC_CTRLS, &(vmx_info->sec_proc_ctrls.value));
+ }
- /* FS Segment */
- check_vmcs_read(VMCS_GUEST_FS_BASE, &(info->segments.fs.base));
- check_vmcs_read(VMCS_GUEST_FS_SELECTOR, &(info->segments.fs.selector));
- check_vmcs_read(VMCS_GUEST_FS_LIMIT, &(info->segments.fs.limit));
- check_vmcs_read(VMCS_GUEST_FS_ACCESS, &(access.value));
+ info->mem_mode = v3_get_vm_mem_mode(info);
+ info->cpu_mode = v3_get_vm_cpu_mode(info);
- translate_access_to_v3_seg(&access, &(info->segments.fs));
+ // Check whether this exit interrupted an in-progress interrupt delivery;
+ // the value is consulted again below when deciding whether to reinject
+ check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
- /* GS Segment */
- check_vmcs_read(VMCS_GUEST_GS_BASE, &(info->segments.gs.base));
- check_vmcs_read(VMCS_GUEST_GS_SELECTOR, &(info->segments.gs.selector));
- check_vmcs_read(VMCS_GUEST_GS_LIMIT, &(info->segments.gs.limit));
- check_vmcs_read(VMCS_GUEST_GS_ACCESS, &(access.value));
+ if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Calling v3_injecting_intr\n");
+#endif
+ info->intr_state.irq_started = 0;
+ v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
+ }
- translate_access_to_v3_seg(&access, &(info->segments.gs));
+ info->num_exits++;
- /* LDTR Segment */
- check_vmcs_read(VMCS_GUEST_LDTR_BASE, &(info->segments.ldtr.base));
- check_vmcs_read(VMCS_GUEST_LDTR_SELECTOR, &(info->segments.ldtr.selector));
- check_vmcs_read(VMCS_GUEST_LDTR_LIMIT, &(info->segments.ldtr.limit));
- check_vmcs_read(VMCS_GUEST_LDTR_ACCESS, &(access.value));
- translate_access_to_v3_seg(&access, &(info->segments.ldtr));
- /* TR Segment */
- check_vmcs_read(VMCS_GUEST_TR_BASE, &(info->segments.tr.base));
- check_vmcs_read(VMCS_GUEST_TR_SELECTOR, &(info->segments.tr.selector));
- check_vmcs_read(VMCS_GUEST_TR_LIMIT, &(info->segments.tr.limit));
- check_vmcs_read(VMCS_GUEST_TR_ACCESS, &(access.value));
+ if ((info->num_exits % 5000) == 0) {
+ PrintDebug("VMX Exit %d\n", (uint32_t)info->num_exits);
+ }
- translate_access_to_v3_seg(&access, &(info->segments.tr));
+#ifdef CONFIG_TELEMETRY
+ if (info->enable_telemetry) {
+ v3_telemetry_start_exit(info);
+ }
+#endif
- /* GDTR Segment */
- check_vmcs_read(VMCS_GUEST_GDTR_BASE, &(info->segments.gdtr.base));
- check_vmcs_read(VMCS_GUEST_GDTR_LIMIT, &(info->segments.gdtr.limit));
-
- /* IDTR Segment */
- check_vmcs_read(VMCS_GUEST_IDTR_BASE, &(info->segments.idtr.base));
- check_vmcs_read(VMCS_GUEST_IDTR_LIMIT, &(info->segments.idtr.limit));
+ switch (exit_reason) {
+ case VMEXIT_INFO_EXCEPTION_OR_NMI: {
+ uint32_t int_info;
+ pf_error_t error_code;
+ check_vmcs_read(VMCS_EXIT_INT_INFO, &int_info);
+ check_vmcs_read(VMCS_EXIT_INT_ERR, &error_code);
- /*
- * Read the control state
- */
- check_vmcs_read(VMCS_GUEST_RIP, &(info->rip));
- check_vmcs_read(VMCS_GUEST_RSP, &(info->vm_regs.rsp));
- check_vmcs_read(VMCS_GUEST_CR0, &(info->ctrl_regs.cr0));
- check_vmcs_read(VMCS_CR0_READ_SHDW, &(info->shdw_pg_state.guest_cr0));
- check_vmcs_read(VMCS_GUEST_CR3, &(info->ctrl_regs.cr3));
- check_vmcs_read(VMCS_GUEST_CR4, &(info->ctrl_regs.cr4));
+ // JRL: Change "0x0e" to a macro value
+ if ((uint8_t)int_info == 0x0e) {
+#ifdef CONFIG_DEBUG_SHADOW_PAGING
+ PrintDebug("Page Fault at %p error_code=%x\n", (void *)exit_qual, *(uint32_t *)&error_code);
+#endif
- return ret;
-}
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_handle_shadow_pagefault(info, (addr_t)exit_qual, error_code) == -1) {
+ PrintError("Error handling shadow page fault\n");
+ return -1;
+ }
+ } else {
+ PrintError("Page fault in unimplemented paging mode\n");
+ return -1;
+ }
+ } else {
+ PrintError("Unknown exception: 0x%x\n", (uint8_t)int_info);
+ v3_print_GPRs(info);
+ return -1;
+ }
+ break;
+ }
+ case VMEXIT_INVLPG:
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_handle_shadow_invlpg(info) == -1) {
+ PrintError("Error handling INVLPG\n");
+ return -1;
+ }
+ }
-#if 0
-static void setup_v8086_mode_for_boot(struct guest_info * info)
-{
+ break;
+ case VMEXIT_CPUID: {
+ int instr_len;
+ uint32_t target = info->vm_regs.rax;
- ((struct vmx_data *)info->vmm_data)->state = VMXASSIST_V8086_BIOS;
- struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
- flags->rsvd1 = 1;
- flags->vm = 1;
- flags->iopl = 3;
+ v3_cpuid(target, (addr_t *)&(info->vm_regs.rax), (addr_t *)&(info->vm_regs.rbx),
+ (addr_t *)&(info->vm_regs.rcx), (addr_t *)&(info->vm_regs.rdx));
- info->rip = 0xfff0;
-
- /* Zero the segment registers */
- memset(&(info->segments), 0, sizeof(struct v3_segment)*6);
+ check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
+ info->rip += instr_len;
- info->segments.cs.selector = 0xf000;
- info->segments.cs.base = 0xf000 << 4;
- info->segments.cs.limit = 0xffff;
- info->segments.cs.type = 3;
- info->segments.cs.system = 1;
- info->segments.cs.dpl = 3;
- info->segments.cs.present = 1;
- info->segments.cs.granularity = 0;
+ break;
+ }
+ case VMEXIT_RDMSR:
+ if (v3_handle_msr_read(info) == -1) {
+ PrintError("Error handling MSR Read\n");
+ return -1;
+ }
- int i;
-
- /* Set values for selectors ds through ss */
- struct v3_segment * seg_ptr = (struct v3_segment *)&(info->segments);
- for(i = 1; i < 6 ; i++) {
- seg_ptr[i].selector = 0x0000;
- seg_ptr[i].base = 0x00000;
- seg_ptr[i].limit = 0xffff;
- seg_ptr[i].type = 3;
- seg_ptr[i].system = 1;
- seg_ptr[i].dpl = 3;
- seg_ptr[i].present = 1;
- seg_ptr[i].granularity = 0;
- }
+ break;
+ case VMEXIT_WRMSR:
+ if (v3_handle_msr_write(info) == -1) {
+ PrintError("Error handling MSR Write\n");
+ return -1;
+ }
-}
+ break;
+ case VMEXIT_IO_INSTR: {
+ struct vmx_exit_io_qual * io_qual = (struct vmx_exit_io_qual *)&exit_qual;
+
+ if (io_qual->dir == 0) {
+ if (io_qual->string) {
+ if (v3_handle_vmx_io_outs(info) == -1) {
+ PrintError("Error in outs IO handler\n");
+ return -1;
+ }
+ } else {
+ if (v3_handle_vmx_io_out(info) == -1) {
+ PrintError("Error in out IO handler\n");
+ return -1;
+ }
+ }
+ } else {
+ if (io_qual->string) {
+ if (v3_handle_vmx_io_ins(info) == -1) {
+ PrintError("Error in ins IO handler\n");
+ return -1;
+ }
+ } else {
+ if (v3_handle_vmx_io_in(info) == -1) {
+ PrintError("Error in in IO handler\n");
+ return -1;
+ }
+ }
+ }
+ break;
+ }
+ case VMEXIT_CR_REG_ACCESSES:
+ if (handle_cr_access(info, exit_qual) != 0) {
+ PrintError("Error handling CR access\n");
+ return -1;
+ }
-#endif
-
-static int inline handle_cr_access(struct guest_info * info, ulong_t exit_qual) {
- struct vmexit_cr_qual * cr_qual = (struct vmexit_cr_qual *)&exit_qual;
+ break;
+ case VMEXIT_HLT:
+ PrintDebug("Guest halted\n");
- PrintDebug("Control register: %d\n", cr_qual->access_type);
+ if (v3_handle_halt(info) == -1) {
+ PrintError("Error handling halt instruction\n");
+ return -1;
+ }
- if (cr_qual->access_type < 2) {
- v3_reg_t reg = 0;
-
- switch(cr_qual->gpr) {
- case 0:
- reg = info->vm_regs.rax;
- break;
- case 1:
- reg = info->vm_regs.rcx;
- break;
- case 2:
- reg = info->vm_regs.rdx;
- break;
- case 3:
- reg = info->vm_regs.rbx;
- break;
- case 4:
- reg = info->vm_regs.rsp;
- break;
- case 5:
- reg = info->vm_regs.rbp;
- break;
- case 6:
- reg = info->vm_regs.rsi;
- break;
- case 7:
- reg = info->vm_regs.rdi;
- break;
- case 8:
- reg = info->vm_regs.r8;
- break;
- case 9:
- reg = info->vm_regs.r9;
- break;
- case 10:
- reg = info->vm_regs.r10;
- break;
- case 11:
- reg = info->vm_regs.r11;
- break;
- case 12:
- reg = info->vm_regs.r11;
- break;
- case 13:
- reg = info->vm_regs.r13;
- break;
- case 14:
- reg = info->vm_regs.r14;
- break;
- case 15:
- reg = info->vm_regs.r15;
- break;
- }
+ break;
+ case VMEXIT_PAUSE:
+ // Handled as NOP
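+ // (PAUSE is a fixed 2-byte opcode: 0xF3 0x90)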
+ info->rip += 2;
- if (cr_qual->cr_id == 0) {
- uint32_t instr_len;
+ break;
+ case VMEXIT_EXTERNAL_INTR:
+ // Interrupts are handled outside switch
+ break;
+ case VMEXIT_INTR_WINDOW:
- vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
+ vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
+ check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
- if ( ~reg & CR0_PE ) {
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Interrupts available again! (RIP=%llx)\n", info->rip);
+#endif
- if (v3_vmxassist_ctx_switch(info) != 0) {
- PrintError("Unable to execute VMXASSIST context switch!\n");
- return -1;
- }
+ break;
+ default:
+ PrintError("Unhandled VMEXIT: %s (%u), %lu (0x%lx)\n",
+ v3_vmx_exit_code_to_str(exit_reason),
+ exit_reason, exit_qual, exit_qual);
+ return -1;
+ }
- load_vmcs_guest_state(info);
+#ifdef CONFIG_TELEMETRY
+ if (info->enable_telemetry) {
+ v3_telemetry_end_exit(info, exit_reason);
+ }
+#endif
- ((struct vmx_data *)info->vmm_data)->state = VMXASSIST_ENABLED;
- PrintDebug("Loading vmxassist at RIP: 0x%p\n", (void *)info->rip);
- return 0;
- } else if (v3_vmx_handle_cr0_write(info, reg) != 0) {
- PrintError("Could not handle CR0 Write\n");
- return -1;
- }
+ /* Check for pending exceptions to inject */
+ if (v3_excp_pending(info)) {
+ struct vmx_entry_int_info int_info;
+ int_info.value = 0;
- load_vmcs_guest_state(info);
+ // In VMX, almost every exception is hardware
+ // Software exceptions are pretty much only for breakpoint or overflow
+ int_info.type = 3;
+ int_info.vector = v3_get_excp_number(info);
- PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: 0x%p\n", (void *)info->rip);
+ if (info->excp_state.excp_error_code_valid) {
+ check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
+ int_info.error_code = 1;
- return 0;
+ PrintDebug("Injecting exception %d with error code %x\n",
+ int_info.vector, info->excp_state.excp_error_code);
}
- }
- PrintError("Unhandled CR access\n");
- return -1;
-}
+ int_info.valid = 1;
+ PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)info->rip);
+ check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);
+ v3_injecting_excp(info, int_info.vector);
-/* At this point the GPRs are already copied into the guest_info state */
-int v3_handle_vmx_exit(struct v3_gprs * gprs, struct guest_info * info) {
- uint32_t exit_reason;
- ulong_t exit_qual;
+ } else if (((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) {
+
+ if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
- check_vmcs_read(VMCS_EXIT_REASON, &exit_reason);
- check_vmcs_read(VMCS_EXIT_QUAL, &exit_qual);
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("IRQ pending from previous injection\n");
+#endif
- // PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_reason, exit_qual);
+ // Copy the IDT vectoring info over to reinject the old interrupt
+ if (idt_vec_info.error_code == 1) {
+ uint32_t err_code = 0;
- /* Update guest state */
- load_vmcs_guest_state(info);
-
- switch (exit_reason) {
- case VMEXIT_INFO_EXCEPTION_OR_NMI: {
- uint32_t int_info;
- pf_error_t error_code;
-
- check_vmcs_read(VMCS_EXIT_INT_INFO, &int_info);
- check_vmcs_read(VMCS_EXIT_INT_ERR, &error_code);
-
- // JRL: Change "0x0e" to a macro value
- if ((uint8_t)int_info == 0x0e) {
- PrintDebug("Page Fault at %p\n", (void *)exit_qual);
-
- if (info->shdw_pg_mode == SHADOW_PAGING) {
- if (v3_handle_shadow_pagefault(info, (addr_t)exit_qual, error_code) == -1) {
- PrintError("Error handling shadow page fault\n");
- return -1;
- }
- } else {
- PrintError("Page fault in unimplemented paging mode\n");
- return -1;
- }
- } else {
- PrintDebug("Unknown exception: 0x%x\n", (uint8_t)int_info);
- v3_print_GPRs(info);
- return -1;
- }
- break;
- }
-
- case VMEXIT_CPUID: {
- int instr_len;
+ check_vmcs_read(VMCS_IDT_VECTOR_ERR, &err_code);
+ check_vmcs_write(VMCS_ENTRY_EXCP_ERR, err_code);
+ }
- v3_cpuid(info->vm_regs.rax, (addr_t *)&(info->vm_regs.rax), (addr_t *)&(info->vm_regs.rbx),
- (addr_t *)&(info->vm_regs.rcx), (addr_t *)&(info->vm_regs.rdx));
+ idt_vec_info.undef = 0;
+ check_vmcs_write(VMCS_ENTRY_INT_INFO, idt_vec_info.value);
+
+ } else {
+ struct vmx_entry_int_info ent_int;
+ ent_int.value = 0;
+
+ switch (v3_intr_pending(info)) {
+ case V3_EXTERNAL_IRQ: {
+ info->intr_state.irq_vector = v3_get_intr(info);
+ ent_int.vector = info->intr_state.irq_vector;
+ ent_int.type = 0;
+ ent_int.error_code = 0;
+ ent_int.valid = 1;
+
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n",
+ info->intr_state.irq_vector,
+ (uint32_t)info->num_exits,
+ (void *)info->rip);
+#endif
- check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
+ check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
+ info->intr_state.irq_started = 1;
- info->rip += instr_len;
- break;
- }
-
- case VMEXIT_IO_INSTR: {
- struct vmexit_io_qual * io_qual = (struct vmexit_io_qual *)&exit_qual;
-
- if (io_qual->dir == 0) {
- if (io_qual->string) {
- if (v3_handle_vmx_io_outs(info) == -1) {
- PrintError("Error in outs IO handler\n");
- return -1;
- }
- } else {
- if (v3_handle_vmx_io_out(info) == -1) {
- PrintError("Error in out IO handler\n");
- return -1;
- }
- }
- } else {
- if (io_qual->string) {
- if(v3_handle_vmx_io_ins(info) == -1) {
- PrintError("Error in ins IO handler\n");
- return -1;
- }
- } else {
- if (v3_handle_vmx_io_in(info) == -1) {
- PrintError("Error in in IO handler\n");
- return -1;
- }
- }
- }
- break;
- }
-
- case VMEXIT_CR_REG_ACCESSES:
- if (handle_cr_access(info,exit_qual) != 0) {
- PrintError("Error handling CR access\n");
- return -1;
- }
+ break;
+ }
+ case V3_NMI:
+ PrintDebug("Injecting NMI\n");
+
+ ent_int.type = 2;
+ ent_int.vector = 2;
+ ent_int.valid = 1;
+ check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
+
+ break;
+ case V3_SOFTWARE_INTR:
+ PrintDebug("Injecting software interrupt\n");
+ ent_int.type = 4;
+
+ ent_int.valid = 1;
+ check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
+
+ break;
+ case V3_VIRTUAL_IRQ:
+ // Not sure what to do here, Intel doesn't have virtual IRQs
+ // May be the same as external interrupts/IRQs
+
+ break;
+ case V3_INVALID_INTR:
+ default:
+ break;
+ }
+ }
+ } else if ((v3_intr_pending(info)) && (vmx_info->pri_proc_ctrls.int_wndw_exit == 0)) {
+ // Enable INTR window exiting so we know when IF=1
+ uint32_t instr_len;
- break;
+ check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
- default:
- PrintError("Unhandled VMEXIT: %u (0x%x), %lu (0x%lx)\n", exit_reason, exit_reason, exit_qual, exit_qual);
- return -1;
+#ifdef CONFIG_DEBUG_INTERRUPTS
+ PrintDebug("Enabling Interrupt-Window exiting: %d\n", instr_len);
+#endif
+
+ vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
+ check_vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
}
check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
+ v3_disable_ints();
+
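+ // Snapshot the host TSC just before reentry; the next exit uses it to
+ // compute elapsed guest time via v3_update_time()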
+ rdtscll(info->time_state.cached_host_tsc);
+
return 0;
}
+
+static const char VMEXIT_INFO_EXCEPTION_OR_NMI_STR[] = "VMEXIT_INFO_EXCEPTION_OR_NMI";
+static const char VMEXIT_EXTERNAL_INTR_STR[] = "VMEXIT_EXTERNAL_INTR";
+static const char VMEXIT_TRIPLE_FAULT_STR[] = "VMEXIT_TRIPLE_FAULT";
+static const char VMEXIT_INIT_SIGNAL_STR[] = "VMEXIT_INIT_SIGNAL";
+static const char VMEXIT_STARTUP_IPI_STR[] = "VMEXIT_STARTUP_IPI";
+static const char VMEXIT_IO_SMI_STR[] = "VMEXIT_IO_SMI";
+static const char VMEXIT_OTHER_SMI_STR[] = "VMEXIT_OTHER_SMI";
+static const char VMEXIT_INTR_WINDOW_STR[] = "VMEXIT_INTR_WINDOW";
+static const char VMEXIT_NMI_WINDOW_STR[] = "VMEXIT_NMI_WINDOW";
+static const char VMEXIT_TASK_SWITCH_STR[] = "VMEXIT_TASK_SWITCH";
+static const char VMEXIT_CPUID_STR[] = "VMEXIT_CPUID";
+static const char VMEXIT_HLT_STR[] = "VMEXIT_HLT";
+static const char VMEXIT_INVD_STR[] = "VMEXIT_INVD";
+static const char VMEXIT_INVLPG_STR[] = "VMEXIT_INVLPG";
+static const char VMEXIT_RDPMC_STR[] = "VMEXIT_RDPMC";
+static const char VMEXIT_RDTSC_STR[] = "VMEXIT_RDTSC";
+static const char VMEXIT_RSM_STR[] = "VMEXIT_RSM";
+static const char VMEXIT_VMCALL_STR[] = "VMEXIT_VMCALL";
+static const char VMEXIT_VMCLEAR_STR[] = "VMEXIT_VMCLEAR";
+static const char VMEXIT_VMLAUNCH_STR[] = "VMEXIT_VMLAUNCH";
+static const char VMEXIT_VMPTRLD_STR[] = "VMEXIT_VMPTRLD";
+static const char VMEXIT_VMPTRST_STR[] = "VMEXIT_VMPTRST";
+static const char VMEXIT_VMREAD_STR[] = "VMEXIT_VMREAD";
+static const char VMEXIT_VMRESUME_STR[] = "VMEXIT_VMRESUME";
+static const char VMEXIT_VMWRITE_STR[] = "VMEXIT_VMWRITE";
+static const char VMEXIT_VMXOFF_STR[] = "VMEXIT_VMXOFF";
+static const char VMEXIT_VMXON_STR[] = "VMEXIT_VMXON";
+static const char VMEXIT_CR_REG_ACCESSES_STR[] = "VMEXIT_CR_REG_ACCESSES";
+static const char VMEXIT_MOV_DR_STR[] = "VMEXIT_MOV_DR";
+static const char VMEXIT_IO_INSTR_STR[] = "VMEXIT_IO_INSTR";
+static const char VMEXIT_RDMSR_STR[] = "VMEXIT_RDMSR";
+static const char VMEXIT_WRMSR_STR[] = "VMEXIT_WRMSR";
+static const char VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE_STR[] = "VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE";
+static const char VMEXIT_ENTRY_FAIL_MSR_LOAD_STR[] = "VMEXIT_ENTRY_FAIL_MSR_LOAD";
+static const char VMEXIT_MWAIT_STR[] = "VMEXIT_MWAIT";
+static const char VMEXIT_MONITOR_STR[] = "VMEXIT_MONITOR";
+static const char VMEXIT_PAUSE_STR[] = "VMEXIT_PAUSE";
+static const char VMEXIT_ENTRY_FAILURE_MACHINE_CHECK_STR[] = "VMEXIT_ENTRY_FAILURE_MACHINE_CHECK";
+static const char VMEXIT_TPR_BELOW_THRESHOLD_STR[] = "VMEXIT_TPR_BELOW_THRESHOLD";
+static const char VMEXIT_APIC_STR[] = "VMEXIT_APIC";
+static const char VMEXIT_GDTR_IDTR_STR[] = "VMEXIT_GDTR_IDTR";
+static const char VMEXIT_LDTR_TR_STR[] = "VMEXIT_LDTR_TR";
+static const char VMEXIT_EPT_VIOLATION_STR[] = "VMEXIT_EPT_VIOLATION";
+static const char VMEXIT_EPT_CONFIG_STR[] = "VMEXIT_EPT_CONFIG";
+static const char VMEXIT_INVEPT_STR[] = "VMEXIT_INVEPT";
+static const char VMEXIT_RDTSCP_STR[] = "VMEXIT_RDTSCP";
+static const char VMEXIT_EXPIRED_PREEMPT_TIMER_STR[] = "VMEXIT_EXPIRED_PREEMPT_TIMER";
+static const char VMEXIT_INVVPID_STR[] = "VMEXIT_INVVPID";
+static const char VMEXIT_WBINVD_STR[] = "VMEXIT_WBINVD";
+static const char VMEXIT_XSETBV_STR[] = "VMEXIT_XSETBV";
+
+const char * v3_vmx_exit_code_to_str(vmx_exit_t exit) {
+ switch(exit) {
+ case VMEXIT_INFO_EXCEPTION_OR_NMI:
+ return VMEXIT_INFO_EXCEPTION_OR_NMI_STR;
+ case VMEXIT_EXTERNAL_INTR:
+ return VMEXIT_EXTERNAL_INTR_STR;
+ case VMEXIT_TRIPLE_FAULT:
+ return VMEXIT_TRIPLE_FAULT_STR;
+ case VMEXIT_INIT_SIGNAL:
+ return VMEXIT_INIT_SIGNAL_STR;
+ case VMEXIT_STARTUP_IPI:
+ return VMEXIT_STARTUP_IPI_STR;
+ case VMEXIT_IO_SMI:
+ return VMEXIT_IO_SMI_STR;
+ case VMEXIT_OTHER_SMI:
+ return VMEXIT_OTHER_SMI_STR;
+ case VMEXIT_INTR_WINDOW:
+ return VMEXIT_INTR_WINDOW_STR;
+ case VMEXIT_NMI_WINDOW:
+ return VMEXIT_NMI_WINDOW_STR;
+ case VMEXIT_TASK_SWITCH:
+ return VMEXIT_TASK_SWITCH_STR;
+ case VMEXIT_CPUID:
+ return VMEXIT_CPUID_STR;
+ case VMEXIT_HLT:
+ return VMEXIT_HLT_STR;
+ case VMEXIT_INVD:
+ return VMEXIT_INVD_STR;
+ case VMEXIT_INVLPG:
+ return VMEXIT_INVLPG_STR;
+ case VMEXIT_RDPMC:
+ return VMEXIT_RDPMC_STR;
+ case VMEXIT_RDTSC:
+ return VMEXIT_RDTSC_STR;
+ case VMEXIT_RSM:
+ return VMEXIT_RSM_STR;
+ case VMEXIT_VMCALL:
+ return VMEXIT_VMCALL_STR;
+ case VMEXIT_VMCLEAR:
+ return VMEXIT_VMCLEAR_STR;
+ case VMEXIT_VMLAUNCH:
+ return VMEXIT_VMLAUNCH_STR;
+ case VMEXIT_VMPTRLD:
+ return VMEXIT_VMPTRLD_STR;
+ case VMEXIT_VMPTRST:
+ return VMEXIT_VMPTRST_STR;
+ case VMEXIT_VMREAD:
+ return VMEXIT_VMREAD_STR;
+ case VMEXIT_VMRESUME:
+ return VMEXIT_VMRESUME_STR;
+ case VMEXIT_VMWRITE:
+ return VMEXIT_VMWRITE_STR;
+ case VMEXIT_VMXOFF:
+ return VMEXIT_VMXOFF_STR;
+ case VMEXIT_VMXON:
+ return VMEXIT_VMXON_STR;
+ case VMEXIT_CR_REG_ACCESSES:
+ return VMEXIT_CR_REG_ACCESSES_STR;
+ case VMEXIT_MOV_DR:
+ return VMEXIT_MOV_DR_STR;
+ case VMEXIT_IO_INSTR:
+ return VMEXIT_IO_INSTR_STR;
+ case VMEXIT_RDMSR:
+ return VMEXIT_RDMSR_STR;
+ case VMEXIT_WRMSR:
+ return VMEXIT_WRMSR_STR;
+ case VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE:
+ return VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE_STR;
+ case VMEXIT_ENTRY_FAIL_MSR_LOAD:
+ return VMEXIT_ENTRY_FAIL_MSR_LOAD_STR;
+ case VMEXIT_MWAIT:
+ return VMEXIT_MWAIT_STR;
+ case VMEXIT_MONITOR:
+ return VMEXIT_MONITOR_STR;
+ case VMEXIT_PAUSE:
+ return VMEXIT_PAUSE_STR;
+ case VMEXIT_ENTRY_FAILURE_MACHINE_CHECK:
+ return VMEXIT_ENTRY_FAILURE_MACHINE_CHECK_STR;
+ case VMEXIT_TPR_BELOW_THRESHOLD:
+ return VMEXIT_TPR_BELOW_THRESHOLD_STR;
+ case VMEXIT_APIC:
+ return VMEXIT_APIC_STR;
+ case VMEXIT_GDTR_IDTR:
+ return VMEXIT_GDTR_IDTR_STR;
+ case VMEXIT_LDTR_TR:
+ return VMEXIT_LDTR_TR_STR;
+ case VMEXIT_EPT_VIOLATION:
+ return VMEXIT_EPT_VIOLATION_STR;
+ case VMEXIT_EPT_CONFIG:
+ return VMEXIT_EPT_CONFIG_STR;
+ case VMEXIT_INVEPT:
+ return VMEXIT_INVEPT_STR;
+ case VMEXIT_RDTSCP:
+ return VMEXIT_RDTSCP_STR;
+ case VMEXIT_EXPIRED_PREEMPT_TIMER:
+ return VMEXIT_EXPIRED_PREEMPT_TIMER_STR;
+ case VMEXIT_INVVPID:
+ return VMEXIT_INVVPID_STR;
+ case VMEXIT_WBINVD:
+ return VMEXIT_WBINVD_STR;
+ case VMEXIT_XSETBV:
+ return VMEXIT_XSETBV_STR;
+ }
+ // Avoid handing a NULL to a %s format for unrecognized exit codes
+ return "VMEXIT_UNKNOWN";
+}
+
/* Same as SVM */
-static int update_map(struct guest_info * info, uint16_t port, int hook_read, int hook_write)
-{
+static int update_map(struct guest_info * info, uint16_t port, int hook_read, int hook_write) {
uchar_t * bitmap = (uint8_t *)(info->io_map.arch_data);
int major = port / 8;
int minor = port % 8;
return 0;
}
-int v3_init_vmx_io_map(struct guest_info * info)
-{
+int v3_init_vmx_io_map(struct guest_info * info) {
info->io_map.update_map = update_map;
info->io_map.arch_data = V3_VAddr(V3_AllocPages(2));
- memset(info->io_map.arch_data, 0, PAGE_SIZE_4KB*2);
+ memset(info->io_map.arch_data, 0, PAGE_SIZE_4KB * 2);
return 0;
}
int v3_handle_vmx_io_in(struct guest_info * info) {
- ulong_t exit_qual;
+ struct vmx_exit_io_qual io_qual;
+ struct v3_io_hook * hook = NULL;
+ int read_size = 0;
uint32_t instr_length = 0;
- vmcs_read(VMCS_EXIT_QUAL, &exit_qual);
-
- struct vmexit_io_qual * io_qual = (struct vmexit_io_qual *)&exit_qual;
-
- struct v3_io_hook * hook = v3_get_io_hook(info, io_qual->port);
- int read_size = 0;
+ io_qual.value = 0;
+ vmcs_read(VMCS_EXIT_QUAL, &(io_qual.value));
+ hook = v3_get_io_hook(info, io_qual.port);
if (hook == NULL) {
- PrintError("Hook not present for IN on port %x\n", io_qual->port);
+ PrintError("Hook not present for IN on port %x\n", io_qual.port);
return -1;
}
- read_size = io_qual->access_size + 1;
+ read_size = io_qual.access_size + 1;
- PrintDebug("IN of %d bytes on port %d (0x%x)\n", read_size, io_qual->port, io_qual->port);
+ PrintDebug("IN of %d bytes on port %d (0x%x)\n", read_size, io_qual.port, io_qual.port);
- if (hook->read(io_qual->port, &(info->vm_regs.rax), read_size, hook->priv_data) != read_size) {
- PrintError("Read failure for IN on port %x\n", io_qual->port);
+ if (hook->read(io_qual.port, &(info->vm_regs.rax), read_size, hook->priv_data) != read_size) {
+ PrintError("Read failure for IN on port %x\n", io_qual.port);
return -1;
}
-
-
if (vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_length) != VMX_SUCCESS) {
PrintError("Could not read instruction length\n");
return -1;
return 0;
}
-int v3_handle_vmx_io_ins(struct guest_info * info)
-{
- ulong_t exit_qual;
-
- vmcs_read(VMCS_EXIT_QUAL, &exit_qual);
-
- struct vmexit_io_qual * io_qual = (struct vmexit_io_qual *)&exit_qual;
- struct v3_io_hook * hook = v3_get_io_hook(info, io_qual->port);
- int read_size;
- addr_t guest_va;
- addr_t host_addr;
- int rdi_change;
+int v3_handle_vmx_io_ins(struct guest_info * info) {
+ struct vmx_exit_io_qual io_qual;
+ struct v3_io_hook * hook = NULL;
+ int read_size = 0;
+ addr_t guest_va = 0;
+ addr_t host_addr = 0;
+ int rdi_change = 0;
ulong_t rep_num = 1;
+ struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
+ int instr_len = 0;
+
+ io_qual.value = 0;
+ vmcs_read(VMCS_EXIT_QUAL, &(io_qual.value));
+ hook = v3_get_io_hook(info, io_qual.port);
- if(hook == NULL) {
- PrintError("Hook not present for INS on port 0x%x\n", io_qual->port);
+ if (hook == NULL) {
+ PrintError("Hook not present for INS on port 0x%x\n", io_qual.port);
return -1;
}
- PrintDebug("INS on port 0x%x\n", io_qual->port);
+ PrintDebug("INS on port 0x%x\n", io_qual.port);
+
+ read_size = io_qual.access_size + 1;
- read_size = io_qual->access_size + 1;
+ if (io_qual.rep) {
+ struct vmx_exit_io_instr_info instr_info;
- if (io_qual->rep) {
- rep_num = info->vm_regs.rcx & get_gpr_mask(info);
+ instr_info.value = 0;
+ vmcs_read(VMCS_EXIT_INSTR_INFO, &instr_info.value);
+
+ if (instr_info.addr_size == 0) {
+ rep_num = info->vm_regs.rcx & 0xffff;
+ } else if (instr_info.addr_size == 1) {
+ rep_num = info->vm_regs.rcx & 0xffffffff;
+ } else if (instr_info.addr_size == 2) {
+ rep_num = info->vm_regs.rcx & 0xffffffffffffffffLL;
+ } else {
+ PrintDebug("Unknown INS address size!\n");
+ return -1;
+ }
}
- if ( ((struct rflags *)&(info->ctrl_regs.rflags))->df ) {
+ if (flags->df) {
rdi_change = -read_size;
} else {
rdi_change = read_size;
}
do {
- if (hook->read(io_qual->port, (char *)host_addr, read_size, hook->priv_data) != read_size) {
- PrintError("Read Failure for INS on port 0x%x\n", io_qual->port);
+ if (hook->read(io_qual.port, (char *)host_addr, read_size, hook->priv_data) != read_size) {
+ PrintError("Read Failure for INS on port 0x%x\n", io_qual.port);
return -1;
}
host_addr += rdi_change;
info->vm_regs.rdi += rdi_change;
- if (io_qual->rep) {
- --info->vm_regs.rcx;
+ if (io_qual.rep) {
+ info->vm_regs.rcx--;
}
- --rep_num;
-
- } while (rep_num > 0);
+
+ } while (--rep_num > 0);
- int instr_len = 0;
vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-
info->rip += instr_len;
return 0;
int v3_handle_vmx_io_out(struct guest_info * info) {
- ulong_t exit_qual;
-
- vmcs_read(VMCS_EXIT_QUAL, &exit_qual);
-
- struct vmexit_io_qual * io_qual = (struct vmexit_io_qual *)&exit_qual;
+ struct vmx_exit_io_qual io_qual;
+ struct v3_io_hook * hook = NULL;
+ int write_size = 0;
+ uint32_t instr_length = 0;
- struct v3_io_hook * hook = v3_get_io_hook(info, io_qual->port);
+ io_qual.value = 0;
+ vmcs_read(VMCS_EXIT_QUAL, &(io_qual.value));
+ hook = v3_get_io_hook(info, io_qual.port);
if (hook == NULL) {
- PrintError("Hook not present for out on port %x\n", io_qual->port);
+ PrintError("Hook not present for out on port %x\n", io_qual.port);
return -1;
}
- int write_size = io_qual->access_size + 1;
+ write_size = io_qual.access_size + 1;
- PrintDebug("OUT of %d bytes on port %d (0x%x)\n", write_size, io_qual->port, io_qual->port);
+ PrintDebug("OUT of %d bytes on port %d (0x%x)\n", write_size, io_qual.port, io_qual.port);
-
- if (hook->write(io_qual->port, &(info->vm_regs.rax), write_size, hook->priv_data) != write_size) {
- PrintError("Write failure for out on port %x\n",io_qual->port);
+ if (hook->write(io_qual.port, &(info->vm_regs.rax), write_size, hook->priv_data) != write_size) {
+ PrintError("Write failure for out on port %x\n",io_qual.port);
return -1;
}
- uint32_t instr_length = 0;
if (vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_length) != VMX_SUCCESS) {
PrintError("Could not read instruction length\n");
int v3_handle_vmx_io_outs(struct guest_info * info) {
- ulong_t exit_qual;
-
- vmcs_read(VMCS_EXIT_QUAL, &exit_qual);
-
- struct vmexit_io_qual * io_qual = (struct vmexit_io_qual *)&exit_qual;
- struct v3_io_hook * hook = v3_get_io_hook(info, io_qual->port);
+ struct vmx_exit_io_qual io_qual;
+ struct v3_io_hook * hook = NULL;
int write_size;
addr_t guest_va;
addr_t host_addr;
int rsi_change;
ulong_t rep_num = 1;
+ struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
+ int instr_len = 0;
+
+ io_qual.value = 0;
+ vmcs_read(VMCS_EXIT_QUAL, &(io_qual.value));
+ hook = v3_get_io_hook(info, io_qual.port);
if (hook == NULL) {
- PrintError("Hook not present for OUTS on port 0x%x\n", io_qual->port);
+ PrintError("Hook not present for OUTS on port 0x%x\n", io_qual.port);
return -1;
}
- PrintDebug("OUTS on port 0x%x\n", io_qual->port);
+ PrintDebug("OUTS on port 0x%x\n", io_qual.port);
- write_size = io_qual->access_size + 1;
+ write_size = io_qual.access_size + 1;
- if (io_qual->rep) {
+ if (io_qual.rep) {
// Grab the address sized bits of rcx
- rep_num = info->vm_regs.rcx & get_gpr_mask(info);
+ struct vmx_exit_io_instr_info instr_info;
+
+ instr_info.value = 0;
+ vmcs_read(VMCS_EXIT_INSTR_INFO, &instr_info.value);
+
+ if (instr_info.addr_size == 0) {
+ rep_num = info->vm_regs.rcx & 0xffff;
+ } else if (instr_info.addr_size == 1) {
+ rep_num = info->vm_regs.rcx & 0xffffffff;
+ } else if (instr_info.addr_size == 2) {
+ rep_num = info->vm_regs.rcx & 0xffffffffffffffffLL;
+ } else {
+ PrintDebug("Unknown INS address size!\n");
+ return -1;
+ }
}
- if ( ((struct rflags *)&(info->ctrl_regs.rflags))->df ) {
+ if (flags->df) {
rsi_change = -write_size;
} else {
rsi_change = write_size;
}
do {
- if (hook->write(io_qual->port, (char *)host_addr, write_size, hook->priv_data) != write_size) {
- PrintError("Read failure for INS on port 0x%x\n", io_qual->port);
+ if (hook->write(io_qual.port, (char *)host_addr, write_size, hook->priv_data) != write_size) {
+ PrintError("Read failure for INS on port 0x%x\n", io_qual.port);
return -1;
}
host_addr += rsi_change;
info->vm_regs.rsi += rsi_change;
- if (io_qual->rep) {
+ if (io_qual.rep) {
--info->vm_regs.rcx;
}
- --rep_num;
- } while (rep_num > 0);
+ } while (--rep_num > 0);
- int instr_len = 0;
vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-
info->rip += instr_len;
return 0;
pushq %rbx; \
movq 8(%rsp), %rbx; \
movq %rbx, 56(%rax); \
- popq %rbx; \
+ popq %rbx; \
\
movq %r8, 64(%rax); \
movq %r9, 72(%rax); \
movq %r15, 120(%rax); \
popq %rax;
-
#define restore_registers(location) \
mov location, %rax; \
mov (%rax), %rdi; \
pushq %rbx; \
movq 56(%rax), %rbx; \
movq %rbx, %rax; \
- popq %rbx;
-
-
+ popq %rbx;
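+
+// VMX hardware does not save or restore CR2, so these macros preserve the
+// guest's CR2 (kept at offset 8 of the ctrl_regs struct) across host execution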
+#define save_ctrl_regs(location) \
+ pushq %rax; \
+ pushq %rbx; \
+ movq location, %rax; \
+ movq %cr2, %rbx; \
+ movq %rbx, 8(%rax); \
+ popq %rbx; \
+ popq %rax
+
+#define restore_ctrl_regs(location) \
+ pushq %rax; \
+ pushq %rbx; \
+ movq location, %rax; \
+ movq 8(%rax), %rbx; \
+ movq %rbx, %cr2; \
+ popq %rbx; \
+ popq %rax
#define PUSHA \
push %rax; \
.align 8
.globl v3_vmx_exit_handler
v3_vmx_exit_handler:
- save_registers(8(%rsp));
- addq $8, %rsp
+ // The save_* argument is expanded inside each macro, after the macro's own
+ // pushes, so the offset must skip those pushes to reach the saved pointers.
+ // Stack at handler entry: 0(%rsp) = ctrl_regs ptr, 8(%rsp) = vm_regs ptr.
+ // save_registers pushes %rax, then needs the vm_regs ptr: 16(%rsp)
+ // save_ctrl_regs pushes %rax and %rbx, then needs the ctrl_regs ptr: 16(%rsp)
+ save_registers(16(%rsp));
+ save_ctrl_regs(16(%rsp));
+ addq $16, %rsp
POPA
popf
pushq %rdi
pushq %rsi
+ pushq %rdx
call v3_handle_vmx_exit
andq %rax, %rax
jnz .Lvmm_failure
v3_vmx_vmresume:
+ pop %rdx
pop %rsi
pop %rdi
pushf
PUSHA
pushq %rdi
+ pushq %rdx
+ restore_ctrl_regs(%rdx);
restore_registers(%rdi);
vmresume
jz .Lfail_valid
jc .Lfail_invalid
- addq $8, %rsp
+ addq $16, %rsp
jmp .Lreturn
.align 8
.globl v3_vmx_vmlaunch
-// vm_regs = %rdi
+// vm_regs = %rdi, guest_info * = %rsi, ctrl_regs = %rdx
v3_vmx_vmlaunch:
- cli
pushf
PUSHA
pushq %rdi
+ pushq %rdx
movq %rsp, %rax
movq $VMCS_HOST_RSP, %rbx
jz .Lfail_valid
jc .Lfail_invalid
+ restore_ctrl_regs(%rdx);
restore_registers(%rdi);
vmlaunch
jmp .Lreturn
.Lfail_valid:
- addq $8, %rsp
+ addq $16, %rsp
POPA
popf
movq $VMX_FAIL_VALID, %rax
jmp .Lreturn
.Lfail_invalid:
- addq $8, %rsp
+ addq $16, %rsp
POPA
popf
movq $VMX_FAIL_INVALID, %rax
jmp .Lreturn
.Lvmm_failure:
- addq $16, %rsp
+ addq $24, %rsp
movq $VMM_FAILURE, %rax
jmp .Lreturn
#include <palacios/vmm.h>
#include <palacios/vm_guest.h>
+#include <palacios/vmm_msr.h>
+
+#define LOW_MSR_START 0x00000000
+#define LOW_MSR_END 0x1fff
+#define HIGH_MSR_START 0xc0000000
+#define HIGH_MSR_END 0xc0001fff
+
+#define LOW_MSR_INDEX 0
+#define HIGH_MSR_INDEX 8192
+
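+/* The 4KB MSR bitmap is laid out as four 1KB regions (per the Intel manual):
+ * reads of low MSRs, reads of high MSRs, writes of low MSRs, writes of high
+ * MSRs. get_bitmap_index() returns a *bit* index into the read half (so the
+ * high-MSR read region starts at bit 8192); the write half uses the same
+ * index offset by 2KB (the "bitmap + 2048" below).
+ */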
+static int get_bitmap_index(uint_t msr) {
+ if ((msr >= LOW_MSR_START) && (msr <= LOW_MSR_END)) {
+ return LOW_MSR_INDEX + msr;
+ } else if (( msr >= HIGH_MSR_START ) && (msr <= HIGH_MSR_END)) {
+ return HIGH_MSR_INDEX + (msr - HIGH_MSR_START);
+ } else {
+ PrintError("MSR out of range: 0x%x\n", msr);
+ return -1;
+ }
+}
/* Same as SVM */
static int update_map(struct guest_info * info, uint_t msr, int hook_reads, int hook_writes) {
-#if 0
int index = get_bitmap_index(msr);
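+
+ // get_bitmap_index() returns -1 for out-of-range MSRs; bail out before
+ // using it to index into the bitmap
+ if (index == -1) {
+ return -1;
+ }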
- uint_t major = index / 4;
- uint_t minor = (index % 4) * 2;
- uchar_t val = 0;
- uchar_t mask = 0x3;
+ uint_t major = index / 8;
+ uint_t minor = (index % 8);
+ uchar_t mask = 0x1;
+ uint8_t read_val = (hook_reads) ? 0x1 : 0x0;
+ uint8_t write_val = (hook_writes) ? 0x1 : 0x0;
uint8_t * bitmap = (uint8_t *)(info->msr_map.arch_data);
- if (hook_reads) {
- val |= 0x1;
- }
-
- if (hook_writes) {
- val |= 0x2;
- }
*(bitmap + major) &= ~(mask << minor);
- *(bitmap + major) |= (val << minor);
-#endif
+ *(bitmap + major) |= (read_val << minor);
+
+
+ *(bitmap + 2048 + major) &= ~(mask << minor);
+ *(bitmap + 2048 + major) |= (write_val << minor);
return 0;
}