(void*)(acc->get_r13(core)),
(void*)(acc->get_r14(core)),
(void*)(acc->get_r15(core)));
- DEBUG(" cr0=%p\n cr2=%p\n cr3=%p\n cr4=%p\n cr8=%p\n efer=%p\n",
+ DEBUG(" cr0=%p\n cr2=%p\n cr3=%p\n cr4=%p\n apic_tpr=%p\n efer=%p\n",
(void*)(acc->get_cr0(core)),
(void*)(acc->get_cr2(core)),
(void*)(acc->get_cr3(core)),
(void*)(acc->get_cr4(core)),
- (void*)(acc->get_cr8(core)),
+ (void*)(acc->get_apic_tpr(core)),
(void*)(acc->get_efer(core)));
return 0;
}
static void _delete_link(struct vnet_link * link){
- unsigned long flags;
+ unsigned long flags = 0;
link->sock->ops->release(link->sock);
}
static void deinit_links_list(void){
- struct vnet_link * link, * tmp_link;
+ struct vnet_link * link = NULL, * tmp_link = NULL;
list_for_each_entry_safe(link, tmp_link, &(vnet_brg_s.link_list), node) {
_delete_link(link);
int len,
int link_id){
struct v3_vnet_pkt pkt;
+ memset(&pkt, 0, sizeof(struct v3_vnet_pkt));
pkt.size = len;
+ pkt.dst_type = LINK_NOSET;
pkt.src_type = LINK_EDGE;
pkt.src_id = link_id;
memcpy(pkt.header, buf, ETHERNET_HEADER_LEN);
bridge_send_pkt(struct v3_vm_info * vm,
struct v3_vnet_pkt * pkt,
void * private_data) {
- struct vnet_link * link;
+ struct vnet_link * link = NULL;
if(net_debug >= 2){
DEBUG("VNET Lnx Host Bridge: packet received from VNET Core ... pkt size: %d, link: %d\n",
GET_SET_REG_DECL(cr2)
GET_SET_REG_DECL(cr3)
GET_SET_REG_DECL(cr4)
- GET_SET_REG_DECL(cr8)
+ GET_SET_REG_DECL(apic_tpr)
GET_SET_REG_DECL(efer)
int (*gva_to_hva)(palacios_core_t core, uint64_t gva, uint64_t *hva);
/* The virtual core # of this cpu (what the guest sees this core as) */
uint32_t vcpu_id;
-
+
};
-
/*
// First opcode byte
static const uchar_t cr_access_byte = 0x0f;
int v3_handle_cr4_write(struct guest_info * info);
int v3_handle_cr4_read(struct guest_info * info);
+int v3_handle_cr8_write(struct guest_info * info);
+int v3_handle_cr8_read(struct guest_info * info);
+
int v3_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data);
int v3_handle_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * dst, void * priv_data);
v3_reg_t cr2;
v3_reg_t cr3;
v3_reg_t cr4;
- v3_reg_t cr8;
+ v3_reg_t apic_tpr; // cr8 is (apic_tpr >> 4) & 0xf
v3_reg_t rflags;
v3_reg_t efer;
};
struct vmx_exit_cr_qual * cr_qual);
int v3_vmx_handle_cr4_access(struct guest_info * info,
struct vmx_exit_cr_qual * cr_qual);
+int v3_vmx_handle_cr8_access(struct guest_info * info,
+ struct vmx_exit_cr_qual * cr_qual);
#endif
struct int_cmd_reg int_cmd;
struct log_dst_reg log_dst;
struct dst_fmt_reg dst_fmt;
- struct arb_prio_reg arb_prio;
- struct task_prio_reg task_prio;
- struct proc_prio_reg proc_prio;
+ //struct arb_prio_reg arb_prio; // computed on the fly
+ //struct task_prio_reg task_prio; // stored in core.ctrl_regs.apic_tpr
+ //struct proc_prio_reg proc_prio; // computed on the fly
struct ext_apic_feature_reg ext_apic_feature;
struct spec_eoi_reg spec_eoi;
static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data);
static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data);
+static void set_apic_tpr(struct apic_state *apic, uint32_t val);
+
+
// No locking done
static void init_apic_state(struct apic_state * apic, uint32_t id) {
apic->base_addr = DEFAULT_BASE_ADDR;
// The P6 has 6 LVT entries, so we set the value to (6-1)...
apic->apic_ver.val = 0x80050010;
- apic->task_prio.val = 0x00000000;
- apic->arb_prio.val = 0x00000000;
- apic->proc_prio.val = 0x00000000;
+ set_apic_tpr(apic,0x00000000);
+ // note that arbitration priority and processor priority are derived values
+ // and are computed on the fly
+
apic->log_dst.val = 0x00000000;
apic->dst_fmt.val = 0xffffffff;
apic->spurious_int.val = 0x000000ff;
-
static int get_highest_isr(struct apic_state * apic) {
int i = 0, j = 0;
}
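+// get_isrv/get_irrv return the highest in-service / requested vector, or 0 when
+// there is none; since vectors below 16 are reserved, a return of 0 acts as
+// "no priority" in the class comparisons done below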
+static uint32_t get_isrv(struct apic_state *apic)
+{
+ int isr = get_highest_isr(apic);
+
+ if (isr>=0) {
+ return (uint32_t) isr;
+ } else {
+ return 0;
+ }
+}
+
+static uint32_t get_irrv(struct apic_state *apic)
+{
+ int irr = get_highest_irr(apic);
+
+ if (irr>=0) {
+ return (uint32_t) irr;
+ } else {
+ return 0;
+ }
+}
+
+
+static uint32_t get_apic_tpr(struct apic_state *apic)
+{
+ return (uint32_t) (apic->core->ctrl_regs.apic_tpr); // see comment in vmm_ctrl_regs.c for how this works
+
+}
+
+static void set_apic_tpr(struct apic_state *apic, uint32_t val)
+{
+ PrintDebug("Set apic_tpr to 0x%x from apic reg path\n",val);
+ apic->core->ctrl_regs.apic_tpr = (uint64_t) val; // see comment in vmm_ctrl_regs.c for how this works
+}
+
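+// Processor priority (PPR), derived on the fly: the TPR when its class is at
+// least the class of the highest in-service vector, otherwise that vector's
+// class with a zero subclass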
+static uint32_t get_apic_ppr(struct apic_state *apic)
+{
+ uint32_t tpr = get_apic_tpr(apic);
+ uint32_t isrv = get_isrv(apic);
+ uint32_t tprlevel, isrlevel;
+ uint32_t ppr;
+
+ tprlevel = (tpr >> 4) & 0xf;
+ isrlevel = (isrv >> 4) & 0xf;
+
+ if (tprlevel>=isrlevel) {
+ ppr = tpr; // get class and subclass
+ } else {
+ ppr = (isrlevel << 4); // get class only
+ }
+
+ return ppr;
+}
+
+
+
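+// Arbitration priority (APR), derived on the fly: the TPR (class and subclass)
+// when its class dominates both the highest in-service and highest requested
+// vector classes, otherwise the dominating class with a zero subclass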
+static uint32_t get_apic_apr(struct apic_state *apic)
+{
+ uint32_t tpr = get_apic_tpr(apic);
+ uint32_t isrv = get_isrv(apic);
+ uint32_t irrv = get_irrv(apic);
+ uint32_t tprlevel, isrlevel, irrlevel;
+
+ tprlevel = (tpr >> 4) & 0xf;
+ isrlevel = (isrv >> 4) & 0xf;
+ irrlevel = (irrv >> 4) & 0xf;
+
+ if (tprlevel >= isrlevel) {
+ if (tprlevel >= irrlevel) {
+ return tpr; // get both class and subclass
+ } else {
+ return irrlevel << 4; // get class only
+ }
+ } else {
+ if (isrlevel >= irrlevel) {
+ return isrlevel << 4; // get class only
+ } else {
+ return irrlevel << 4; // get class only
+ }
+ }
+
+}
static int apic_do_eoi(struct guest_info * core, struct apic_state * apic) {
}
} else { // APIC_LOWEST_DELIVERY
struct apic_state * cur_best_apic = NULL;
+ uint32_t cur_best_apr = 0;
uint8_t mda = ipi->dst;
int i;
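+ // lowest-priority delivery: among the matching destinations, keep the APIC
+ // whose arbitration priority class is numerically lowest (ties keep the
+ // earlier candidate)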
if (cur_best_apic == 0) {
cur_best_apic = dest_apic;
- } else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
- cur_best_apic = dest_apic;
+ cur_best_apr = get_apic_apr(dest_apic) & 0xf0;
+ } else {
+ uint32_t dest_apr = get_apic_apr(dest_apic) & 0xf0;
+ if (dest_apr < cur_best_apr) {
+ cur_best_apic = dest_apic;
+ cur_best_apr = dest_apr;
+ }
}
v3_unlock_irqrestore(apic_dev->state_lock, flags);
val = apic->apic_ver.val;
break;
case TPR_OFFSET:
- val = apic->task_prio.val;
+ val = get_apic_tpr(apic);
break;
case APR_OFFSET:
- val = apic->arb_prio.val;
+ val = get_apic_apr(apic);
break;
case PPR_OFFSET:
- val = apic->proc_prio.val;
+ val = get_apic_ppr(apic);
break;
case REMOTE_READ_OFFSET:
val = apic->rem_rd_data;
apic->lapic_id.val = op_val;
break;
case TPR_OFFSET:
- apic->task_prio.val = op_val;
+ set_apic_tpr(apic,op_val);
break;
case LDR_OFFSET:
PrintDebug("apic %u: core %u: setting log_dst.val to 0x%x\n",
// PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->vcpu_id,req_irq,svc_irq);
+
if ((req_irq >= 0) &&
(req_irq > svc_irq)) {
- return 1;
- }
- return 0;
+ // We have a new requested vector that is higher priority than
+ // the vector that is in-service
+
+ uint32_t ppr = get_apic_ppr(apic);
+
+ if ((req_irq & 0xf0) > (ppr & 0xf0)) {
+ // it's also higher priority than the current
+ // processor priority. Therefore this
+ // interrupt can go in now.
+ return 1;
+ } else {
+ // processor priority is currently too high
+ // for this interrupt to go in now.
+ // note that if tpr=0xf?, then ppr=0xf?
+ // and thus all vectors will be masked
+ // as required (tpr=0xf? => all masked)
+ return 0;
+ }
+ } else {
+ // the vector that is in service is higher
+ // priority than any new requested vector
+ return 0;
+ }
}
int req_irq = get_highest_irr(apic);
int svc_irq = get_highest_isr(apic);
- if (svc_irq == -1) {
- return req_irq;
- } else if (svc_irq < req_irq) {
- return req_irq;
- }
- return -1;
+ // for the logic here, see the comments for apic_intr_pending
+ if ((req_irq >=0) &&
+ (req_irq > svc_irq)) {
+
+ uint32_t ppr = get_apic_ppr(apic);
+
+ if ((req_irq & 0xf0) > (ppr & 0xf0)) {
+ return req_irq;
+ } else {
+ // hmm, this should not have happened, but, anyway,
+ // no interrupt is currently ready to go in
+ return -1;
+ }
+ } else {
+ return -1;
+ }
}
-
static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
static int apic_save(struct v3_chkpt_ctx * ctx, void * private_data) {
struct apic_dev_state * apic_state = (struct apic_dev_state *)private_data;
int i = 0;
+ uint32_t temp;
V3_CHKPT_STD_SAVE(ctx, apic_state->num_apics);
V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].int_cmd);
V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].log_dst);
V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].dst_fmt);
- V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].arb_prio);
- V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].task_prio);
- V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].proc_prio);
+
+ // APR and PPR are stored only for compatibility
+ // TPR is in APIC_TPR, APR and PPR are derived
+
+ temp = get_apic_apr(&(apic_state->apics[i]));
+ V3_CHKPT_STD_SAVE(ctx, temp);
+ temp = get_apic_tpr(&(apic_state->apics[i]));
+ V3_CHKPT_STD_SAVE(ctx, temp);
+ temp = get_apic_ppr(&(apic_state->apics[i]));
+ V3_CHKPT_STD_SAVE(ctx, temp);
+
V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].ext_apic_feature);
V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].spec_eoi);
V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].tmr_cur_cnt);
static int apic_load(struct v3_chkpt_ctx * ctx, void * private_data) {
struct apic_dev_state *apic_state = (struct apic_dev_state *)private_data;
int i = 0;
+ uint32_t temp;
V3_CHKPT_STD_LOAD(ctx,apic_state->num_apics);
V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].int_cmd);
V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].log_dst);
V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].dst_fmt);
- V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].arb_prio);
- V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].task_prio);
- V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].proc_prio);
+
+ // APR is ignored
+ V3_CHKPT_STD_LOAD(ctx, temp);
+ // TPR is written back to APIC_TPR
+ V3_CHKPT_STD_LOAD(ctx, temp);
+ set_apic_tpr(&(apic_state->apics[i]),temp);
+ // PPR is ignored
+ V3_CHKPT_STD_LOAD(ctx, temp);
+
V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].ext_apic_feature);
V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].spec_eoi);
V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].tmr_cur_cnt);
GET_SET_CR_IMPL(cr2)
GET_SET_CR_IMPL(cr3)
GET_SET_CR_IMPL(cr4)
-GET_SET_CR_IMPL(cr8)
+GET_SET_CR_IMPL(apic_tpr)
GET_SET_CR_IMPL(efer)
GET_SET_CR_IMPL(rflags)
DECL_IT(cr2)
DECL_IT(cr3)
DECL_IT(cr4)
-DECL_IT(cr8)
+DECL_IT(apic_tpr)
DECL_IT(efer)
DECL_IT(rflags)
PrintDebug("Exiting on interrupts\n");
ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
ctrl_area->instrs.INTR = 1;
+ // The above also ensures that TPR changes (via CR8) stay virtual
+ // However, we need to see TPR writes since they will
+ // affect the virtual apic
+ // We reflect CR8 accesses out to ctrl_regs->apic_tpr
+ ctrl_area->cr_reads.cr8 = 1;
+ ctrl_area->cr_writes.cr8 = 1;
+ // We will do all TPR comparisons in the virtual apic
+ // We also do not want the V_TPR to be able to mask the PIC
+ ctrl_area->guest_ctrl.V_IGN_TPR = 1;
+
+
+
v3_hook_msr(core->vm_info, EFER_MSR,
&v3_handle_efer_read,
&v3_svm_handle_efer_write,
ctrl_area->cr_writes.cr3 = 1;
-
ctrl_area->instrs.INVLPG = 1;
ctrl_area->exceptions.pf = 1;
guest_state->g_pat = 0x7040600070406ULL;
-
} else if (core->shdw_pg_mode == NESTED_PAGING) {
// Flush the TLB on entries/exits
ctrl_area->TLB_CONTROL = 1;
#endif
guest_ctrl->guest_ctrl.V_IRQ = 1;
guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;
+
+ // We ignore the virtual TPR on this injection
+ // TPR/PPR tests have already been done in the APIC.
guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
- guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
+ guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4;   // was 0xf
} else {
switch (v3_intr_pending(info)) {
guest_ctrl->guest_ctrl.V_IRQ = 1;
guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
+
+ // We ignore the virtual TPR on this injection
+ // TPR/PPR tests have already been done in the APIC.
guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
- guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
+ guest_ctrl->guest_ctrl.V_INTR_PRIO = info->intr_core_state.irq_vector >> 4;   // was 0xf
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
guest_state->cr4 = info->ctrl_regs.cr4;
guest_state->dr6 = info->dbg_regs.dr6;
guest_state->dr7 = info->dbg_regs.dr7;
- guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
+
+ // CR8 reads/writes are now intercepted, and the APIC TPR lives in
+ // ctrl_regs.apic_tpr; the V_TPR should be just the class part of that.
+ // This update is here just for completeness. We currently
+ // ignore V_TPR on all injections and do the priority logic
+ // in the APIC.
+ // guest_ctrl->guest_ctrl.V_TPR = ((info->ctrl_regs.apic_tpr) >> 4) & 0xf;
+
+ //guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
+ //
+
guest_state->rflags = info->ctrl_regs.rflags;
guest_state->efer = info->ctrl_regs.efer;
info->ctrl_regs.cr4 = guest_state->cr4;
info->dbg_regs.dr6 = guest_state->dr6;
info->dbg_regs.dr7 = guest_state->dr7;
- info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
+ //
+ // We do not track this anymore
+ // V_TPR is ignored and we do the logic in the APIC
+ //info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
+ //
info->ctrl_regs.rflags = guest_state->rflags;
info->ctrl_regs.efer = guest_state->efer;
return -1;
}
break;
+
+ case SVM_EXIT_CR8_WRITE:
+#ifdef V3_CONFIG_DEBUG_CTRL_REGS
+ PrintDebug("CR8 Read\n");
+#endif
+ if (v3_handle_cr8_read(info) == -1) {
+ return -1;
+ }
+ break;
+
+ case SVM_EXIT_CR8_READ:
+#ifdef V3_CONFIG_DEBUG_CTRL_REGS
+ PrintDebug("CR8 Read\n");
+#endif
+ if (v3_handle_cr8_read(info) == -1) {
+ return -1;
+ }
+ break;
+
case SVM_EXIT_EXCP14: {
addr_t fault_addr = exit_info2;
pf_error_t * error_code = (pf_error_t *)&(exit_info1);
info->yield_start_cycle += info->vm_info->yield_cycle_period;
}
}
-
+
/*
* unconditional cpu yield
V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr0);
V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr2);
V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr4);
- V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr8);
+ V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.apic_tpr);
V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.rflags);
V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.efer);
V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr0);
V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr2);
V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr4);
- V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr8);
+ V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.apic_tpr);
V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.rflags);
V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.efer);
}
+/*
+ The CR8 and APIC TPR interaction is kind of crazy.
+
+ CR8 mandates that the priority class is in bits 3:0
+
+ The interaction of CR8 and an actual APIC is somewhat implementation dependent, but
+ a basic current APIC keeps the priority class in TPR bits 7:4 and the *subclass* in bits 3:0
+
+ The APIC TPR (both fields) can be written as the APIC register
+ A write to CR8 sets the priority class field, and should zero the subclass
+ A read from CR8 gets just the priority class field
+
+ In the apic_tpr storage location, we have:
+
+ zeros [class] [subclass]
+
+ Because of this, an APIC implementation can use apic_tpr to store its TPR
+ In fact, it *should* do so, since otherwise its TPR may get out of sync with the architected TPR
+
+ On a CR8 read, we return just
+
+ zeros 0000 [class]
+
+ On a CR8 write, we set the register to
+
+ zeros [class] 0000
+
+*/
+
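+// Example of the mapping above: "mov cr8, rax" with rax=0x3 leaves 0x30 in
+// apic_tpr, while "mov rax, cr8" with apic_tpr=0x3c returns 0x3 (the subclass
+// written through the APIC register is not visible via CR8).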
+int v3_handle_cr8_write(struct guest_info * info) {
+ int ret;
+ uchar_t instr[15];
+ struct x86_instr dec_instr;
+
+ if (info->mem_mode == PHYSICAL_MEM) {
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ } else {
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ }
+
+ if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
+ PrintError("Could not decode instruction\n");
+ return -1;
+ }
+
+ if (dec_instr.op_type == V3_OP_MOV2CR) {
+ PrintDebug("MOV2CR8 (cpu_mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
+
+ if ((info->cpu_mode == LONG) ||
+ (info->cpu_mode == LONG_32_COMPAT)) {
+ uint64_t *val = (uint64_t *)(dec_instr.src_operand.operand);
+
+ info->ctrl_regs.apic_tpr = (*val & 0xf) << 4;
+
+ V3_Print("Write of CR8 sets apic_tpr to 0x%llx\n",info->ctrl_regs.apic_tpr);
+
+ } else {
+ // probably should raise exception here
+ }
+ } else {
+ PrintError("Unhandled opcode in handle_cr8_write\n");
+ return -1;
+ }
+
+ info->rip += dec_instr.instr_length;
+
+ return 0;
+}
+
+
+
+int v3_handle_cr8_read(struct guest_info * info) {
+ uchar_t instr[15];
+ int ret;
+ struct x86_instr dec_instr;
+
+ if (info->mem_mode == PHYSICAL_MEM) {
+ ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ } else {
+ ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
+ }
+
+ if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
+ PrintError("Could not decode instruction\n");
+ return -1;
+ }
+
+ if (dec_instr.op_type == V3_OP_MOVCR2) {
+ PrintDebug("MOVCR82 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
+
+ if ((info->cpu_mode == LONG) ||
+ (info->cpu_mode == LONG_32_COMPAT)) {
+ uint64_t *dst_reg = (uint64_t *)(dec_instr.dst_operand.operand);
+
+ *dst_reg = (info->ctrl_regs.apic_tpr >> 4) & 0xf;
+
+ V3_Print("Read of CR8 (apic_tpr) returns 0x%llx\n",*dst_reg);
+
+ } else {
+ // probably should raise exception
+ }
+
+ } else {
+ PrintError("Unhandled opcode in handle_cr8_read\n");
+ return -1;
+ }
+
+ info->rip += dec_instr.instr_length;
+
+ return 0;
+}
+
+
int v3_handle_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * dst, void * priv_data) {
PrintDebug("EFER Read HI=%x LO=%x\n", core->shdw_pg_state.guest_efer.hi, core->shdw_pg_state.guest_efer.lo);
vmx_state->pin_ctrls.ext_int_exit = 1;
+
/* We enable the preemption timer by default to measure accurate guest time */
if (avail_pin_ctrls.active_preempt_timer) {
V3_Print("VMX Preemption Timer is available\n");
// Setup Guests initial PAT field
vmx_ret |= check_vmcs_write(VMCS_GUEST_PAT, 0x0007040600070406LL);
+ // Capture CR8 mods so that we can keep the apic_tpr correct
+ vmx_state->pri_proc_ctrls.cr8_ld_exit = 1;
+ vmx_state->pri_proc_ctrls.cr8_str_exit = 1;
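+ // (with both exits set, every guest CR8 access should reach
+ // v3_vmx_handle_cr8_access, assuming the TPR-shadow / virtual-APIC page
+ // feature is not enabled elsewhere)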
+
+
/* Setup paging */
if (core->shdw_pg_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
#endif
+
if (v3_update_vmcs_ctrl_fields(core)) {
return -1;
}
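+// In the CR-access exit qualification, access_type 0 is MOV to CR and 1 is
+// MOV from CR; types 2 (CLTS) and 3 (LMSW) apply only to CR0, so they should
+// never appear for CR8.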
+int v3_vmx_handle_cr8_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
+ if (cr_qual->access_type < 2) {
+
+ if (cr_qual->access_type == 0) {
+ if (v3_handle_cr8_write(info) != 0) {
+ PrintError("Could not handle CR8 write\n");
+ return -1;
+ }
+ } else {
+ if (v3_handle_cr8_read(info) != 0) {
+ PrintError("Could not handle CR8 read\n");
+ return -1;
+ }
+ }
+
+ return 0;
+ }
+
+ PrintError("Invalid CR8 Access type?? (type=%d)\n", cr_qual->access_type);
+ return -1;
+}
+
static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
if (info->shdw_pg_mode == SHADOW_PAGING) {
return -1;
}
break;
+ case 8:
+ if (v3_vmx_handle_cr8_access(info, cr_qual) == -1) {
+ PrintError("Error in CR8 access handler\n");
+ return -1;
+ }
+ break;
default:
PrintError("Unhandled CR access: %d\n", cr_qual->cr_id);
return -1;