#include <palacios/vmm_types.h>
-#ifndef CONFIG_DEBUG_APIC
+#include <palacios/vmm_queue.h>
+#include <palacios/vmm_lock.h>
+
+/* The locking in this file is nasty.
+ * There are 3 different locking approaches that are taken, depending on the APIC operation
+ * 1. Queue locks. Actual irq insertions are done via queueing irq ops at the dest apic.
+ * The destination apic's core is responsible for draining the queue, and actually
+ * setting the vector table.
+ * 2. State lock. This is a standard lock taken when internal apic state is read/written.
+ * When an irq's destination is determined this lock is taken to examine the apic's
+ * addressability.
+ * 3. VM barrier lock. This is taken when actual VM core state is changed (via SIPI).
+ */
+
+
+
+#ifndef V3_CONFIG_DEBUG_APIC
#undef PrintDebug
#define PrintDebug(fmt, args...)
-#endif
+#else
-#ifdef CONFIG_DEBUG_APIC
static char * shorthand_str[] = {
"(no shorthand)",
"(self)",
"(Start Up)",
"(ExtInt)",
};
+
#endif
typedef enum { APIC_TMR_INT, APIC_THERM_INT, APIC_PERF_INT,
APIC_LINT0_INT, APIC_LINT1_INT, APIC_ERR_INT } apic_irq_type_t;
-#define APIC_FIXED_DELIVERY 0x0
-#define APIC_SMI_DELIVERY 0x2
-#define APIC_NMI_DELIVERY 0x4
-#define APIC_INIT_DELIVERY 0x5
-#define APIC_EXTINT_DELIVERY 0x7
+
+#define APIC_SHORTHAND_NONE 0x0
+#define APIC_SHORTHAND_SELF 0x1
+#define APIC_SHORTHAND_ALL 0x2
+#define APIC_SHORTHAND_ALL_BUT_ME 0x3
+
+#define APIC_DEST_PHYSICAL 0x0
+#define APIC_DEST_LOGICAL 0x1
#define BASE_ADDR_MSR 0x0000001B
+
+
+/* One queued irq delivery destined for a particular apic.
+ * Queued (under irq_queue.lock) by add_apic_irq_entry() from any core;
+ * drained, applied to the vector registers, and freed by
+ * drain_irq_entries() on the destination apic's own core. */
+struct irq_queue_entry {
+ uint32_t vector;
+ int (*ack)(struct guest_info * core, uint32_t irq, void * private_data); // optional callback invoked at EOI time (see apic_do_eoi)
+ void * private_data; // opaque argument handed back to ack()
+
+ struct list_head list_node; // linkage on apic->irq_queue.entries
+};
+
+
+
+
typedef enum {INIT_ST,
SIPI,
STARTED} ipi_state_t;
struct int_cmd_reg int_cmd;
struct log_dst_reg log_dst;
struct dst_fmt_reg dst_fmt;
- struct arb_prio_reg arb_prio;
- struct task_prio_reg task_prio;
- struct proc_prio_reg proc_prio;
+ //struct arb_prio_reg arb_prio; // computed on the fly
+ //struct task_prio_reg task_prio; // stored in core.ctrl_regs.apic_tpr
+ //struct proc_prio_reg proc_prio; // computed on the fly
struct ext_apic_feature_reg ext_apic_feature;
struct spec_eoi_reg spec_eoi;
uint32_t tmr_cur_cnt;
uint32_t tmr_init_cnt;
-
+ uint32_t missed_ints;
struct local_vec_tbl_reg ext_intr_vec_tbl[4];
uint8_t int_en_reg[32];
uint8_t trig_mode_reg[32];
+ struct {
+ int (*ack)(struct guest_info * core, uint32_t irq, void * private_data);
+ void * private_data;
+ } irq_ack_cbs[256];
+
struct guest_info * core;
void * controller_handle;
struct v3_timer * timer;
+
+ struct {
+ v3_lock_t lock;
+
+ uint64_t num_entries;
+ struct list_head entries;
+ } irq_queue ;
+
uint32_t eoi;
- v3_lock_t lock;
- // debug
- uint8_t in_icr;
};
struct apic_dev_state {
int num_apics;
- v3_lock_t ipi_lock; // acquired by route_ipi - only one IPI active at a time
+
+ v3_lock_t state_lock;
struct apic_state apics[0];
} __attribute__((packed));
static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data);
static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data);
-// No lcoking done
+static void set_apic_tpr(struct apic_state *apic, uint32_t val);
+
+/* Returns nonzero if this apic belongs to the bootstrap processor.
+ * Bit 8 of the IA32_APIC_BASE MSR is the BSP flag. */
+static int is_apic_bsp(struct apic_state * apic) {
+ return ((apic->base_addr_msr.value & 0x0000000000000100LL) != 0);
+}
+
+
+
+
+// No locking done
static void init_apic_state(struct apic_state * apic, uint32_t id) {
apic->base_addr = DEFAULT_BASE_ADDR;
// same base address regardless of ap or main
apic->base_addr_msr.value |= ((uint64_t)DEFAULT_BASE_ADDR);
- PrintDebug("apic %u: (init_apic_state): msr=0x%llx\n",id, apic->base_addr_msr.value);
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u: (init_apic_state): msr=0x%llx\n",id, apic->base_addr_msr.value);
- PrintDebug("apic %u: (init_apic_state): Sizeof Interrupt Request Register %d, should be 32\n",
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u: (init_apic_state): Sizeof Interrupt Request Register %d, should be 32\n",
id, (uint_t)sizeof(apic->int_req_reg));
memset(apic->int_req_reg, 0, sizeof(apic->int_req_reg));
apic->rem_rd_data = 0x00000000;
apic->tmr_init_cnt = 0x00000000;
apic->tmr_cur_cnt = 0x00000000;
+ apic->missed_ints = 0;
- apic->lapic_id.val = id;
+ // note that it's the *lower* 24 bits that are
+ // reserved, not the upper 24.
+ apic->lapic_id.val = 0;
+ apic->lapic_id.apic_id = id;
apic->ipi_state = INIT_ST;
// The P6 has 6 LVT entries, so we set the value to (6-1)...
apic->apic_ver.val = 0x80050010;
- apic->task_prio.val = 0x00000000;
- apic->arb_prio.val = 0x00000000;
- apic->proc_prio.val = 0x00000000;
+ set_apic_tpr(apic,0x00000000);
+ // note that arbitration priority and processor priority are derived values
+ // and are computed on the fly
+
apic->log_dst.val = 0x00000000;
apic->dst_fmt.val = 0xffffffff;
apic->spurious_int.val = 0x000000ff;
apic->ext_apic_ctrl.val = 0x00000000;
apic->spec_eoi.val = 0x00000000;
- v3_lock_init(&(apic->lock));
- apic->in_ipi=0;
+ INIT_LIST_HEAD(&(apic->irq_queue.entries));
+ v3_lock_init(&(apic->irq_queue.lock));
+ apic->irq_queue.num_entries = 0;
- apic->in_icr=0;
}
-// MSR handler - locks apic itself
+
static int read_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t * dst, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+
+ PrintDebug(core->vm_info, core, "apic %u: core %u: MSR read\n", apic->lapic_id.val, core->vcpu_id);
- PrintDebug("apic %u: core %u: MSR read\n", apic->lapic_id.val, core->cpu_id);
- v3_lock(apic->lock);
dst->value = apic->base_addr;
- v3_unlock(apic->lock);
+
return 0;
}
-// MSR handler - locks apic itself
+
static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
- struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id, apic->base_addr);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+ struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, apic->base_addr);
- PrintDebug("apic %u: core %u: MSR write\n", apic->lapic_id.val, core->cpu_id);
+ PrintDebug(core->vm_info, core, "apic %u: core %u: MSR write\n", apic->lapic_id.val, core->vcpu_id);
if (old_reg == NULL) {
// uh oh...
- PrintError("apic %u: core %u: APIC Base address region does not exit...\n",
- apic->lapic_id.val, core->cpu_id);
+ PrintError(core->vm_info, core, "apic %u: core %u: APIC Base address region does not exit...\n",
+ apic->lapic_id.val, core->vcpu_id);
return -1;
}
- v3_lock(apic->lock);
+
v3_delete_mem_region(core->vm_info, old_reg);
apic->base_addr = src.value;
- if (v3_hook_full_mem(core->vm_info, core->cpu_id, apic->base_addr,
+ if (v3_hook_full_mem(core->vm_info, core->vcpu_id, apic->base_addr,
apic->base_addr + PAGE_SIZE_4KB,
apic_read, apic_write, apic_dev) == -1) {
- PrintError("apic %u: core %u: Could not hook new APIC Base address\n",
- apic->lapic_id.val, core->cpu_id);
- v3_unlock(apic->lock);
+ PrintError(core->vm_info, core, "apic %u: core %u: Could not hook new APIC Base address\n",
+ apic->lapic_id.val, core->vcpu_id);
+
return -1;
}
- v3_unlock(apic->lock);
+
return 0;
}
+
+
+
// irq_num is the bit offset into a 256 bit buffer...
-// return values
-// -1 = error
-// 0 = OK, no interrupt needed now
-// 1 = OK, interrupt needed now
-// the caller is expeced to have locked the apic
-static int activate_apic_irq_nolock(struct apic_state * apic, uint32_t irq_num) {
+static int activate_apic_irq(struct apic_state * apic, uint32_t irq_num,
+ int (*ack)(struct guest_info * core, uint32_t irq, void * private_data),
+ void * private_data) {
int major_offset = (irq_num & ~0x00000007) >> 3;
int minor_offset = irq_num & 0x00000007;
uint8_t * req_location = apic->int_req_reg + major_offset;
uint8_t flag = 0x1 << minor_offset;
- if (irq_num <= 15 || irq_num>255) {
- PrintError("apic %u: core %d: Attempting to raise an invalid interrupt: %d\n",
- apic->lapic_id.val, apic->core->cpu_id, irq_num);
- return -1;
- }
-
-
- PrintDebug("apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->vcpu_id, irq_num);
if (*req_location & flag) {
- PrintDebug("Interrupt %d coallescing\n", irq_num);
+ PrintDebug(VM_NONE, VCORE_NONE, "Interrupt %d coallescing\n", irq_num);
return 0;
}
if (*en_location & flag) {
*req_location |= flag;
-
- if (apic->in_icr) {
- PrintError("apic %u: core %d: activate_apic_irq_nolock to deliver irq 0x%x when in_icr=1\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
- // return 0;
- }
+ apic->irq_ack_cbs[irq_num].ack = ack;
+ apic->irq_ack_cbs[irq_num].private_data = private_data;
return 1;
} else {
- PrintDebug("apic %u: core %d: Interrupt not enabled... %.2x\n",
- apic->lapic_id.val, apic->core->cpu_id,*en_location);
- return 0;
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u: core %d: Interrupt not enabled... %.2x\n",
+ apic->lapic_id.val, apic->core->vcpu_id, *en_location);
}
+ return 0;
}
-// Caller is expected to have locked the apic
+
+/* Queue an irq (with optional EOI ack callback) for later delivery on the
+ * destination apic's own core; drain_irq_entries() performs the actual
+ * vector-register update there.
+ * Safe to call from any core: the queue itself is guarded by irq_queue.lock.
+ * Returns 0 on success, -1 on an invalid vector or allocation failure. */
+static int add_apic_irq_entry(struct apic_state * apic, uint32_t irq_num,
+ int (*ack)(struct guest_info * core, uint32_t irq, void * private_data),
+ void * private_data) {
+ unsigned int flags = 0;
+ struct irq_queue_entry * entry = NULL;
+
+ if (irq_num <= 15) {
+ PrintError(VM_NONE, VCORE_NONE, "core %d: Attempting to raise an invalid interrupt: %d\n",
+ apic->core->vcpu_id, irq_num);
+ return -1;
+ }
+
+ /* NOTE(review): no upper-bound check here, but activate_apic_irq()
+ * later uses this vector to index irq_ack_cbs[256]. The pre-patch
+ * code also rejected irq_num > 255 — confirm no caller can pass a
+ * vector above 255, or restore that check. */
+
+ entry = V3_Malloc(sizeof(struct irq_queue_entry));
+
+ if (entry == NULL) {
+ PrintError(VM_NONE, VCORE_NONE, "Could not allocate irq queue entry\n");
+ return -1;
+ }
+
+ entry->vector = irq_num;
+ entry->ack = ack;
+ entry->private_data = private_data;
+
+ flags = v3_lock_irqsave(apic->irq_queue.lock);
+
+ list_add_tail(&(entry->list_node), &(apic->irq_queue.entries));
+ apic->irq_queue.num_entries++;
+
+ v3_unlock_irqrestore(apic->irq_queue.lock, flags);
+
+
+ return 0;
+}
+
+/* Drain every queued irq into this apic's request register.
+ * Intended to run on the apic's own core, so activate_apic_irq() can
+ * modify the vector registers without further locking; only the queue
+ * pop itself is performed under irq_queue.lock (dropped before the
+ * activation so the lock is never held across it). */
+static void drain_irq_entries(struct apic_state * apic) {
+
+ while (1) {
+ unsigned int flags = 0;
+ struct irq_queue_entry * entry = NULL; // stays NULL if the queue was empty
+
+ flags = v3_lock_irqsave(apic->irq_queue.lock);
+
+ if (!list_empty(&(apic->irq_queue.entries))) {
+ struct list_head * q_entry = apic->irq_queue.entries.next;
+ entry = list_entry(q_entry, struct irq_queue_entry, list_node);
+
+ apic->irq_queue.num_entries--;
+ list_del(q_entry);
+ }
+
+ v3_unlock_irqrestore(apic->irq_queue.lock, flags);
+
+ if (entry == NULL) {
+ break; // queue exhausted
+ }
+
+ activate_apic_irq(apic, entry->vector, entry->ack, entry->private_data);
+
+ V3_Free(entry);
+ }
+
+}
+
+
+
static int get_highest_isr(struct apic_state * apic) {
int i = 0, j = 0;
- // We iterate backwards to find the highest priority
+ // We iterate backwards to find the highest priority in-service request
for (i = 31; i >= 0; i--) {
uint8_t * svc_major = apic->int_svc_reg + i;
}
-// Caller is expected to have locked the apic
+
static int get_highest_irr(struct apic_state * apic) {
int i = 0, j = 0;
- // We iterate backwards to find the highest priority
+ // We iterate backwards to find the highest priority enabled requested interrupt
for (i = 31; i >= 0; i--) {
uint8_t * req_major = apic->int_req_reg + i;
-
+ uint8_t * en_major = apic->int_en_reg + i;
+
if ((*req_major) & 0xff) {
for (j = 7; j >= 0; j--) {
uint8_t flag = 0x1 << j;
- if ((*req_major) & flag) {
+ if ((*req_major & *en_major) & flag) {
return ((i * 8) + j);
}
}
}
+/* Highest in-service vector, or 0 when nothing is in service. */
+static uint32_t get_isrv(struct apic_state *apic)
+{
+ int isr = get_highest_isr(apic);
+
+ if (isr>=0) {
+ return (uint32_t) isr;
+ } else {
+ return 0;
+ }
+}
+
+/* Highest pending (requested and enabled) vector, or 0 when none. */
+static uint32_t get_irrv(struct apic_state *apic)
+{
+ int irr = get_highest_irr(apic);
+
+ if (irr>=0) {
+ return (uint32_t) irr;
+ } else {
+ return 0;
+ }
+}
+
+
+/* Task priority register (TPR). Backed by core->ctrl_regs.apic_tpr
+ * rather than by local apic state. */
+static uint32_t get_apic_tpr(struct apic_state *apic)
+{
+ return (uint32_t) (apic->core->ctrl_regs.apic_tpr); // see comment in vmm_ctrl_regs.c for how this works
+
+}
+
+/* Store the task priority register via its core->ctrl_regs.apic_tpr
+ * backing (the apic register write path lands here). */
+static void set_apic_tpr(struct apic_state *apic, uint32_t val)
+{
+ PrintDebug(VM_NONE, VCORE_NONE, "Set apic_tpr to 0x%x from apic reg path\n",val);
+ apic->core->ctrl_regs.apic_tpr = (uint64_t) val; // see comment in vmm_ctrl_regs.c for how this works
+}
+
+/* Processor priority (PPR), computed on the fly as the max of the TPR
+ * and the priority class of the highest in-service vector.
+ * When TPR's class dominates, the full TPR (class + subclass) is
+ * returned; otherwise only the in-service class with a zero subclass. */
+static uint32_t get_apic_ppr(struct apic_state *apic)
+{
+ uint32_t tpr = get_apic_tpr(apic);
+ uint32_t isrv = get_isrv(apic);
+ uint32_t tprlevel, isrlevel;
+ uint32_t ppr;
+
+ tprlevel = (tpr >> 4) & 0xf; // priority class = bits 7:4
+ isrlevel = (isrv >> 4) & 0xf;
+
+ if (tprlevel>=isrlevel) {
+ ppr = tpr; // get class and subclass
+ } else {
+ ppr = (isrlevel << 4); // get class only
+ }
+
+ return ppr;
+}
+
+
+
+/* Arbitration priority (APR), computed on the fly from the TPR, the
+ * highest in-service vector, and the highest pending vector. The
+ * largest priority class wins; only when TPR dominates both is the
+ * full TPR (class + subclass) returned, otherwise class only. */
+static uint32_t get_apic_apr(struct apic_state *apic)
+{
+ uint32_t tpr = get_apic_tpr(apic);
+ uint32_t isrv = get_isrv(apic);
+ uint32_t irrv = get_irrv(apic);
+ uint32_t tprlevel, isrlevel, irrlevel;
+
+ tprlevel = (tpr >> 4) & 0xf; // priority class = bits 7:4
+ isrlevel = (isrv >> 4) & 0xf;
+ irrlevel = (irrv >> 4) & 0xf;
-// Caller is expected to have locked the apic
-static int apic_do_eoi(struct apic_state * apic) {
+ if (tprlevel >= isrlevel) {
+ if (tprlevel >= irrlevel) {
+ return tpr; // get both class and subclass
+ } else {
+ return irrlevel << 4; // get class only
+ }
+ } else {
+ if (isrlevel >= irrlevel) {
+ return isrlevel << 4; // get class only
+ } else {
+ return irrlevel << 4; // get class only
+ }
+ }
+
+}
+
+
+static int apic_do_eoi(struct guest_info * core, struct apic_state * apic) {
int isr_irq = get_highest_isr(apic);
if (isr_irq != -1) {
uint8_t flag = 0x1 << minor_offset;
uint8_t * svc_location = apic->int_svc_reg + major_offset;
- PrintDebug("apic %u: core ?: Received APIC EOI for IRQ %d\n", apic->lapic_id.val,isr_irq);
+ PrintDebug(core->vm_info, core, "apic %u: core ?: Received APIC EOI for IRQ %d\n", apic->lapic_id.val,isr_irq);
*svc_location &= ~flag;
-#ifdef CONFIG_CRAY_XT
+ if (apic->irq_ack_cbs[isr_irq].ack) {
+ apic->irq_ack_cbs[isr_irq].ack(core, isr_irq, apic->irq_ack_cbs[isr_irq].private_data);
+ }
+
+#ifdef V3_CONFIG_CRAY_XT
if ((isr_irq == 238) ||
(isr_irq == 239)) {
- PrintDebug("apic %u: core ?: Acking IRQ %d\n", apic->lapic_id.val,isr_irq);
+ PrintDebug(core->vm_info, core, "apic %u: core ?: Acking IRQ %d\n", apic->lapic_id.val,isr_irq);
}
if (isr_irq == 238) {
}
#endif
} else {
- //PrintError("apic %u: core ?: Spurious EOI...\n",apic->lapic_id.val);
+ //PrintError(core->vm_info, core, "apic %u: core ?: Spurious EOI...\n",apic->lapic_id.val);
}
return 0;
}
-// Caller is expected to have locked the apic
-static int activate_internal_irq_nolock(struct apic_state * apic, apic_irq_type_t int_type) {
+
+static int activate_internal_irq(struct apic_state * apic, apic_irq_type_t int_type) {
uint32_t vec_num = 0;
uint32_t del_mode = 0;
int masked = 0;
switch (int_type) {
case APIC_TMR_INT:
vec_num = apic->tmr_vec_tbl.vec;
- del_mode = APIC_FIXED_DELIVERY;
+ del_mode = IPI_FIXED;
masked = apic->tmr_vec_tbl.mask;
break;
case APIC_THERM_INT:
break;
case APIC_ERR_INT:
vec_num = apic->err_vec_tbl.vec;
- del_mode = APIC_FIXED_DELIVERY;
+ del_mode = IPI_FIXED;
masked = apic->err_vec_tbl.mask;
break;
default:
- PrintError("apic %u: core ?: Invalid APIC interrupt type\n", apic->lapic_id.val);
+ PrintError(VM_NONE, VCORE_NONE, "apic %u: core ?: Invalid APIC interrupt type\n", apic->lapic_id.val);
return -1;
}
// interrupt is masked, don't send
if (masked == 1) {
- PrintDebug("apic %u: core ?: Inerrupt is masked\n", apic->lapic_id.val);
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u: core ?: Inerrupt is masked\n", apic->lapic_id.val);
return 0;
}
- if (del_mode == APIC_FIXED_DELIVERY) {
- //PrintDebug("Activating internal APIC IRQ %d\n", vec_num);
- return activate_apic_irq_nolock(apic, vec_num);
+ if (del_mode == IPI_FIXED) {
+ //PrintDebug(VM_NONE, VCORE_NONE, "Activating internal APIC IRQ %d\n", vec_num);
+ return add_apic_irq_entry(apic, vec_num, NULL, NULL);
} else {
- PrintError("apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
+ PrintError(VM_NONE, VCORE_NONE, "apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
return -1;
}
}
-// Caller is expected to have locked the destination apic
-static inline int should_deliver_cluster_ipi(struct guest_info * dst_core,
+
+static inline int should_deliver_cluster_ipi(struct apic_dev_state * apic_dev,
+ struct guest_info * dst_core,
struct apic_state * dst_apic, uint8_t mda) {
+ int ret = 0;
+
+
if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) && /* (I am in the cluster and */
((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { /* I am in the set) */
+ ret = 1;
+ } else {
+ ret = 0;
+ }
- PrintDebug("apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, mda,
+
+ if (ret == 1) {
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
+ dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
dst_apic->log_dst.dst_log_id);
-
- return 1;
} else {
- PrintDebug("apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, mda,
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
+ dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
dst_apic->log_dst.dst_log_id);
- return 0;
}
+
+ return ret;
+
}
-// Caller is expected to have locked the destiation apic
-static inline int should_deliver_flat_ipi(struct guest_info * dst_core,
+static inline int should_deliver_flat_ipi(struct apic_dev_state * apic_dev,
+ struct guest_info * dst_core,
struct apic_state * dst_apic, uint8_t mda) {
- if (dst_apic->log_dst.dst_log_id & mda) { // I am in the set
+ int ret = 0;
- PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, mda,
- dst_apic->log_dst.dst_log_id);
- return 1;
+ if ((dst_apic->log_dst.dst_log_id & mda) != 0) { // I am in the set
+ ret = 1;
+ } else {
+ ret = 0;
+ }
- } else {
- PrintDebug("apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, mda,
+ if (ret == 1) {
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
+ dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
dst_apic->log_dst.dst_log_id);
- return 0;
- }
+ } else {
+ PrintDebug(VM_NONE, VCORE_NONE, "apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
+ dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
+ dst_apic->log_dst.dst_log_id);
+ }
+
+
+ return ret;
}
-// Caller is expected to have locked the destiation apic
-static int should_deliver_ipi(struct guest_info * dst_core,
+
+static int should_deliver_ipi(struct apic_dev_state * apic_dev,
+ struct guest_info * dst_core,
struct apic_state * dst_apic, uint8_t mda) {
+ addr_t flags = 0;
+ int ret = 0;
+ flags = v3_lock_irqsave(apic_dev->state_lock);
if (dst_apic->dst_fmt.model == 0xf) {
if (mda == 0xff) {
/* always deliver broadcast */
- return 1;
+ ret = 1;
+ } else {
+ ret = should_deliver_flat_ipi(apic_dev, dst_core, dst_apic, mda);
}
-
- return should_deliver_flat_ipi(dst_core, dst_apic, mda);
-
} else if (dst_apic->dst_fmt.model == 0x0) {
if (mda == 0xff) {
/* always deliver broadcast */
- return 1;
+ ret = 1;
+ } else {
+ ret = should_deliver_cluster_ipi(apic_dev, dst_core, dst_apic, mda);
}
- return should_deliver_cluster_ipi(dst_core, dst_apic, mda);
-
} else {
- PrintError("apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, dst_apic->dst_fmt.model);
- return -1;
+ ret = -1;
}
+
+ v3_unlock_irqrestore(apic_dev->state_lock, flags);
+
+
+ if (ret == -1) {
+ PrintError(VM_NONE, VCORE_NONE, "apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
+ dst_apic->lapic_id.val, dst_core->vcpu_id, dst_apic->dst_fmt.model);
+ }
+
+ return ret;
}
-// Caller is expected to have locked the source apic (if any) and destination apic
+
+
+
+// Only the src_apic pointer is used
static int deliver_ipi(struct apic_state * src_apic,
struct apic_state * dst_apic,
- uint32_t vector, uint8_t del_mode) {
+ struct v3_gen_ipi * ipi) {
struct guest_info * dst_core = dst_apic->core;
- int do_xcall;
- switch (del_mode) {
- case 0: //fixed
- case 1: // lowest priority - caller needs to have decided which apic to deliver to!
- PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->cpu_id);
+ switch (ipi->mode) {
- do_xcall=activate_apic_irq_nolock(dst_apic, vector);
-
- if (do_xcall<0) {
- PrintError("Failed to activate apic irq!\n");
- return -1;
- }
+ case IPI_FIXED:
+ case IPI_LOWEST_PRIO: {
+ // lowest priority -
+ // caller needs to have decided which apic to deliver to!
- if (do_xcall && (dst_apic != src_apic)) {
- // Assume core # is same as logical processor for now
- // TODO FIX THIS FIX THIS
- // THERE SHOULD BE: guestapicid->virtualapicid map,
- // cpu_id->logical processor map
- // host maitains logical proc->phsysical proc
- PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "delivering IRQ %d to core %u\n", ipi->vector, dst_core->vcpu_id);
-#ifdef CONFIG_MULTITHREAD_OS
- v3_interrupt_cpu(dst_core->vm_info, dst_core->cpu_id, 0);
-#else
- V3_ASSERT(0);
-#endif
+ add_apic_irq_entry(dst_apic, ipi->vector, ipi->ack, ipi->private_data);
+
+ if (dst_apic != src_apic) {
+ PrintDebug(VM_NONE, VCORE_NONE, " non-local core with new interrupt, forcing it to exit now\n");
+ v3_interrupt_cpu(dst_core->vm_info, dst_core->pcpu_id, 0);
}
break;
- case 5: { //INIT
+ }
+ case IPI_INIT: {
+
+ PrintDebug(VM_NONE, VCORE_NONE, " INIT delivery to core %u\n", dst_core->vcpu_id);
- PrintDebug(" INIT delivery to core %u\n", dst_core->cpu_id);
+ if (is_apic_bsp(dst_apic)) {
+ PrintError(VM_NONE, VCORE_NONE, "Attempted to INIT BSP CPU. Ignoring since I have no idea what the hell to do...\n");
+ break;
+ }
- // TODO: any APIC reset on dest core (shouldn't be needed, but not sure...)
- // Sanity check
if (dst_apic->ipi_state != INIT_ST) {
- PrintError(" Warning: core %u is not in INIT state (mode = %d), ignored (assuming this is the deassert)\n",
- dst_core->cpu_id, dst_apic->ipi_state);
- // Only a warning, since INIT INIT SIPI is common
- break;
+ v3_raise_barrier(dst_core->vm_info, src_apic->core);
+ dst_core->core_run_state = CORE_STOPPED;
+ dst_apic->ipi_state = INIT_ST;
+ v3_lower_barrier(dst_core->vm_info);
+
}
// We transition the target core to SIPI state
// in both cases, it will quickly notice this transition
// in particular, we should not need to force an exit here
- PrintDebug(" INIT delivery done\n");
+ PrintDebug(VM_NONE, VCORE_NONE, " INIT delivery done\n");
break;
}
- case 6: { //SIPI
+ case IPI_SIPI: {
// Sanity check
if (dst_apic->ipi_state != SIPI) {
- PrintError(" core %u is not in SIPI state (mode = %d), ignored!\n",
- dst_core->cpu_id, dst_apic->ipi_state);
+ PrintError(VM_NONE, VCORE_NONE, " core %u is not in SIPI state (mode = %d), ignored!\n",
+ dst_core->vcpu_id, dst_apic->ipi_state);
break;
}
- // Write the RIP, CS, and descriptor
- // assume the rest is already good to go
- //
- // vector VV -> rip at 0
- // CS = VV00
- // This means we start executing at linear address VV000
- //
- // So the selector needs to be VV00
- // and the base needs to be VV000
- //
- dst_core->rip = 0;
- dst_core->segments.cs.selector = vector << 8;
- dst_core->segments.cs.limit = 0xffff;
- dst_core->segments.cs.base = vector << 12;
-
- PrintDebug(" SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
- vector, dst_core->segments.cs.selector, dst_core->cpu_id);
+ v3_reset_vm_core(dst_core, ipi->vector);
+
+ PrintDebug(VM_NONE, VCORE_NONE, " SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
+ ipi->vector, dst_core->segments.cs.selector, dst_core->vcpu_id);
// Maybe need to adjust the APIC?
// We transition the target core to SIPI state
dst_core->core_run_state = CORE_RUNNING; // note: locking should not be needed here
dst_apic->ipi_state = STARTED;
-
+
// As with INIT, we should not need to do anything else
-
- PrintDebug(" SIPI delivery done\n");
-
+
+ PrintDebug(VM_NONE, VCORE_NONE, " SIPI delivery done\n");
+
break;
}
- case 2: // SMI
- case 3: // reserved
- case 4: // NMI
- case 7: // ExtInt
+
+ case IPI_EXTINT: // EXTINT
+ /* Two possible things to do here:
+ * 1. Ignore the IPI and assume the 8259a (PIC) will handle it
+ * 2. Add 32 to the vector and inject it...
+ * We probably just want to do 1 here, and assume the raise_irq() will hit the 8259a.
+ */
+ return 0;
+
+ case IPI_SMI:
+ case IPI_RES1: // reserved
+ case IPI_NMI:
default:
- PrintError("IPI %d delivery is unsupported\n", del_mode);
+ PrintError(VM_NONE, VCORE_NONE, "IPI %d delivery is unsupported\n", ipi->mode);
return -1;
}
-
+
return 0;
-
+
}
-// Caller is expected to have locked the source apic, if any
-// route_ipi will lock the destination apics
-/*
+/* Map a physical-mode destination apic id to its apic_state, under
+ * state_lock. Returns NULL when no registered apic carries that id.
+ * NOTE(review): the dst_idx > 0 fast path excludes apic id 0, and its
+ * result is not returned early, so the full scan below always runs
+ * anyway (and lacks a break) — the fast path is effectively dead code;
+ * consider dst_idx < num_apics with an early return, plus a break in
+ * the scan loop. */
+static struct apic_state * find_physical_apic(struct apic_dev_state * apic_dev, uint32_t dst_idx) {
+ struct apic_state * dst_apic = NULL;
+ addr_t flags;
+ int i;
+
+ flags = v3_lock_irqsave(apic_dev->state_lock);
+
+ if ( (dst_idx > 0) && (dst_idx < apic_dev->num_apics) ) {
+ // see if it simply is the core id
+ if (apic_dev->apics[dst_idx].lapic_id.apic_id == dst_idx) {
+ dst_apic = &(apic_dev->apics[dst_idx]);
+ }
+ }
- Note that this model introduces a potential deadlock:
+ for (i = 0; i < apic_dev->num_apics; i++) {
+ if (apic_dev->apics[i].lapic_id.apic_id == dst_idx) {
+ dst_apic = &(apic_dev->apics[i]);
+ }
+ }
- APIC A-> APIC B while APIC B -> APIC A
+ v3_unlock_irqrestore(apic_dev->state_lock, flags);
- lock(A) lock(B)
- lock(B) lock(A)
+ return dst_apic;
- This deadlock condition is not currently handled.
- A good way of handling it might be to check to see if the
- destination apic is currently sending an IPI, and,
- if so, back out and ask the caller to drop the sender lock
- reacquire it, and then try route_ipi again. However,
- logical delivery complicates this considerably since
- we can hit the above situation in the middle of sending
- the ipi to a group of destination apics.
+}
-*/
static int route_ipi(struct apic_dev_state * apic_dev,
struct apic_state * src_apic,
- struct int_cmd_reg * icr) {
+ struct v3_gen_ipi * ipi) {
struct apic_state * dest_apic = NULL;
- v3_lock(apic_dev->ipi_lock);
- // now I know only one IPI is being routed, this one
- // also, I do not have any apic locks
- // I need to acquire locks on pairs of src/dest apics
- // and I will do that using the total order
- // given by their cores
-
-
- PrintDebug("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
- deliverymode_str[icr->del_mode],
- icr->vec,
+ PrintDebug(VM_NONE, VCORE_NONE, "apic: IPI %s %u from apic %p to %s %s %u\n",
+ deliverymode_str[ipi->mode],
+ ipi->vector,
src_apic,
- (icr->dst_mode == 0) ? "(physical)" : "(logical)",
- shorthand_str[icr->dst_shorthand],
- icr->dst,
- icr->val);
-
-#if 1
- if (icr->vec!=48) {
- V3_Print("apic: IPI %u from apic %p to %s %u (icr=0x%llx)\n",
- icr->vec,
- src_apic,
- (icr->dst_mode == 0) ? "(physical)" : "(logical)",
- icr->dst,
- icr->val);
- }
+ (ipi->logical == 0) ? "(physical)" : "(logical)",
+ shorthand_str[ipi->dst_shorthand],
+ ipi->dst);
-#endif
- /* Locking issue: we hold src_apic already. We will acquire dest_apic if needed */
- /* But this could lead to deadlock - we really need to have a total ordering */
-
- switch (icr->dst_shorthand) {
+ switch (ipi->dst_shorthand) {
- case 0: // no shorthand
- if (icr->dst_mode == 0) {
- // physical delivery
+ case APIC_SHORTHAND_NONE: // no shorthand
+ if (ipi->logical == APIC_DEST_PHYSICAL) {
- if (icr->dst >= apic_dev->num_apics) {
- PrintError("apic: Attempted send to unregistered apic id=%u\n", icr->dst);
+ dest_apic = find_physical_apic(apic_dev, ipi->dst);
+
+ if (dest_apic == NULL) {
+ PrintError(VM_NONE, VCORE_NONE, "apic: Attempted send to unregistered apic id=%u\n", ipi->dst);
return -1;
}
-
- dest_apic = &(apic_dev->apics[icr->dst]);
-
- v3_lock(dest_apic->lock);
-
- if (deliver_ipi(src_apic, dest_apic,
- icr->vec, icr->del_mode) == -1) {
- PrintError("apic: Could not deliver IPI\n");
- v3_unlock(dest_apic->lock);
+ if (deliver_ipi(src_apic, dest_apic, ipi) == -1) {
+ PrintError(VM_NONE, VCORE_NONE, "apic: Could not deliver IPI\n");
return -1;
}
- v3_unlock(dest_apic->lock);
- } else {
- // logical delivery
- if (icr->del_mode!=1) {
+ PrintDebug(VM_NONE, VCORE_NONE, "apic: done\n");
+
+ } else if (ipi->logical == APIC_DEST_LOGICAL) {
+
+ if (ipi->mode != IPI_LOWEST_PRIO) {
+ int i;
+ uint8_t mda = ipi->dst;
+
// logical, but not lowest priority
// we immediately trigger
// fixed, smi, reserved, nmi, init, sipi, etc
- int i;
- int have_lock;
-
- uint8_t mda = icr->dst;
+
for (i = 0; i < apic_dev->num_apics; i++) {
+ int del_flag = 0;
dest_apic = &(apic_dev->apics[i]);
-
- if (src_apic==0 || dest_apic!=src_apic) {
- v3_lock(dest_apic->lock);
- have_lock=1;
- } else {
- have_lock=0;
- }
-
- int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
+ del_flag = should_deliver_ipi(apic_dev, dest_apic->core, dest_apic, mda);
if (del_flag == -1) {
- PrintError("apic: Error checking delivery mode\n");
- if (have_lock) {
- v3_unlock(dest_apic->lock);
- }
+
+ PrintError(VM_NONE, VCORE_NONE, "apic: Error checking delivery mode\n");
return -1;
} else if (del_flag == 1) {
- if (deliver_ipi(src_apic, dest_apic,
- icr->vec, icr->del_mode) == -1) {
- PrintError("apic: Error: Could not deliver IPI\n");
- if (have_lock) {
- v3_unlock(dest_apic->lock);
- }
+
+ if (deliver_ipi(src_apic, dest_apic, ipi) == -1) {
+ PrintError(VM_NONE, VCORE_NONE, "apic: Error: Could not deliver IPI\n");
return -1;
}
}
-
- if (have_lock) {
- v3_unlock(dest_apic->lock);
- }
}
- } else {
- // logical, lowest priority
- // scan, then trigger
- int i;
- int have_cur_lock; // do we have a lock on the one we are now considering?
+ } else { // APIC_LOWEST_DELIVERY
struct apic_state * cur_best_apic = NULL;
-
- uint8_t mda = icr->dst;
+ uint32_t cur_best_apr;
+ uint8_t mda = ipi->dst;
+ int i;
+
+ // logical, lowest priority
for (i = 0; i < apic_dev->num_apics; i++) {
-
+ int del_flag = 0;
+
dest_apic = &(apic_dev->apics[i]);
-
- if (src_apic==0 || dest_apic!=src_apic) {
- v3_lock(dest_apic->lock);
- have_cur_lock=1;
- } else {
- have_cur_lock=0;
- }
-
- int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
+ del_flag = should_deliver_ipi(apic_dev, dest_apic->core, dest_apic, mda);
if (del_flag == -1) {
- PrintError("apic: Error checking delivery mode\n");
- if (have_cur_lock) {
- v3_unlock(dest_apic->lock);
- }
- if (cur_best_apic && cur_best_apic!=src_apic) {
- v3_unlock(cur_best_apic->lock);
- }
-
+ PrintError(VM_NONE, VCORE_NONE, "apic: Error checking delivery mode\n");
+
return -1;
} else if (del_flag == 1) {
// update priority for lowest priority scan
- if (!cur_best_apic) {
- cur_best_apic=dest_apic;
- have_cur_lock=0; // will unlock as cur_best_apic
- } else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
- // we now unlock the current best one and then switch
- // so in the end we have a lock on the new cur_best_apic
- if (cur_best_apic!=src_apic) {
- v3_unlock(cur_best_apic->lock);
+ addr_t flags = 0;
+
+ flags = v3_lock_irqsave(apic_dev->state_lock);
+
+ if (cur_best_apic == 0) {
+ cur_best_apic = dest_apic;
+ cur_best_apr = get_apic_apr(dest_apic) & 0xf0;
+ } else {
+ uint32_t dest_apr = get_apic_apr(dest_apic) & 0xf0;
+ if (dest_apr < cur_best_apr) {
+ cur_best_apic = dest_apic;
+ cur_best_apr = dest_apr;
}
- cur_best_apic=dest_apic;
- have_cur_lock=0;
}
+
+ v3_unlock_irqrestore(apic_dev->state_lock, flags);
+
}
- if (have_cur_lock) {
- v3_unlock(dest_apic->lock);
- }
-
}
+
// now we will deliver to the best one if it exists
if (!cur_best_apic) {
- PrintDebug("apic: lowest priority deliver, but no destinations!\n");
+ PrintDebug(VM_NONE, VCORE_NONE, "apic: lowest priority deliver, but no destinations!\n");
} else {
- if (deliver_ipi(src_apic, cur_best_apic,
- icr->vec, icr->del_mode) == -1) {
- PrintError("apic: Error: Could not deliver IPI\n");
- if (cur_best_apic!=src_apic) {
- v3_unlock(cur_best_apic->lock);
- }
+ if (deliver_ipi(src_apic, cur_best_apic, ipi) == -1) {
+ PrintError(VM_NONE, VCORE_NONE, "apic: Error: Could not deliver IPI\n");
return -1;
- } else {
- if (cur_best_apic!=src_apic) {
- v3_unlock(cur_best_apic->lock);
- }
- //V3_Print("apic: logical, lowest priority delivery to apic %u\n",cur_best_apic->lapic_id.val);
}
+ //V3_Print(VM_NONE, VCORE_NONE, "apic: logical, lowest priority delivery to apic %u\n",cur_best_apic->lapic_id.val);
}
}
}
break;
- case 1: // self
-
- /* I assume I am already locked! */
+ case APIC_SHORTHAND_SELF: // self
if (src_apic == NULL) { /* this is not an apic, but it's trying to send to itself??? */
- PrintError("apic: Sending IPI to self from generic IPI sender\n");
+ PrintError(VM_NONE, VCORE_NONE, "apic: Sending IPI to self from generic IPI sender\n");
break;
}
- if (icr->dst_mode == 0) { /* physical delivery */
- if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
- PrintError("apic: Could not deliver IPI to self (physical)\n");
+
+
+ if (ipi->logical == APIC_DEST_PHYSICAL) { /* physical delivery */
+ if (deliver_ipi(src_apic, src_apic, ipi) == -1) {
+ PrintError(VM_NONE, VCORE_NONE, "apic: Could not deliver IPI to self (physical)\n");
return -1;
}
- } else { /* logical delivery */
- PrintError("apic: use of logical delivery in self (untested)\n");
- if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
- PrintError("apic: Could not deliver IPI to self (logical)\n");
+ } else if (ipi->logical == APIC_DEST_LOGICAL) { /* logical delivery */
+ PrintError(VM_NONE, VCORE_NONE, "apic: use of logical delivery in self (untested)\n");
+
+ if (deliver_ipi(src_apic, src_apic, ipi) == -1) {
+ PrintError(VM_NONE, VCORE_NONE, "apic: Could not deliver IPI to self (logical)\n");
return -1;
}
}
+
break;
- case 2:
- case 3: { /* all and all-but-me */
+ case APIC_SHORTHAND_ALL:
+ case APIC_SHORTHAND_ALL_BUT_ME: { /* all and all-but-me */
/* assuming that logical verus physical doesn't matter
although it is odd that both are used */
-
- int have_lock;
int i;
for (i = 0; i < apic_dev->num_apics; i++) {
dest_apic = &(apic_dev->apics[i]);
-
- if ((dest_apic != src_apic) || (icr->dst_shorthand == 2)) {
- if (src_apic==0 || dest_apic!=src_apic) {
- v3_lock(dest_apic->lock);
- have_lock=1;
- } else {
- have_lock=0;
- }
- if (deliver_ipi(src_apic, dest_apic, icr->vec, icr->del_mode) == -1) {
- PrintError("apic: Error: Could not deliver IPI\n");
- if (have_lock) {
- v3_unlock(dest_apic->lock);
- }
+ if ((dest_apic != src_apic) || (ipi->dst_shorthand == APIC_SHORTHAND_ALL)) {
+ if (deliver_ipi(src_apic, dest_apic, ipi) == -1) {
+ PrintError(VM_NONE, VCORE_NONE, "apic: Error: Could not deliver IPI\n");
return -1;
}
- if (have_lock) {
- v3_unlock(dest_apic->lock);
- }
}
- }
+ }
break;
}
default:
- PrintError("apic: Error routing IPI, invalid Mode (%d)\n", icr->dst_shorthand);
+ PrintError(VM_NONE, VCORE_NONE, "apic: Error routing IPI, invalid Mode (%d)\n", ipi->dst_shorthand);
return -1;
}
-
-
+
return 0;
}
// External function, expected to acquire lock on apic
static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
addr_t reg_addr = guest_addr - apic->base_addr;
struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
uint32_t val = 0;
- v3_lock(apic->lock);
- PrintDebug("apic %u: core %u: at %p: Read apic address space (%p)\n",
- apic->lapic_id.val, core->cpu_id, apic, (void *)guest_addr);
+ PrintDebug(core->vm_info, core, "apic %u: core %u: at %p: Read apic address space (%p)\n",
+ apic->lapic_id.val, core->vcpu_id, apic, (void *)guest_addr);
if (msr->apic_enable == 0) {
- PrintError("apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
- apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
-
- goto apic_read_out_bad;
+ PrintError(core->vm_info, core, "apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
+ apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
+ return -1;
}
/* Because "May not be supported" doesn't matter to Linux developers... */
/* if (length != 4) { */
- /* PrintError("Invalid apic read length (%d)\n", length); */
+ /* PrintError(core->vm_info, core, "Invalid apic read length (%d)\n", length); */
/* return -1; */
/* } */
case EOI_OFFSET:
// Well, only an idiot would read from a architectural write only register
// Oh, Hello Linux.
- // PrintError("Attempting to read from write only register\n");
+ // PrintError(core->vm_info, core, "Attempting to read from write only register\n");
// return -1;
break;
val = apic->apic_ver.val;
break;
case TPR_OFFSET:
- val = apic->task_prio.val;
+ val = get_apic_tpr(apic);
break;
case APR_OFFSET:
- val = apic->arb_prio.val;
+ val = get_apic_apr(apic);
break;
case PPR_OFFSET:
- val = apic->proc_prio.val;
+ val = get_apic_ppr(apic);
break;
case REMOTE_READ_OFFSET:
val = apic->rem_rd_data;
case SEOI_OFFSET:
default:
- PrintError("apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
- apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
- goto apic_read_out_bad;
+ PrintError(core->vm_info, core, "apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
+ apic->lapic_id.val, core->vcpu_id, (uint32_t)reg_addr);
+ return -1;
}
*val_ptr = *(((uint8_t *)&val) + byte_addr);
} else if ((length == 2) &&
- ((reg_addr & 0x3) == 0x3)) {
+ ((reg_addr & 0x3) != 0x3)) {
uint_t byte_addr = reg_addr & 0x3;
uint16_t * val_ptr = (uint16_t *)dst;
*val_ptr = *(((uint16_t *)&val) + byte_addr);
*val_ptr = val;
} else {
- PrintError("apic %u: core %u: Invalid apic read length (%d)\n",
- apic->lapic_id.val, core->cpu_id, length);
- goto apic_read_out_bad;
+ PrintError(core->vm_info, core, "apic %u: core %u: Invalid apic read length (%d)\n",
+ apic->lapic_id.val, core->vcpu_id, length);
+ return -1;
}
- PrintDebug("apic %u: core %u: Read finished (val=%x)\n",
- apic->lapic_id.val, core->cpu_id, *(uint32_t *)dst);
-
+ PrintDebug(core->vm_info, core, "apic %u: core %u: Read finished (val=%x)\n",
+ apic->lapic_id.val, core->vcpu_id, *(uint32_t *)dst);
- // apic_read_out_good:
- v3_unlock(apic->lock);
return length;
-
- apic_read_out_bad:
- v3_unlock(apic->lock);
- return -1;
}
*/
static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
addr_t reg_addr = guest_addr - apic->base_addr;
struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
uint32_t op_val = *(uint32_t *)src;
+ addr_t flags = 0;
+ PrintDebug(core->vm_info, core, "apic %u: core %u: at %p and priv_data is at %p\n",
+ apic->lapic_id.val, core->vcpu_id, apic, priv_data);
- v3_lock(apic->lock);
-
- PrintDebug("apic %u: core %u: at %p and priv_data is at %p\n",
- apic->lapic_id.val, core->cpu_id, apic, priv_data);
-
- PrintDebug("apic %u: core %u: write to address space (%p) (val=%x)\n",
- apic->lapic_id.val, core->cpu_id, (void *)guest_addr, *(uint32_t *)src);
+ PrintDebug(core->vm_info, core, "apic %u: core %u: write to address space (%p) (val=%x)\n",
+ apic->lapic_id.val, core->vcpu_id, (void *)guest_addr, *(uint32_t *)src);
if (msr->apic_enable == 0) {
- PrintError("apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
- apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
- goto apic_write_out_bad;
+ PrintError(core->vm_info, core, "apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
+ apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
+ return -1;
}
if (length != 4) {
- PrintError("apic %u: core %u: Invalid apic write length (%d)\n",
- apic->lapic_id.val, length, core->cpu_id);
- goto apic_write_out_bad;
+ PrintError(core->vm_info, core, "apic %u: core %u: Invalid apic write length (%d)\n",
+ apic->lapic_id.val, length, core->vcpu_id);
+ return -1;
}
switch (reg_addr) {
case PPR_OFFSET:
case EXT_APIC_FEATURE_OFFSET:
- PrintError("apic %u: core %u: Attempting to write to read only register %p (error)\n",
- apic->lapic_id.val, core->cpu_id, (void *)reg_addr);
- // goto apic_write_out_bad;
+ PrintError(core->vm_info, core, "apic %u: core %u: Attempting to write to read only register %p (error)\n",
+ apic->lapic_id.val, core->vcpu_id, (void *)reg_addr);
break;
// Data registers
case APIC_ID_OFFSET:
- V3_Print("apic %u: core %u: my id is being changed to %u\n",
- apic->lapic_id.val, core->cpu_id, op_val);
+ //V3_Print(core->vm_info, core, "apic %u: core %u: my id is being changed to %u\n",
+ // apic->lapic_id.val, core->vcpu_id, op_val);
apic->lapic_id.val = op_val;
break;
case TPR_OFFSET:
- apic->task_prio.val = op_val;
+ set_apic_tpr(apic,op_val);
break;
case LDR_OFFSET:
- PrintDebug("apic %u: core %u: setting log_dst.val to 0x%x\n",
- apic->lapic_id.val, core->cpu_id, op_val);
+ PrintDebug(core->vm_info, core, "apic %u: core %u: setting log_dst.val to 0x%x\n",
+ apic->lapic_id.val, core->vcpu_id, op_val);
+ flags = v3_lock_irqsave(apic_dev->state_lock);
apic->log_dst.val = op_val;
+ v3_unlock_irqrestore(apic_dev->state_lock, flags);
break;
case DFR_OFFSET:
+ flags = v3_lock_irqsave(apic_dev->state_lock);
apic->dst_fmt.val = op_val;
+ v3_unlock_irqrestore(apic_dev->state_lock, flags);
break;
case SPURIOUS_INT_VEC_OFFSET:
apic->spurious_int.val = op_val;
apic->tmr_cur_cnt = op_val;
break;
case TMR_DIV_CFG_OFFSET:
+ PrintDebug(core->vm_info, core, "apic %u: core %u: setting tmr_div_cfg to 0x%x\n",
+ apic->lapic_id.val, core->vcpu_id, op_val);
apic->tmr_div_cfg.val = op_val;
break;
// Action Registers
case EOI_OFFSET:
- // do eoi (we already have the lock)
- apic_do_eoi(apic);
+ // do eoi
+ apic_do_eoi(core, apic);
break;
case INT_CMD_LO_OFFSET: {
- // execute command (we already have the lock)
+ // execute command
- struct int_cmd_reg tmp_icr;
+ struct v3_gen_ipi tmp_ipi;
apic->int_cmd.lo = op_val;
- tmp_icr=apic->int_cmd;
+ tmp_ipi.vector = apic->int_cmd.vec;
+ tmp_ipi.mode = apic->int_cmd.del_mode;
+ tmp_ipi.logical = apic->int_cmd.dst_mode;
+ tmp_ipi.trigger_mode = apic->int_cmd.trig_mode;
+ tmp_ipi.dst_shorthand = apic->int_cmd.dst_shorthand;
+ tmp_ipi.dst = apic->int_cmd.dst;
+
+ tmp_ipi.ack = NULL;
+ tmp_ipi.private_data = NULL;
+
- // V3_Print("apic %u: core %u: sending cmd 0x%llx to apic %u\n",
- // apic->lapic_id.val, core->cpu_id,
+ // V3_Print(core->vm_info, core, "apic %u: core %u: sending cmd 0x%llx to apic %u\n",
+ // apic->lapic_id.val, core->vcpu_id,
// apic->int_cmd.val, apic->int_cmd.dst);
- apic->in_icr=0;
- apic->in_ipi=1;
-
- v3_unlock(apic->lock);
-
- // route_ipi is responsible for locking both source and destiation(s)
- if (route_ipi(apic_dev, apic, &tmp_icr) == -1) {
- PrintError("IPI Routing failure\n");
- goto apic_write_out_bad;
+ if (route_ipi(apic_dev, apic, &tmp_ipi) == -1) {
+ PrintError(core->vm_info, core, "IPI Routing failure\n");
+ return -1;
}
- v3_lock(apic->lock);
- apic->in_ipi=0;
- }
break;
-
+ }
case INT_CMD_HI_OFFSET: {
- // already have the lock
- if (apic->in_icr) {
- PrintError("apic %u: core %u: writing command high=0x%x while in_icr=1\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
- }
-
apic->int_cmd.hi = op_val;
- //V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
- apic->in_icr=1;
- }
+ //V3_Print(core->vm_info, core, "apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->vcpu_id,apic->int_cmd.hi);
break;
-
-
+ }
// Unhandled Registers
case EXT_APIC_CMD_OFFSET:
case SEOI_OFFSET:
default:
- PrintError("apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
- apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
+ PrintError(core->vm_info, core, "apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
+ apic->lapic_id.val, core->vcpu_id, (uint32_t)reg_addr);
- goto apic_write_out_bad;
+ return -1;
}
- PrintDebug("apic %u: core %u: Write finished\n", apic->lapic_id.val, core->cpu_id);
+ PrintDebug(core->vm_info, core, "apic %u: core %u: Write finished\n", apic->lapic_id.val, core->vcpu_id);
- // apic_write_out_good:
- v3_unlock(apic->lock);
return length;
- apic_write_out_bad:
- v3_unlock(apic->lock);
- return -1;
}
/* Interrupt Controller Functions */
-// internally used, expects caller to lock
-static int apic_intr_pending_nolock(struct guest_info * core, void * private_data) {
+
+static int apic_intr_pending(struct guest_info * core, void * private_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
- int req_irq = get_highest_irr(apic);
- int svc_irq = get_highest_isr(apic);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+ int req_irq = 0;
+ int svc_irq = 0;
- // PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->cpu_id,req_irq,svc_irq);
+ // Activate all queued IRQ entries
+ drain_irq_entries(apic);
- if ((req_irq >= 0) &&
- (req_irq > svc_irq)) {
- return 1;
- }
+ // Check for newly activated entries
+ req_irq = get_highest_irr(apic);
+ svc_irq = get_highest_isr(apic);
- return 0;
-}
+ // PrintDebug(core->vm_info, core, "apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->vcpu_id,req_irq,svc_irq);
-// externally visible, so must lock itself
-static int apic_intr_pending(struct guest_info * core, void * private_data) {
- struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
- int rc;
- v3_lock(apic->lock);
-
- rc=apic_intr_pending_nolock(core,private_data);
-
- v3_unlock(apic->lock);
+ if ((req_irq >= 0) &&
+ (req_irq > svc_irq)) {
- return rc;
-}
+ // We have a new requested vector that is higher priority than
+ // the vector that is in-service
-// Internal - no lock
-static int apic_get_intr_number_nolock(struct guest_info * core, void * private_data) {
- struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
- int req_irq = get_highest_irr(apic);
- int svc_irq = get_highest_isr(apic);
+ uint32_t ppr = get_apic_ppr(apic);
- if (svc_irq == -1) {
- return req_irq;
- } else if (svc_irq < req_irq) {
- return req_irq;
+ if ((req_irq & 0xf0) > (ppr & 0xf0)) {
+ // it's also higher priority than the current
+ // processor priority. Therefore this
+ // interrupt can go in now.
+ return 1;
+ } else {
+ // processor priority is currently too high
+ // for this interrupt to go in now.
+ // note that if tpr=0xf?, then ppr=0xf?
+ // and thus all vectors will be masked
+ // as required (tpr=0xf? => all masked)
+ return 0;
+ }
+ } else {
+ // the vector that is in service is higher
+ // priority than any new requested vector
+ return 0;
}
-
- return -1;
}
-// Externally visible, so must lock itself
+
static int apic_get_intr_number(struct guest_info * core, void * private_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
- int rc;
-
- v3_lock(apic->lock);
-
- rc=apic_get_intr_number_nolock(core,private_data);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+ int req_irq = get_highest_irr(apic);
+ int svc_irq = get_highest_isr(apic);
- v3_unlock(apic->lock);
- return rc;
+ // for the logic here, see the comments for apic_intr_pending
+ if ((req_irq >=0) &&
+ (req_irq > svc_irq)) {
+
+ uint32_t ppr = get_apic_ppr(apic);
+
+ if ((req_irq & 0xf0) > (ppr & 0xf0)) {
+ return req_irq;
+ } else {
+ // hmm, this should not have happened, but, anyway,
+ // no interrupt is currently ready to go in
+ return -1;
+ }
+ } else {
+ return -1;
+ }
}
-//
-// Here there is no source APIC, so there is no need to lock it
-// Furthermore, the expectation is that route_ipi will lock the destiation apic
+
int v3_apic_send_ipi(struct v3_vm_info * vm, struct v3_gen_ipi * ipi, void * dev_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)
(((struct vm_device *)dev_data)->private_data);
- struct int_cmd_reg tmp_icr;
-
- // zero out all the fields
- tmp_icr.val = 0;
- tmp_icr.vec = ipi->vector;
- tmp_icr.del_mode = ipi->mode;
- tmp_icr.dst_mode = ipi->logical;
- tmp_icr.trig_mode = ipi->trigger_mode;
- tmp_icr.dst_shorthand = ipi->dst_shorthand;
- tmp_icr.dst = ipi->dst;
-
- // route_ipi is responsible for locking the destination apic
- return route_ipi(apic_dev, NULL, &tmp_icr);
+ return route_ipi(apic_dev, NULL, ipi);
}
-int v3_apic_raise_intr(struct v3_vm_info * vm, uint32_t irq, uint32_t dst, void * dev_data) {
- struct apic_dev_state * apic_dev = (struct apic_dev_state *)
- (((struct vm_device*)dev_data)->private_data);
- struct apic_state * apic = &(apic_dev->apics[dst]);
- int do_xcall;
-
- PrintDebug("apic %u core ?: raising interrupt IRQ %u (dst = %u).\n", apic->lapic_id.val, irq, dst);
- v3_lock(apic->lock);
-
- do_xcall=activate_apic_irq_nolock(apic, irq);
-
- if (do_xcall<0) {
- PrintError("Failed to activate apic irq\n");
- v3_unlock(apic->lock);
- return -1;
- }
-
- if (do_xcall && (V3_Get_CPU() != dst)) {
-#ifdef CONFIG_MULTITHREAD_OS
- v3_interrupt_cpu(vm, dst, 0);
-#else
- V3_ASSERT(0);
-#endif
-
- }
- v3_unlock(apic->lock);
- return 0;
-}
-
-
-// internal - caller must lock
-static int apic_begin_irq_nolock(struct guest_info * core, void * private_data, int irq) {
+static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
int major_offset = (irq & ~0x00000007) >> 3;
int minor_offset = irq & 0x00000007;
uint8_t *req_location = apic->int_req_reg + major_offset;
*req_location &= ~flag;
} else {
// do nothing...
- //PrintDebug("apic %u: core %u: begin irq for %d ignored since I don't own it\n",
- // apic->lapic_id.val, core->cpu_id, irq);
+ //PrintDebug(core->vm_info, core, "apic %u: core %u: begin irq for %d ignored since I don't own it\n",
+ // apic->lapic_id.val, core->vcpu_id, irq);
}
return 0;
}
-// Since this is called, externally, it should lock the apic
-static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
- struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
- int rc;
- v3_lock(apic->lock);
+/* Timer Functions */
- rc=apic_begin_irq_nolock(core,private_data,irq);
+static void apic_inject_timer_intr(struct guest_info *core,
+ void * priv_data) {
+ struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+ // raise irq
+ PrintDebug(core->vm_info, core, "apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d)\n",
+ apic->lapic_id.val, core->vcpu_id,
+ apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt);
+
+ if (apic_intr_pending(core, priv_data)) {
+ PrintDebug(core->vm_info, core, "apic %u: core %u: Overriding pending IRQ %d\n",
+ apic->lapic_id.val, core->vcpu_id,
+ apic_get_intr_number(core, priv_data));
+ }
- v3_unlock(apic->lock);
+ if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
+ PrintError(core->vm_info, core, "apic %u: core %u: Could not raise Timer interrupt\n",
+ apic->lapic_id.val, core->vcpu_id);
+ }
- return rc;
+ return;
}
+
-/* Timer Functions */
-// Caller will lock the apic
-static void apic_update_time_nolock(struct guest_info * core,
+static void apic_update_time(struct guest_info * core,
uint64_t cpu_cycles, uint64_t cpu_freq,
void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
// The 32 bit GCC runtime is a pile of shit
#ifdef __V3_64BIT__
if ((apic->tmr_init_cnt == 0) ||
( (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_ONESHOT) &&
(apic->tmr_cur_cnt == 0))) {
- //PrintDebug("apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->cpu_id);
+ //PrintDebug(core->vm_info, core, "apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->vcpu_id);
return;
}
shift_num = 7;
break;
default:
- PrintError("apic %u: core %u: Invalid Timer Divider configuration\n",
- apic->lapic_id.val, core->cpu_id);
+ PrintError(core->vm_info, core, "apic %u: core %u: Invalid Timer Divider configuration\n",
+ apic->lapic_id.val, core->vcpu_id);
return;
}
tmr_ticks = cpu_cycles >> shift_num;
- // PrintDebug("Timer Ticks: %p\n", (void *)tmr_ticks);
+ // PrintDebug(core->vm_info, core, "Timer Ticks: %p\n", (void *)tmr_ticks);
if (tmr_ticks < apic->tmr_cur_cnt) {
apic->tmr_cur_cnt -= tmr_ticks;
+#ifdef V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS
+ if (apic->missed_ints && !apic_intr_pending(core, priv_data)) {
+ PrintDebug(core->vm_info, core, "apic %u: core %u: Injecting queued APIC timer interrupt.\n",
+ apic->lapic_id.val, core->vcpu_id);
+ apic_inject_timer_intr(core, priv_data);
+ apic->missed_ints--;
+ }
+#endif /* V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS */
} else {
tmr_ticks -= apic->tmr_cur_cnt;
apic->tmr_cur_cnt = 0;
- // raise irq
- PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
- apic->lapic_id.val, core->cpu_id,
- apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
-
- if (apic_intr_pending_nolock(core, priv_data)) {
- PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
- apic->lapic_id.val, core->cpu_id,
- apic_get_intr_number(core, priv_data));
- }
+ apic_inject_timer_intr(core, priv_data);
- if (activate_internal_irq_nolock(apic, APIC_TMR_INT) == -1) {
- PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
- apic->lapic_id.val, core->cpu_id);
- }
-
if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
+ int queued_ints = tmr_ticks / apic->tmr_init_cnt;
tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
+ apic->missed_ints += queued_ints;
}
}
return;
}
-
-static void apic_update_time(struct guest_info * core,
- uint64_t cpu_cycles, uint64_t cpu_freq,
- void * priv_data) {
- struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
-
- v3_lock(apic->lock);
-
- apic_update_time_nolock(core,cpu_cycles,cpu_freq,priv_data);
-
- v3_unlock(apic->lock);
-
- return;
-}
-
static struct intr_ctrl_ops intr_ops = {
.intr_pending = apic_intr_pending,
.get_intr_number = apic_get_intr_number,
v3_remove_timer(core, apic->timer);
}
+ v3_lock_deinit(&(apic->irq_queue.lock));
+
// unhook memory
}
v3_unhook_msr(vm, BASE_ADDR_MSR);
+ v3_lock_deinit(&(apic_dev->state_lock));
+
V3_Free(apic_dev);
return 0;
}
+#ifdef V3_CONFIG_CHECKPOINT
+
+#define KEY_MAX 128
+#define MAKE_KEY(x) snprintf(key,KEY_MAX,"%s%d",x,i);
+
+static int apic_save(struct v3_chkpt_ctx * ctx, void * private_data) {
+ struct apic_dev_state * apic_state = (struct apic_dev_state *)private_data;
+ int i = 0;
+ uint32_t temp;
+ char key[KEY_MAX];
+
+ V3_CHKPT_SAVE(ctx, "NUM_APICS", apic_state->num_apics,savefailout);
+
+ for (i = 0; i < apic_state->num_apics; i++) {
+ drain_irq_entries(&(apic_state->apics[i]));
+
+ MAKE_KEY("BASE_ADDR");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].base_addr,savefailout);
+ MAKE_KEY("BASE_ADDR_MSR");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].base_addr_msr,savefailout);
+ MAKE_KEY("LAPIC_ID");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].lapic_id,savefailout);
+ MAKE_KEY("APIC_VER");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].apic_ver,savefailout);
+ MAKE_KEY("EXT_APIC_CTRL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].ext_apic_ctrl,savefailout);
+ MAKE_KEY("LOCAL_VEC_TBL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].local_vec_tbl,savefailout);
+ MAKE_KEY("TMR_VEC_TBL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].tmr_vec_tbl,savefailout);
+ MAKE_KEY("TMR_DIV_CFG");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].tmr_div_cfg,savefailout);
+ MAKE_KEY("LINT0_VEC_TBL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].lint0_vec_tbl,savefailout);
+ MAKE_KEY("LINT1_VEC_TBL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].lint1_vec_tbl,savefailout);
+ MAKE_KEY("PERF_CTR_LOC_VEC_TBL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].perf_ctr_loc_vec_tbl,savefailout);
+ MAKE_KEY("THERM_LOC_VEC_TBL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].therm_loc_vec_tbl,savefailout);
+ MAKE_KEY("ERR_VEC_TBL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].err_vec_tbl,savefailout);
+ MAKE_KEY("ERR_STATUS");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].err_status,savefailout);
+ MAKE_KEY("SPURIOUS_INT");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].spurious_int,savefailout);
+ MAKE_KEY("INT_CMD");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].int_cmd,savefailout);
+ MAKE_KEY("LOG_DST");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].log_dst,savefailout);
+ MAKE_KEY("DST_FMT");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].dst_fmt,savefailout);
+
+ // APR and PPR are stored only for compatibility
+ // TPR is in APIC_TPR, APR and PPR are derived
+
+ temp = get_apic_apr(&(apic_state->apics[i]));
+ MAKE_KEY("ARB_PRIO");
+ V3_CHKPT_SAVE(ctx, key, temp,savefailout);
+ temp = get_apic_tpr(&(apic_state->apics[i]));
+ MAKE_KEY("TASK_PRIO");
+ V3_CHKPT_SAVE(ctx,key,temp,savefailout);
+ temp = get_apic_ppr(&(apic_state->apics[i]));
+ MAKE_KEY("PROC_PRIO");
+ V3_CHKPT_SAVE(ctx, key,temp,savefailout);
+
+ MAKE_KEY("EXT_APIC_FEATURE");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].ext_apic_feature,savefailout);
+ MAKE_KEY("SPEC_EOI");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].spec_eoi,savefailout);
+ MAKE_KEY("TMR_CUR_CNT");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].tmr_cur_cnt,savefailout);
+
+ MAKE_KEY("TMR_INIT_CNT");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].tmr_init_cnt,savefailout);
+ MAKE_KEY("EXT_INTR_VEC_TBL");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].ext_intr_vec_tbl,savefailout);
+
+ MAKE_KEY("REM_RD_DATA");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].rem_rd_data,savefailout);
+ MAKE_KEY("IPI_STATE");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].ipi_state,savefailout);
+ MAKE_KEY("INT_REQ_REG");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].int_req_reg,savefailout);
+ MAKE_KEY("INT_SVC_REG");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].int_svc_reg,savefailout);
+ MAKE_KEY("INT_EN_REG");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].int_en_reg,savefailout);
+ MAKE_KEY("TRIG_MODE_REG");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].trig_mode_reg,savefailout);
+ MAKE_KEY("EOI");
+ V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].eoi,savefailout);
+
+ }
+
+ return 0;
+
+ savefailout:
+ PrintError(VM_NONE, VCORE_NONE, "Failed to save apic\n");
+ return -1;
+}
+
+static int apic_load(struct v3_chkpt_ctx * ctx, void * private_data) {
+ struct apic_dev_state *apic_state = (struct apic_dev_state *)private_data;
+ int i = 0;
+ uint32_t temp;
+ char key[KEY_MAX];
+
+ V3_CHKPT_LOAD(ctx,"NUM_APICS", apic_state->num_apics, loadfailout);
+
+ for (i = 0; i < apic_state->num_apics; i++) {
+ drain_irq_entries(&(apic_state->apics[i]));
+
+ MAKE_KEY("BASE_ADDR");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].base_addr,loadfailout);
+ MAKE_KEY("BASE_ADDR_MSR");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].base_addr_msr,loadfailout);
+ MAKE_KEY("LAPIC_ID");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].lapic_id,loadfailout);
+ MAKE_KEY("APIC_VER");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].apic_ver,loadfailout);
+ MAKE_KEY("EXT_APIC_CTRL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].ext_apic_ctrl,loadfailout);
+ MAKE_KEY("LOCAL_VEC_TBL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].local_vec_tbl,loadfailout);
+ MAKE_KEY("TMR_VEC_TBL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].tmr_vec_tbl,loadfailout);
+ MAKE_KEY("TMR_DIV_CFG");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].tmr_div_cfg,loadfailout);
+ MAKE_KEY("LINT0_VEC_TBL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].lint0_vec_tbl,loadfailout);
+ MAKE_KEY("LINT1_VEC_TBL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].lint1_vec_tbl,loadfailout);
+ MAKE_KEY("PERF_CTR_LOC_VEC_TBL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].perf_ctr_loc_vec_tbl,loadfailout);
+ MAKE_KEY("THERM_LOC_VEC_TBL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].therm_loc_vec_tbl,loadfailout);
+ MAKE_KEY("ERR_VEC_TBL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].err_vec_tbl,loadfailout);
+ MAKE_KEY("ERR_STATUS");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].err_status,loadfailout);
+ MAKE_KEY("SPURIOUS_INT");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].spurious_int,loadfailout);
+ MAKE_KEY("INT_CMD");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].int_cmd,loadfailout);
+ MAKE_KEY("LOG_DST");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].log_dst,loadfailout);
+ MAKE_KEY("DST_FMT");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].dst_fmt,loadfailout);
+
+ // APR and PPR are stored only for compatibility
+ // TPR is in APIC_TPR, APR and PPR are derived
+
+ MAKE_KEY("ARB_PRIO");
+ V3_CHKPT_LOAD(ctx, key, temp,loadfailout);
+ // discarded
+
+ MAKE_KEY("TASK_PRIO");
+ V3_CHKPT_LOAD(ctx,key,temp,loadfailout);
+ set_apic_tpr(&(apic_state->apics[i]),temp);
+
+ MAKE_KEY("PROC_PRIO");
+ V3_CHKPT_LOAD(ctx, key,temp,loadfailout);
+ // discarded
+
+
+ MAKE_KEY("EXT_APIC_FEATURE");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].ext_apic_feature,loadfailout);
+ MAKE_KEY("SPEC_EOI");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].spec_eoi,loadfailout);
+ MAKE_KEY("TMR_CUR_CNT");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].tmr_cur_cnt,loadfailout);
+
+ MAKE_KEY("TMR_INIT_CNT");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].tmr_init_cnt,loadfailout);
+ MAKE_KEY("EXT_INTR_VEC_TBL");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].ext_intr_vec_tbl,loadfailout);
+
+ MAKE_KEY("REM_RD_DATA");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].rem_rd_data,loadfailout);
+ MAKE_KEY("IPI_STATE");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].ipi_state,loadfailout);
+ MAKE_KEY("INT_REQ_REG");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].int_req_reg,loadfailout);
+ MAKE_KEY("INT_SVC_REG");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].int_svc_reg,loadfailout);
+ MAKE_KEY("INT_EN_REG");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].int_en_reg,loadfailout);
+ MAKE_KEY("TRIG_MODE_REG");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].trig_mode_reg,loadfailout);
+ MAKE_KEY("EOI");
+ V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].eoi,loadfailout);
+ }
+
+
+ return 0;
+
+ loadfailout:
+ PrintError(VM_NONE,VCORE_NONE, "Failed to load apic\n");
+ return -1;
+
+}
+
+#endif
static struct v3_device_ops dev_ops = {
.free = (int (*)(void *))apic_free,
+#ifdef V3_CONFIG_CHECKPOINT
+ .save = apic_save,
+ .load = apic_load
+#endif
};
struct apic_dev_state * apic_dev = NULL;
int i = 0;
- PrintDebug("apic: creating an APIC for each core\n");
+ PrintDebug(vm, VCORE_NONE, "apic: creating an APIC for each core\n");
apic_dev = (struct apic_dev_state *)V3_Malloc(sizeof(struct apic_dev_state) +
sizeof(struct apic_state) * vm->num_cores);
- apic_dev->num_apics = vm->num_cores;
- v3_lock_init(&(apic_dev->ipi_lock));
+ if (!apic_dev) {
+ PrintError(vm, VCORE_NONE, "Failed to allocate space for APIC\n");
+ return -1;
+ }
+
+ memset(apic_dev,0,
+ sizeof(struct apic_dev_state) +
+ sizeof(struct apic_state) * vm->num_cores);
+
+ apic_dev->num_apics = vm->num_cores;
+ v3_lock_init(&(apic_dev->state_lock));
struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, apic_dev);
if (dev == NULL) {
- PrintError("apic: Could not attach device %s\n", dev_id);
+ PrintError(vm, VCORE_NONE, "apic: Could not attach device %s\n", dev_id);
V3_Free(apic_dev);
return -1;
}
apic->timer = v3_add_timer(core, &timer_ops, apic_dev);
if (apic->timer == NULL) {
- PrintError("APIC: Failed to attach timer to core %d\n", i);
+ PrintError(vm, VCORE_NONE,"APIC: Failed to attach timer to core %d\n", i);
v3_remove_device(dev);
return -1;
}
- v3_hook_full_mem(vm, core->cpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
+ v3_hook_full_mem(vm, core->vcpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
- PrintDebug("apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
+ PrintDebug(vm, VCORE_NONE, "apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
}
-#ifdef CONFIG_DEBUG_APIC
+#ifdef V3_CONFIG_DEBUG_APIC
for (i = 0; i < vm->num_cores; i++) {
struct apic_state * apic = &(apic_dev->apics[i]);
- PrintDebug("apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
+ PrintDebug(vm, VCORE_NONE, "apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
i, apic, apic->lapic_id.val, apic->base_addr_msr.value,apic->core);
}
#endif
- PrintDebug("apic: priv_data is at %p\n", apic_dev);
+ PrintDebug(vm, VCORE_NONE, "apic: priv_data is at %p\n", apic_dev);
v3_hook_msr(vm, BASE_ADDR_MSR, read_apic_msr, write_apic_msr, apic_dev);