* Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
* All rights reserved.
*
- * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ * Authors: Jack Lange <jarusl@cs.northwestern.edu>
+ * Peter Dinda <pdinda@northwestern.edu> (SMP)
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
#include <palacios/vmm_types.h>
+//
+// MUST DO APIC SCAN FOR PHYSICAL DELIVERY
+//
+
+
+
#ifndef CONFIG_DEBUG_APIC
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
-#ifdef CONFIG_DEBUG_APIC
// Printable names for the ICR destination-shorthand field values (0-3),
// indexed directly by the 2-bit shorthand; used only in IPI debug output.
static char * shorthand_str[] = {
"(no shorthand)",
"(self)",
"(Start Up)",
"(ExtInt)",
};
+#ifdef CONFIG_DEBUG_APIC
#endif
+
+#define v3_lock(p) p=p
+#define v3_unlock(p) p=p
+
+
// Internally generated (LVT-sourced) interrupt classes: timer, thermal,
// performance counter, the LINT0/LINT1 pins, and the APIC error interrupt.
typedef enum { APIC_TMR_INT, APIC_THERM_INT, APIC_PERF_INT,
APIC_LINT0_INT, APIC_LINT1_INT, APIC_ERR_INT } apic_irq_type_t;
#define APIC_FIXED_DELIVERY 0x0
+#define APIC_LOWEST_DELIVERY 0x1
#define APIC_SMI_DELIVERY 0x2
+#define APIC_RES1_DELIVERY 0x3
#define APIC_NMI_DELIVERY 0x4
#define APIC_INIT_DELIVERY 0x5
+#define APIC_SIPI_DELIVERY 0x6
#define APIC_EXTINT_DELIVERY 0x7
+#define APIC_SHORTHAND_NONE 0x0
+#define APIC_SHORTHAND_SELF 0x1
+#define APIC_SHORTHAND_ALL 0x2
+#define APIC_SHORTHAND_ALL_BUT_ME 0x3
+
+#define APIC_DEST_PHYSICAL 0x0
+#define APIC_DEST_LOGICAL 0x1
+
#define BASE_ADDR_MSR 0x0000001B
#define DEFAULT_BASE_ADDR 0xfee00000
struct guest_info * core;
+ void * controller_handle;
+
struct v3_timer * timer;
uint32_t eoi;
v3_lock_t lock;
+
+ // debug
+ uint8_t in_icr;
};
struct apic_dev_state {
int num_apics;
+ // v3_lock_t ipi_lock; // acquired by route_ipi - only one IPI active at a time
struct apic_state apics[0];
} __attribute__((packed));
+
+
static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data);
static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data);
+// No locking done
static void init_apic_state(struct apic_state * apic, uint32_t id) {
apic->base_addr = DEFAULT_BASE_ADDR;
apic->spec_eoi.val = 0x00000000;
v3_lock_init(&(apic->lock));
+
+ //debug
+ apic->in_icr=0;
}
+// MSR handler - locks apic itself
static int read_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t * dst, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
return 0;
}
-
+// MSR handler - locks apic itself
static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
// irq_num is the bit offset into a 256 bit buffer...
-static int activate_apic_irq(struct apic_state * apic, uint32_t irq_num) {
+// return values
+// -1 = error
+// 0 = OK, no interrupt needed now
+// 1 = OK, interrupt needed now
+// the caller is expected to have locked the apic
+static int activate_apic_irq_nolock(struct apic_state * apic, uint32_t irq_num) {
int major_offset = (irq_num & ~0x00000007) >> 3;
int minor_offset = irq_num & 0x00000007;
uint8_t * req_location = apic->int_req_reg + major_offset;
uint8_t flag = 0x1 << minor_offset;
-
- if (irq_num <= 15) {
+ if (irq_num <= 15 || irq_num>255) {
PrintError("apic %u: core %d: Attempting to raise an invalid interrupt: %d\n",
apic->lapic_id.val, apic->core->cpu_id, irq_num);
return -1;
if (*req_location & flag) {
PrintDebug("Interrupt %d coallescing\n", irq_num);
+ return 0;
}
if (*en_location & flag) {
*req_location |= flag;
+
+ if (apic->in_icr) {
+ PrintError("apic %u: core %d: activate_apic_irq_nolock to deliver irq 0x%x when in_icr=1\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
+ // return 0;
+ }
+
+ return 1;
} else {
PrintDebug("apic %u: core %d: Interrupt not enabled... %.2x\n",
apic->lapic_id.val, apic->core->cpu_id,*en_location);
return 0;
}
- return 0;
}
-
+// Caller is expected to have locked the apic
static int get_highest_isr(struct apic_state * apic) {
int i = 0, j = 0;
}
-
+// Caller is expected to have locked the apic
static int get_highest_irr(struct apic_state * apic) {
int i = 0, j = 0;
-
+// Caller is expected to have locked the apic
static int apic_do_eoi(struct apic_state * apic) {
int isr_irq = get_highest_isr(apic);
return 0;
}
-
-static int activate_internal_irq(struct apic_state * apic, apic_irq_type_t int_type) {
+// Caller is expected to have locked the apic
+static int activate_internal_irq_nolock(struct apic_state * apic, apic_irq_type_t int_type) {
uint32_t vec_num = 0;
uint32_t del_mode = 0;
int masked = 0;
if (del_mode == APIC_FIXED_DELIVERY) {
//PrintDebug("Activating internal APIC IRQ %d\n", vec_num);
- return activate_apic_irq(apic, vec_num);
+ return activate_apic_irq_nolock(apic, vec_num);
} else {
PrintError("apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
return -1;
}
-
+// Caller is expected to have locked the destination apic
static inline int should_deliver_cluster_ipi(struct guest_info * dst_core,
struct apic_state * dst_apic, uint8_t mda) {
- if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) && // (I am in the cluster and
- ((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { // I am in the set)
+ if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) && /* (I am in the cluster and */
+ ((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { /* I am in the set) */
PrintDebug("apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
dst_apic->lapic_id.val, dst_core->cpu_id, mda,
}
}
+// Caller is expected to have locked the destination apic
static inline int should_deliver_flat_ipi(struct guest_info * dst_core,
struct apic_state * dst_apic, uint8_t mda) {
PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
dst_apic->lapic_id.val, dst_core->cpu_id, mda,
dst_apic->log_dst.dst_log_id);
- return 1;
+
+ return 1;
+
} else {
+
PrintDebug("apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
dst_apic->lapic_id.val, dst_core->cpu_id, mda,
dst_apic->log_dst.dst_log_id);
- return 0;
+ return 0;
}
}
-
+// Caller is expected to have locked the destination apic
static int should_deliver_ipi(struct guest_info * dst_core,
struct apic_state * dst_apic, uint8_t mda) {
if (dst_apic->dst_fmt.model == 0xf) {
if (mda == 0xff) {
- // always deliver broadcast
+ /* always deliver broadcast */
return 1;
}
return should_deliver_flat_ipi(dst_core, dst_apic, mda);
+
} else if (dst_apic->dst_fmt.model == 0x0) {
if (mda == 0xff) {
- // always deliver broadcast
+ /* always deliver broadcast */
return 1;
}
return should_deliver_cluster_ipi(dst_core, dst_apic, mda);
+
} else {
PrintError("apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
dst_apic->lapic_id.val, dst_core->cpu_id, dst_apic->dst_fmt.model);
}
}
-
+// Caller is expected to have locked the destination apic
+// Only the src_apic pointer is used
static int deliver_ipi(struct apic_state * src_apic,
struct apic_state * dst_apic,
uint32_t vector, uint8_t del_mode) {
+
struct guest_info * dst_core = dst_apic->core;
+ int do_xcall;
switch (del_mode) {
- case 0: //fixed
- case 1: // lowest priority
+ case APIC_FIXED_DELIVERY:
+ case APIC_LOWEST_DELIVERY:
+ // lowest priority -
+ // caller needs to have decided which apic to deliver to!
+
PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->cpu_id);
- activate_apic_irq(dst_apic, vector);
+ do_xcall=activate_apic_irq_nolock(dst_apic, vector);
+
+ if (do_xcall<0) {
+ PrintError("Failed to activate apic irq!\n");
+ return -1;
+ }
- if (dst_apic != src_apic) {
+ if (do_xcall && (dst_apic != src_apic)) {
// Assume core # is same as logical processor for now
// TODO FIX THIS FIX THIS
// THERE SHOULD BE: guestapicid->virtualapicid map,
// cpu_id->logical processor map
// host maintains logical proc->physical proc
- PrintDebug(" non-local core, forcing it to exit\n");
+ PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
+#ifdef CONFIG_MULTITHREAD_OS
v3_interrupt_cpu(dst_core->vm_info, dst_core->cpu_id, 0);
+#else
+ V3_ASSERT(0);
+#endif
}
break;
- case 5: { //INIT
+
+ case APIC_INIT_DELIVERY: {
PrintDebug(" INIT delivery to core %u\n", dst_core->cpu_id);
// Sanity check
if (dst_apic->ipi_state != INIT_ST) {
- PrintError(" Warning: core %u is not in INIT state (mode = %d), ignored\n",
+ PrintError(" Warning: core %u is not in INIT state (mode = %d), ignored (assuming this is the deassert)\n",
dst_core->cpu_id, dst_apic->ipi_state);
// Only a warning, since INIT INIT SIPI is common
break;
break;
}
- case 6: { //SIPI
+ case APIC_SIPI_DELIVERY: {
// Sanity check
if (dst_apic->ipi_state != SIPI) {
break;
}
- case 2: // SMI
- case 3: // reserved
- case 4: // NMI
- case 7: // ExtInt
+ case APIC_SMI_DELIVERY:
+ case APIC_RES1_DELIVERY: // reserved
+ case APIC_NMI_DELIVERY:
+ case APIC_EXTINT_DELIVERY: // ExtInt
default:
PrintError("IPI %d delivery is unsupported\n", del_mode);
return -1;
}
-
+// route_ipi is responsible for all locking
+// the assumption is that you enter with no locks
+// there is a global lock for the icc bus, so only
+// one route_ipi progresses at any time
+// destination apics are locked as needed
+// if multiple apic locks are acquired at any point,
+// this is done in the order of the array, so no
+// deadlock should be possible
static int route_ipi(struct apic_dev_state * apic_dev,
struct apic_state * src_apic,
struct int_cmd_reg * icr) {
struct apic_state * dest_apic = NULL;
- PrintDebug("route_ipi: src_apic=%p, icr_data=%p\n",
- src_apic, (void *)(addr_t)icr->val);
-
- if ((icr->dst_mode == 0) && (icr->dst >= apic_dev->num_apics)) {
- PrintError("route_ipi: Attempted send to unregistered apic id=%u\n",
- icr->dst);
- return -1;
- }
-
- dest_apic = &(apic_dev->apics[icr->dst]);
+ //v3_lock(apic_dev->ipi_lock); // this may not be needed
+ // now I know only one IPI is being routed, this one
+ // also, I do not have any apic locks
+ // I need to acquire locks on pairs of src/dest apics
+ // and I will do that using the total order
+ // given by their cores
- PrintDebug("route_ipi: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
+ PrintDebug("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
deliverymode_str[icr->del_mode],
icr->vec,
src_apic,
icr->dst,
icr->val);
+#if 1
+ if (icr->vec!=48) {
+ V3_Print("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
+ deliverymode_str[icr->del_mode],
+ icr->vec,
+ src_apic,
+ (icr->dst_mode == 0) ? "(physical)" : "(logical)",
+ shorthand_str[icr->dst_shorthand],
+ icr->dst,
+ icr->val);
+ }
+
+#endif
+
+
switch (icr->dst_shorthand) {
- case 0: // no shorthand
- if (icr->dst_mode == 0) {
- // physical delivery
+ case APIC_SHORTHAND_NONE: // no shorthand
+ if (icr->dst_mode == APIC_DEST_PHYSICAL) {
+
+ if (icr->dst >= apic_dev->num_apics) {
+ PrintError("apic: Attempted send to unregistered apic id=%u\n", icr->dst);
+ goto route_ipi_out_bad;
+ }
+
+
+ dest_apic = &(apic_dev->apics[icr->dst]);
+
+ V3_Print("apic: phsyical destination of %u (apic %u at 0x%p)\n", icr->dst,dest_apic->lapic_id.val,dest_apic);
+
+ v3_lock(dest_apic->lock);
if (deliver_ipi(src_apic, dest_apic,
icr->vec, icr->del_mode) == -1) {
- PrintError("Error: Could not deliver IPI\n");
- return -1;
+ PrintError("apic: Could not deliver IPI\n");
+ v3_unlock(dest_apic->lock);
+ goto route_ipi_out_bad;
}
- } else {
- // logical delivery
- int i;
- uint8_t mda = icr->dst;
-
- for (i = 0; i < apic_dev->num_apics; i++) {
- dest_apic = &(apic_dev->apics[i]);
- int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
-
- if (del_flag == -1) {
- PrintError("Error checking delivery mode\n");
- return -1;
- } else if (del_flag == 1) {
- if (deliver_ipi(src_apic, dest_apic,
+ v3_unlock(dest_apic->lock);
+
+ V3_Print("apic: done\n");
+
+ } else if (icr->dst_mode == APIC_DEST_LOGICAL) {
+
+ if (icr->del_mode!=APIC_LOWEST_DELIVERY ) {
+ // logical, but not lowest priority
+ // we immediately trigger
+ // fixed, smi, reserved, nmi, init, sipi, etc
+ int i;
+
+ uint8_t mda = icr->dst;
+
+ for (i = 0; i < apic_dev->num_apics; i++) {
+
+ dest_apic = &(apic_dev->apics[i]);
+
+ v3_lock(dest_apic->lock);
+
+ int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
+
+ if (del_flag == -1) {
+ PrintError("apic: Error checking delivery mode\n");
+ v3_unlock(dest_apic->lock);
+ goto route_ipi_out_bad;
+ } else if (del_flag == 1) {
+ if (deliver_ipi(src_apic, dest_apic,
+ icr->vec, icr->del_mode) == -1) {
+ PrintError("apic: Error: Could not deliver IPI\n");
+ v3_unlock(dest_apic->lock);
+ goto route_ipi_out_bad;
+ }
+ }
+
+ v3_unlock(dest_apic->lock);
+ }
+ } else { //APIC_LOWEST_DELIVERY
+ // logical, lowest priority
+ // scan, keeping a lock on the current best, then trigger
+ int i;
+ int have_cur_lock;
+ struct apic_state * cur_best_apic = NULL;
+
+ uint8_t mda = icr->dst;
+
+ have_cur_lock=0;
+
+
+ // Note that even if there are multiple concurrent
+ // copies of this loop executing, they are all
+ // locking in the same order
+
+ for (i = 0; i < apic_dev->num_apics; i++) {
+
+ dest_apic = &(apic_dev->apics[i]);
+
+ v3_lock(dest_apic->lock);
+ have_cur_lock=1;
+
+ int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
+
+ if (del_flag == -1) {
+ PrintError("apic: Error checking delivery mode\n");
+ v3_unlock(dest_apic->lock);
+ if (cur_best_apic && cur_best_apic!=dest_apic) {
+ v3_unlock(cur_best_apic->lock);
+ }
+ goto route_ipi_out_bad;
+ } else if (del_flag == 1) {
+ // update priority for lowest priority scan
+ if (!cur_best_apic) {
+ cur_best_apic=dest_apic; // note we leave it locked
+ have_cur_lock=0; // we will unlock as cur_best_apic
+ } else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
+ // we now unlock the current best one and then switch
+ // so in the end we have a lock on the new cur_best_apic
+ v3_unlock(cur_best_apic->lock);
+ cur_best_apic=dest_apic;
+ have_cur_lock=0; // will unlock as cur_best_apic
+ }
+ }
+ if (have_cur_lock) {
+ v3_unlock(dest_apic->lock);
+ }
+
+ }
+ // now we will deliver to the best one if it exists
+ // and it is locked
+ if (!cur_best_apic) {
+ PrintDebug("apic: lowest priority deliver, but no destinations!\n");
+ } else {
+ if (deliver_ipi(src_apic, cur_best_apic,
icr->vec, icr->del_mode) == -1) {
- PrintError("Error: Could not deliver IPI\n");
- return -1;
+ PrintError("apic: Error: Could not deliver IPI\n");
+ v3_unlock(cur_best_apic->lock);
+ goto route_ipi_out_bad;
+ } else {
+ v3_unlock(cur_best_apic->lock);
}
+ //V3_Print("apic: logical, lowest priority delivery to apic %u\n",cur_best_apic->lapic_id.val);
}
}
}
-
+
break;
- case 1: // self
+ case APIC_SHORTHAND_SELF: // self
+
+ /* I assume I am already locked! */
- if (src_apic == NULL) {
- PrintError("Sending IPI to self from generic IPI sender\n");
+ if (src_apic == NULL) { /* this is not an apic, but it's trying to send to itself??? */
+ PrintError("apic: Sending IPI to self from generic IPI sender\n");
break;
}
- if (icr->dst_mode == 0) {
+ v3_lock(src_apic->lock);
+
+ if (icr->dst_mode == APIC_DEST_PHYSICAL) { /* physical delivery */
if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
- PrintError("Could not deliver IPI\n");
- return -1;
+ PrintError("apic: Could not deliver IPI to self (physical)\n");
+ v3_unlock(src_apic->lock);
+ goto route_ipi_out_bad;
+ }
+ } else if (icr->dst_mode == APIC_DEST_LOGICAL) { /* logical delivery */
+ PrintError("apic: use of logical delivery in self (untested)\n");
+ if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
+ PrintError("apic: Could not deliver IPI to self (logical)\n");
+ v3_unlock(src_apic->lock);
+ goto route_ipi_out_bad;
}
- } else {
- // logical delivery
- PrintError("use of logical delivery in self is not yet supported.\n");
- return -1;
}
+ v3_unlock(src_apic->lock);
break;
- case 2:
- case 3: { // all and all-but-me
- // assuming that logical verus physical doesn't matter
- // although it is odd that both are used
+ case APIC_SHORTHAND_ALL:
+ case APIC_SHORTHAND_ALL_BUT_ME: { /* all and all-but-me */
+ /* assuming that logical versus physical doesn't matter
+ although it is odd that both are used */
+
int i;
for (i = 0; i < apic_dev->num_apics; i++) {
dest_apic = &(apic_dev->apics[i]);
-
- if ((dest_apic != src_apic) || (icr->dst_shorthand == 2)) {
+
+
+ if ((dest_apic != src_apic) || (icr->dst_shorthand == APIC_SHORTHAND_ALL)) {
+ v3_lock(dest_apic->lock);
if (deliver_ipi(src_apic, dest_apic, icr->vec, icr->del_mode) == -1) {
- PrintError("Error: Could not deliver IPI\n");
- return -1;
+ PrintError("apic: Error: Could not deliver IPI\n");
+ v3_unlock(dest_apic->lock);
+ goto route_ipi_out_bad;
}
+ v3_unlock(dest_apic->lock);
}
- }
-
- break;
+
+ }
}
+ break;
default:
- PrintError("Error routing IPI, invalid Mode (%d)\n", icr->dst_shorthand);
- return -1;
+ PrintError("apic: Error routing IPI, invalid Mode (%d)\n", icr->dst_shorthand);
+ goto route_ipi_out_bad;
}
-
+
+ // route_ipi_out_good:
+ //v3_unlock(apic_dev->ipi_lock);
return 0;
-}
+ route_ipi_out_bad:
+ //v3_unlock(apic_dev->ipi_lock);
+ return -1;
+}
+// External function, expected to acquire lock on apic
static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
uint32_t val = 0;
+ v3_lock(apic->lock);
PrintDebug("apic %u: core %u: at %p: Read apic address space (%p)\n",
apic->lapic_id.val, core->cpu_id, apic, (void *)guest_addr);
PrintError("apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
- return -1;
+ goto apic_read_out_bad;
}
default:
PrintError("apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
- return -1;
+ goto apic_read_out_bad;
}
} else {
PrintError("apic %u: core %u: Invalid apic read length (%d)\n",
apic->lapic_id.val, core->cpu_id, length);
- return -1;
+ goto apic_read_out_bad;
}
PrintDebug("apic %u: core %u: Read finished (val=%x)\n",
apic->lapic_id.val, core->cpu_id, *(uint32_t *)dst);
+
+ // apic_read_out_good:
+ v3_unlock(apic->lock);
return length;
+
+ apic_read_out_bad:
+ v3_unlock(apic->lock);
+ return -1;
}
struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
uint32_t op_val = *(uint32_t *)src;
+
+ v3_lock(apic->lock);
+
PrintDebug("apic %u: core %u: at %p and priv_data is at %p\n",
apic->lapic_id.val, core->cpu_id, apic, priv_data);
if (msr->apic_enable == 0) {
PrintError("apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
- return -1;
+ goto apic_write_out_bad;
}
if (length != 4) {
PrintError("apic %u: core %u: Invalid apic write length (%d)\n",
apic->lapic_id.val, length, core->cpu_id);
- return -1;
+ goto apic_write_out_bad;
}
switch (reg_addr) {
PrintError("apic %u: core %u: Attempting to write to read only register %p (error)\n",
apic->lapic_id.val, core->cpu_id, (void *)reg_addr);
- // return -1;
+ // goto apic_write_out_bad;
break;
// Data registers
case APIC_ID_OFFSET:
- PrintDebug("apic %u: core %u: my id is being changed to %u\n",
+ V3_Print("apic %u: core %u: my id is being changed to %u\n",
apic->lapic_id.val, core->cpu_id, op_val);
apic->lapic_id.val = op_val;
// Action Registers
case EOI_OFFSET:
- // do eoi
+ // do eoi (we already have the lock)
apic_do_eoi(apic);
break;
- case INT_CMD_LO_OFFSET:
+ case INT_CMD_LO_OFFSET: {
+ // execute command (we already have the lock)
+
+ struct int_cmd_reg tmp_icr;
+
apic->int_cmd.lo = op_val;
- PrintDebug("apic %u: core %u: sending cmd 0x%llx to apic %u\n",
- apic->lapic_id.val, core->cpu_id,
- apic->int_cmd.val, apic->int_cmd.dst);
+ tmp_icr=apic->int_cmd;
+
+ // V3_Print("apic %u: core %u: sending cmd 0x%llx to apic %u\n",
+ // apic->lapic_id.val, core->cpu_id,
+ // apic->int_cmd.val, apic->int_cmd.dst);
- if (route_ipi(apic_dev, apic, &(apic->int_cmd)) == -1) {
+ apic->in_icr=0;
+
+ v3_unlock(apic->lock);
+
+ // route_ipi is responsible for locking apics, so we go in unlocked)
+ if (route_ipi(apic_dev, apic, &tmp_icr) == -1) {
PrintError("IPI Routing failure\n");
- return -1;
+ goto apic_write_out_bad;
}
+ // v3_lock(apic->lock); // expected for leaving this function
+
+ }
break;
- case INT_CMD_HI_OFFSET:
+ case INT_CMD_HI_OFFSET: {
+ // already have the lock
+ if (apic->in_icr) {
+ PrintError("apic %u: core %u: writing command high=0x%x while in_icr=1\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
+ }
+
apic->int_cmd.hi = op_val;
+ //V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
+ apic->in_icr=1;
+ }
break;
PrintError("apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
- return -1;
+ goto apic_write_out_bad;
}
PrintDebug("apic %u: core %u: Write finished\n", apic->lapic_id.val, core->cpu_id);
+ // apic_write_out_good:
+ v3_unlock(apic->lock);
return length;
+
+ apic_write_out_bad:
+ v3_unlock(apic->lock);
+ return -1;
}
/* Interrupt Controller Functions */
-// returns 1 if an interrupt is pending, 0 otherwise
-static int apic_intr_pending(struct guest_info * core, void * private_data) {
+// internally used, expects caller to lock
+static int apic_intr_pending_nolock(struct guest_info * core, void * private_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
int req_irq = get_highest_irr(apic);
return 0;
}
-static int apic_get_intr_number(struct guest_info * core, void * private_data) {
+// externally visible, so must lock itself
+static int apic_intr_pending(struct guest_info * core, void * private_data) {
+ struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
+ struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ int rc;
+
+ v3_lock(apic->lock);
+
+ rc=apic_intr_pending_nolock(core,private_data);
+
+ v3_unlock(apic->lock);
+
+ return rc;
+}
+
+// Internal - no lock
+static int apic_get_intr_number_nolock(struct guest_info * core, void * private_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
int req_irq = get_highest_irr(apic);
}
+// Externally visible, so must lock itself
+static int apic_get_intr_number(struct guest_info * core, void * private_data) {
+ struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
+ struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ int rc;
+
+ v3_lock(apic->lock);
+
+ rc=apic_get_intr_number_nolock(core,private_data);
+
+ v3_unlock(apic->lock);
+
+ return rc;
+}
+
+
+//
+// Here there is no source APIC, so there is no need to lock it
+// Furthermore, the expectation is that route_ipi will lock the destination apic
int v3_apic_send_ipi(struct v3_vm_info * vm, struct v3_gen_ipi * ipi, void * dev_data) {
- struct apic_dev_state * apic_dev = (struct apic_dev_state *)dev_data;
+ struct apic_dev_state * apic_dev = (struct apic_dev_state *)
+ (((struct vm_device *)dev_data)->private_data);
struct int_cmd_reg tmp_icr;
// zero out all the fields
tmp_icr.val = 0;
-
tmp_icr.vec = ipi->vector;
tmp_icr.del_mode = ipi->mode;
tmp_icr.dst_mode = ipi->logical;
tmp_icr.trig_mode = ipi->trigger_mode;
tmp_icr.dst_shorthand = ipi->dst_shorthand;
tmp_icr.dst = ipi->dst;
-
+ // route_ipi is responsible for locking the destination apic
return route_ipi(apic_dev, NULL, &tmp_icr);
}
int v3_apic_raise_intr(struct v3_vm_info * vm, uint32_t irq, uint32_t dst, void * dev_data) {
- struct apic_dev_state * apic_dev = (struct apic_dev_state *)(dev_data);
+ struct apic_dev_state * apic_dev = (struct apic_dev_state *)
+ (((struct vm_device*)dev_data)->private_data);
struct apic_state * apic = &(apic_dev->apics[dst]);
+ int do_xcall;
PrintDebug("apic %u core ?: raising interrupt IRQ %u (dst = %u).\n", apic->lapic_id.val, irq, dst);
- activate_apic_irq(apic, irq);
+ v3_lock(apic->lock);
- if (V3_Get_CPU() != dst) {
- v3_interrupt_cpu(vm, dst, 0);
+ do_xcall=activate_apic_irq_nolock(apic, irq);
+
+ if (do_xcall<0) {
+ PrintError("Failed to activate apic irq\n");
+ v3_unlock(apic->lock);
+ return -1;
}
+
+ if (do_xcall>0 && (V3_Get_CPU() != dst)) {
+#ifdef CONFIG_MULTITHREAD_OS
+ v3_interrupt_cpu(vm, dst, 0);
+#else
+ V3_ASSERT(0);
+#endif
+ }
+ v3_unlock(apic->lock);
return 0;
}
-
-static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
+// internal - caller must lock
+static int apic_begin_irq_nolock(struct guest_info * core, void * private_data, int irq) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
int major_offset = (irq & ~0x00000007) >> 3;
int minor_offset = irq & 0x00000007;
- uint8_t * req_location = apic->int_req_reg + major_offset;
- uint8_t * svc_location = apic->int_svc_reg + major_offset;
+ uint8_t *req_location = apic->int_req_reg + major_offset;
+ uint8_t *svc_location = apic->int_svc_reg + major_offset;
uint8_t flag = 0x01 << minor_offset;
if (*req_location & flag) {
return 0;
}
+// Since this is called, externally, it should lock the apic
+static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
+ struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
+ struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ int rc;
+
+ v3_lock(apic->lock);
+
+ rc=apic_begin_irq_nolock(core,private_data,irq);
+
+ v3_unlock(apic->lock);
+
+ return rc;
+}
/* Timer Functions */
-static void apic_update_time(struct guest_info * core,
+// Caller will lock the apic
+static void apic_update_time_nolock(struct guest_info * core,
uint64_t cpu_cycles, uint64_t cpu_freq,
void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
apic->lapic_id.val, core->cpu_id,
apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
- if (apic_intr_pending(core, priv_data)) {
+ if (apic_intr_pending_nolock(core, priv_data)) {
PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
apic->lapic_id.val, core->cpu_id,
apic_get_intr_number(core, priv_data));
}
- if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
+ if (activate_internal_irq_nolock(apic, APIC_TMR_INT) == -1) {
PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
apic->lapic_id.val, core->cpu_id);
}
}
}
-
+ return;
}
+static void apic_update_time(struct guest_info * core,
+ uint64_t cpu_cycles, uint64_t cpu_freq,
+ void * priv_data) {
+ struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
+ struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+
+ v3_lock(apic->lock);
+
+ apic_update_time_nolock(core,cpu_cycles,cpu_freq,priv_data);
+
+ v3_unlock(apic->lock);
+
+ return;
+}
+
static struct intr_ctrl_ops intr_ops = {
.intr_pending = apic_intr_pending,
.get_intr_number = apic_get_intr_number,
-static int apic_free(struct vm_device * dev) {
- struct apic_dev_state * apic_dev = (struct apic_dev_state *)dev->private_data;
+static int apic_free(struct apic_dev_state * apic_dev) {
int i = 0;
+ struct v3_vm_info * vm = NULL;
- for (i = 0; i < dev->vm->num_cores; i++) {
+ for (i = 0; i < apic_dev->num_apics; i++) {
struct apic_state * apic = &(apic_dev->apics[i]);
- struct guest_info * core = &(dev->vm->cores[i]);
+ struct guest_info * core = apic->core;
+ vm = core->vm_info;
- // unregister intr controller
+ v3_remove_intr_controller(core, apic->controller_handle);
if (apic->timer) {
v3_remove_timer(core, apic->timer);
}
- v3_unhook_msr(dev->vm, BASE_ADDR_MSR);
+ v3_unhook_msr(vm, BASE_ADDR_MSR);
V3_Free(apic_dev);
return 0;
static struct v3_device_ops dev_ops = {
- .free = apic_free,
+ .free = (int (*)(void *))apic_free,
};
-
-
static int apic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
char * dev_id = v3_cfg_val(cfg, "ID");
struct apic_dev_state * apic_dev = NULL;
apic_dev->num_apics = vm->num_cores;
- struct vm_device * dev = v3_allocate_device(dev_id, &dev_ops, apic_dev);
+ //v3_lock_init(&(apic_dev->ipi_lock));
+
+ struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, apic_dev);
- if (v3_attach_device(vm, dev) == -1) {
+ if (dev == NULL) {
PrintError("apic: Could not attach device %s\n", dev_id);
V3_Free(apic_dev);
return -1;
init_apic_state(apic, i);
- v3_register_intr_controller(core, &intr_ops, apic_dev);
+ apic->controller_handle = v3_register_intr_controller(core, &intr_ops, apic_dev);
apic->timer = v3_add_timer(core, &timer_ops, apic_dev);
if (apic->timer == NULL) {
PrintError("APIC: Failed to attach timer to core %d\n", i);
- v3_detach_device(dev);
+ v3_remove_device(dev);
return -1;
}
#ifdef CONFIG_DEBUG_APIC
for (i = 0; i < vm->num_cores; i++) {
struct apic_state * apic = &(apic_dev->apics[i]);
- PrintDebug("apic: sanity check: apic %u (at %p) has id %u and msr value %llx\n",
- i, apic, apic->lapic_id.val, apic->base_addr_msr.value);
+ PrintDebug("apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
+ i, apic, apic->lapic_id.val, apic->base_addr_msr.value,apic->core);
}
#endif