X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fdevices%2Fapic.c;h=d88852d21008ea6a355b8af7a391905d6ba6889c;hb=f7e83e5d2d00ba107ccda346da4660ab523471bb;hp=5a2856adeedc319623a3d84c3e2cd6906e674f77;hpb=a8627ddaccf49073eb04286c5ea4767e2903c351;p=palacios.git

diff --git a/palacios/src/devices/apic.c b/palacios/src/devices/apic.c
index 5a2856a..d88852d 100644
--- a/palacios/src/devices/apic.c
+++ b/palacios/src/devices/apic.c
@@ -36,7 +36,7 @@
  * 1. Queue locks. Actual irq insertions are done via queueing irq ops at the dest apic.
  *    The destination apic's core is responsible for draining the queue, and actually
  *    setting the vector table.
- * 2. State locks. This is a standard lock taken when internal apic state is read/written.
+ * 2. State lock. This is a standard lock taken when internal apic state is read/written.
  *    When an irq's destination is determined this lock is taken to examine the apic's
  *    addressability.
  * 3. VM barrier lock. This is taken when actual VM core state is changed (via SIPI).
@@ -179,6 +179,7 @@ struct apic_msr {
 
 
 
+
 typedef enum {INIT_ST, SIPI, STARTED} ipi_state_t;
 
@@ -219,7 +220,7 @@ struct apic_state {
     uint32_t tmr_cur_cnt;
     uint32_t tmr_init_cnt;
 
-
+    uint32_t missed_ints;
 
     struct local_vec_tbl_reg ext_intr_vec_tbl[4];
 
@@ -239,7 +240,7 @@ struct apic_state {
 
     struct v3_timer * timer;
 
-    v3_lock_t state_lock;
+    struct v3_queue irq_queue;
 
     uint32_t eoi;
 
@@ -253,6 +254,8 @@ struct apic_state {
 struct apic_dev_state {
     int num_apics;
 
+    v3_lock_t state_lock;
+
     struct apic_state apics[0];
 } __attribute__((packed));
 
@@ -292,6 +295,7 @@ static void init_apic_state(struct apic_state * apic, uint32_t id) {
     apic->rem_rd_data = 0x00000000;
     apic->tmr_init_cnt = 0x00000000;
     apic->tmr_cur_cnt = 0x00000000;
+    apic->missed_ints = 0;
 
     apic->lapic_id.val = id;
 
@@ -377,6 +381,9 @@ static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, vo
 }
 
 
+
+
+
 // irq_num is the bit offset into a 256 bit buffer...
 static int activate_apic_irq(struct apic_state * apic, uint32_t irq_num) {
     int major_offset = (irq_num & ~0x00000007) >> 3;
@@ -386,13 +393,6 @@ static int activate_apic_irq(struct apic_state * apic, uint32_t irq_num) {
     uint8_t flag = 0x1 << minor_offset;
 
-    if (irq_num <= 15 || irq_num > 255) {
-        PrintError("apic %u: core %d: Attempting to raise an invalid interrupt: %d\n",
-                   apic->lapic_id.val, apic->core->vcpu_id, irq_num);
-        return -1;
-    }
-
-
     PrintDebug("apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->vcpu_id, irq_num);
 
     if (*req_location & flag) {
@@ -412,6 +412,30 @@ static int activate_apic_irq(struct apic_state * apic, uint32_t irq_num) {
 }
 
 
+static int add_apic_irq_entry(struct apic_state * apic, uint8_t irq_num) {
+
+    if (irq_num <= 15) {
+        PrintError("core %d: Attempting to raise an invalid interrupt: %d\n",
+                   apic->core->vcpu_id, irq_num);
+        return -1;
+    }
+
+    v3_enqueue(&(apic->irq_queue), (addr_t)irq_num);
+
+    return 0;
+}
+
+static void drain_irq_entries(struct apic_state * apic) {
+    uint32_t irq = 0;
+
+    while ((irq = (uint32_t)v3_dequeue(&(apic->irq_queue))) != 0) {
+        activate_apic_irq(apic, irq);
+    }
+
+}
+
+
+
 static int get_highest_isr(struct apic_state * apic) {
     int i = 0, j = 0;
 
@@ -540,7 +564,7 @@ static int activate_internal_irq(struct apic_state * apic, apic_irq_type_t int_t
 
     if (del_mode == APIC_FIXED_DELIVERY) {
         //PrintDebug("Activating internal APIC IRQ %d\n", vec_num);
-        return activate_apic_irq(apic, vec_num);
+        return add_apic_irq_entry(apic, vec_num);
     } else {
         PrintError("apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
         return -1;
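(Illustration, not part of the patch: activate_apic_irq() above indexes a 256-bit vector bitmap with major_offset = irq_num / 8 and minor_offset = irq_num % 8. A minimal standalone sketch of that indexing; set_vector_bit() is a made-up name, not a Palacios function.)

#include <stdint.h>
#include <stdio.h>

/* Set vector 'vec' in a 32-byte (256-bit) request-register image:
 * the bit lives at bit (vec % 8) of byte (vec / 8). */
static void set_vector_bit(uint8_t reg[32], uint32_t vec) {
    uint32_t major = (vec & ~0x7u) >> 3;   /* byte index: vec / 8 */
    uint32_t minor = vec & 0x7u;           /* bit index:  vec % 8 */

    reg[major] |= (uint8_t)(1u << minor);
}

int main(void) {
    uint8_t irr[32] = { 0 };

    set_vector_bit(irr, 0x31);             /* vector 49            */
    printf("byte 6 = 0x%02x\n", irr[6]);   /* prints 0x02 (bit 1)  */

    return 0;
}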
@@ -549,77 +573,108 @@ static int activate_internal_irq(struct apic_state * apic, apic_irq_type_t int_t
 
 
 
-static inline int should_deliver_cluster_ipi(struct guest_info * dst_core,
+static inline int should_deliver_cluster_ipi(struct apic_dev_state * apic_dev,
+                                             struct guest_info * dst_core,
                                              struct apic_state * dst_apic, uint8_t mda) {
 
+    int ret = 0;
+
+
     if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) &&  /* (I am in the cluster and */
          ((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) {  /* I am in the set) */
+        ret = 1;
+    } else {
+        ret = 0;
+    }
+
+
+    if (ret == 1) {
         PrintDebug("apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
                    dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
                    dst_apic->log_dst.dst_log_id);
-
-        return 1;
     } else {
         PrintDebug("apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
                    dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
                    dst_apic->log_dst.dst_log_id);
-        return 0;
     }
+
+    return ret;
+
 }
 
-static inline int should_deliver_flat_ipi(struct guest_info * dst_core,
+static inline int should_deliver_flat_ipi(struct apic_dev_state * apic_dev,
+                                          struct guest_info * dst_core,
                                           struct apic_state * dst_apic, uint8_t mda) {
 
-    if (dst_apic->log_dst.dst_log_id & mda) {  // I am in the set
+    int ret = 0;
 
-        PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
-                   dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
-                   dst_apic->log_dst.dst_log_id);
-        return 1;
+    if ((dst_apic->log_dst.dst_log_id & mda) != 0) {  // I am in the set
+        ret = 1;
+    } else {
+        ret = 0;
+    }
 
-    } else {
+    if (ret == 1) {
+        PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
+                   dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
+                   dst_apic->log_dst.dst_log_id);
+    } else {
         PrintDebug("apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
                    dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
                    dst_apic->log_dst.dst_log_id);
-
         return 0;
-    }
+    }
+
+
+    return ret;
 }
 
-static int should_deliver_ipi(struct guest_info * dst_core,
+static int should_deliver_ipi(struct apic_dev_state * apic_dev,
+                              struct guest_info * dst_core,
                               struct apic_state * dst_apic, uint8_t mda) {
+    addr_t flags = 0;
+    int ret = 0;
+
+    flags = v3_lock_irqsave(apic_dev->state_lock);
 
     if (dst_apic->dst_fmt.model == 0xf) {
 
         if (mda == 0xff) {
             /* always deliver broadcast */
-            return 1;
+            ret = 1;
+        } else {
+            ret = should_deliver_flat_ipi(apic_dev, dst_core, dst_apic, mda);
         }
-
-        return should_deliver_flat_ipi(dst_core, dst_apic, mda);
-
     } else if (dst_apic->dst_fmt.model == 0x0) {
 
         if (mda == 0xff) {
             /* always deliver broadcast */
-            return 1;
+            ret = 1;
+        } else {
+            ret = should_deliver_cluster_ipi(apic_dev, dst_core, dst_apic, mda);
         }
 
-        return should_deliver_cluster_ipi(dst_core, dst_apic, mda);
-
     } else {
+        ret = -1;
+    }
+
+    v3_unlock_irqrestore(apic_dev->state_lock, flags);
+
+
+    if (ret == -1) {
         PrintError("apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
                    dst_apic->lapic_id.val, dst_core->vcpu_id, dst_apic->dst_fmt.model);
-        return -1;
     }
+
+    return ret;
 }
 
+
+
 // Only the src_apic pointer is used
 static int deliver_ipi(struct apic_state * src_apic,
                        struct apic_state * dst_apic,
@@ -636,21 +691,17 @@ static int deliver_ipi(struct apic_state * src_apic,
 
             // lowest priority -
             // caller needs to have decided which apic to deliver to!
-            int do_xcall;
 
             PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->vcpu_id);
 
-            do_xcall = activate_apic_irq(dst_apic, vector);
+            add_apic_irq_entry(dst_apic, vector);
 
-
-
+#ifdef V3_CONFIG_MULTITHREAD_OS
             if (dst_apic != src_apic) {
                 PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
-
-#ifdef V3_CONFIG_MULTITHREAD_OS
                 v3_interrupt_cpu(dst_core->vm_info, dst_core->pcpu_id, 0);
-#endif
             }
+#endif
+
             break;
         }
 
@@ -690,20 +741,7 @@ static int deliver_ipi(struct apic_state * src_apic,
                 break;
             }
 
-            // Write the RIP, CS, and descriptor
-            // assume the rest is already good to go
-            //
-            // vector VV -> rip at 0
-            // CS = VV00
-            // This means we start executing at linear address VV000
-            //
-            // So the selector needs to be VV00
-            // and the base needs to be VV000
-            //
-            dst_core->rip = 0;
-            dst_core->segments.cs.selector = vector << 8;
-            dst_core->segments.cs.limit = 0xffff;
-            dst_core->segments.cs.base = vector << 12;
+            v3_reset_vm_core(dst_core, vector);
 
             PrintDebug(" SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
                        vector, dst_core->segments.cs.selector, dst_core->vcpu_id);
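(Illustration, not part of the patch: the flat and cluster checks that should_deliver_ipi() now performs under apic_dev->state_lock reduce to the bit tests below. flat_match() and cluster_match() are illustrative stand-ins, not Palacios functions.)

#include <stdint.h>
#include <stdio.h>

/* Flat model: deliver when the APIC's logical ID shares any bit with the MDA. */
static int flat_match(uint8_t log_dst, uint8_t mda) {
    return (log_dst & mda) != 0;
}

/* Cluster model: the high nibbles (cluster IDs) must be equal and the
 * low nibbles (member bitmaps) must intersect. */
static int cluster_match(uint8_t log_dst, uint8_t mda) {
    return ((mda & 0xf0) == (log_dst & 0xf0)) &&
           ((mda & 0x0f) & (log_dst & 0x0f));
}

int main(void) {
    /* Logical ID 0x21: cluster 2, member bit 0. */
    printf("flat:    %d\n", flat_match(0x21, 0x01));    /* 1: bit 0 overlaps   */
    printf("cluster: %d\n", cluster_match(0x21, 0x22)); /* 0: members disjoint */
    printf("cluster: %d\n", cluster_match(0x21, 0x23)); /* 1: member bit 0 set */

    return 0;
}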
@@ -719,10 +757,18 @@ static int deliver_ipi(struct apic_state * src_apic,
             break;
         }
 
+
+        case APIC_EXTINT_DELIVERY: // EXTINT
+            /* Two possible things to do here:
+             * 1. Ignore the IPI and assume the 8259a (PIC) will handle it
+             * 2. Add 32 to the vector and inject it...
+             * We probably just want to do 1 here, and assume the raise_irq() will hit the 8259a.
+             */
+            return 0;
+
         case APIC_SMI_DELIVERY:
         case APIC_RES1_DELIVERY: // reserved
         case APIC_NMI_DELIVERY:
-        case APIC_EXTINT_DELIVERY: // ExtInt
         default:
             PrintError("IPI %d delivery is unsupported\n", del_mode);
             return -1;
@@ -732,24 +778,29 @@ static int deliver_ipi(struct apic_state * src_apic,
 }
 
 
-static struct apic_state * find_physical_apic(struct apic_dev_state *apic_dev, struct int_cmd_reg *icr)
-{
+static struct apic_state * find_physical_apic(struct apic_dev_state * apic_dev, uint32_t dst_idx) {
+    struct apic_state * dst_apic = NULL;
+    addr_t flags;
     int i;
-
-    if ( (icr->dst > 0) && (icr->dst < apic_dev->num_apics) ) {
+
+    flags = v3_lock_irqsave(apic_dev->state_lock);
+
+    if ( (dst_idx > 0) && (dst_idx < apic_dev->num_apics) ) {
         // see if it simply is the core id
-        if (apic_dev->apics[icr->dst].lapic_id.val == icr->dst) {
-            return &(apic_dev->apics[icr->dst]);
+        if (apic_dev->apics[dst_idx].lapic_id.val == dst_idx) {
+            dst_apic = &(apic_dev->apics[dst_idx]);
         }
     }
 
     for (i = 0; i < apic_dev->num_apics; i++) {
-        if (apic_dev->apics[i].lapic_id.val == icr->dst) {
-            return &(apic_dev->apics[i]);
+        if (apic_dev->apics[i].lapic_id.val == dst_idx) {
+            dst_apic = &(apic_dev->apics[i]);
         }
     }
-
-    return NULL;
+
+    v3_unlock_irqrestore(apic_dev->state_lock, flags);
+
+    return dst_apic;
 }
 
@@ -775,7 +826,7 @@ static int route_ipi(struct apic_dev_state * apic_dev,
         case APIC_SHORTHAND_NONE:  // no shorthand
             if (icr->dst_mode == APIC_DEST_PHYSICAL) {
 
-                dest_apic = find_physical_apic(apic_dev, icr);
+                dest_apic = find_physical_apic(apic_dev, icr->dst);
 
                 if (dest_apic == NULL) {
                     PrintError("apic: Attempted send to unregistered apic id=%u\n", icr->dst);
@@ -807,7 +858,7 @@ static int route_ipi(struct apic_dev_state * apic_dev,
 
                         dest_apic = &(apic_dev->apics[i]);
 
-                        del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
+                        del_flag = should_deliver_ipi(apic_dev, dest_apic->core, dest_apic, mda);
 
                         if (del_flag == -1) {
 
@@ -822,11 +873,11 @@ static int route_ipi(struct apic_dev_state * apic_dev,
                         }
                     }
                 }
-            } else { //APIC_LOWEST_DELIVERY
-                int i;
+            } else {  // APIC_LOWEST_DELIVERY
                 struct apic_state * cur_best_apic = NULL;
                 uint8_t mda = icr->dst;
-
+                int i;
+
                 // logical, lowest priority
 
                 for (i = 0; i < apic_dev->num_apics; i++) {
@@ -834,7 +885,7 @@ static int route_ipi(struct apic_dev_state * apic_dev,
 
                     dest_apic = &(apic_dev->apics[i]);
 
-                    del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
+                    del_flag = should_deliver_ipi(apic_dev, dest_apic->core, dest_apic, mda);
 
                     if (del_flag == -1) {
                         PrintError("apic: Error checking delivery mode\n");
@@ -842,11 +893,18 @@ static int route_ipi(struct apic_dev_state * apic_dev,
 
                         return -1;
                     } else if (del_flag == 1) {
                         // update priority for lowest priority scan
+                        addr_t flags = 0;
+
+                        flags = v3_lock_irqsave(apic_dev->state_lock);
+
                         if (cur_best_apic == 0) {
                             cur_best_apic = dest_apic;
                         } else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
                             cur_best_apic = dest_apic;
                         }
+
+                        v3_unlock_irqrestore(apic_dev->state_lock, flags);
+
                     }
                 }
 
@@ -1158,7 +1216,7 @@ static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, ui
 
         *val_ptr = *(((uint8_t *)&val) + byte_addr);
 
     } else if ((length == 2) &&
-               ((reg_addr & 0x3) == 0x3)) {
+               ((reg_addr & 0x3) != 0x3)) {
         uint_t byte_addr = reg_addr & 0x3;
         uint16_t * val_ptr = (uint16_t *)dst;
         *val_ptr = *(((uint16_t *)&val) + byte_addr);
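(Illustration, not part of the patch: the APIC_LOWEST_DELIVERY path above keeps the matching APIC with the smallest task priority and delivers only to it. A condensed sketch of that arbitration loop with toy types; matches() stands in for should_deliver_ipi().)

#include <stdint.h>
#include <stddef.h>

struct toy_apic {
    uint8_t  log_dst;    /* logical destination (LDR)    */
    uint32_t task_prio;  /* task priority register (TPR) */
};

/* Stand-in for should_deliver_ipi(): flat logical match only. */
static int matches(const struct toy_apic * a, uint8_t mda) {
    return (a->log_dst & mda) != 0;
}

static struct toy_apic * pick_lowest_prio(struct toy_apic * apics, int n, uint8_t mda) {
    struct toy_apic * best = NULL;

    for (int i = 0; i < n; i++) {
        if (!matches(&apics[i], mda)) {
            continue;
        }
        /* keep the addressable APIC with the smallest task priority */
        if ((best == NULL) || (apics[i].task_prio < best->task_prio)) {
            best = &apics[i];
        }
    }

    return best;  /* NULL when no APIC is addressable by mda */
}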
@@ -1189,6 +1247,7 @@ static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, u
     addr_t reg_addr = guest_addr - apic->base_addr;
     struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
     uint32_t op_val = *(uint32_t *)src;
+    addr_t flags = 0;
 
     PrintDebug("apic %u: core %u: at %p and priv_data is at %p\n",
                apic->lapic_id.val, core->vcpu_id, apic, priv_data);
@@ -1258,10 +1317,14 @@ static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, u
         case LDR_OFFSET:
             PrintDebug("apic %u: core %u: setting log_dst.val to 0x%x\n",
                        apic->lapic_id.val, core->vcpu_id, op_val);
+            flags = v3_lock_irqsave(apic_dev->state_lock);
             apic->log_dst.val = op_val;
+            v3_unlock_irqrestore(apic_dev->state_lock, flags);
             break;
         case DFR_OFFSET:
+            flags = v3_lock_irqsave(apic_dev->state_lock);
             apic->dst_fmt.val = op_val;
+            v3_unlock_irqrestore(apic_dev->state_lock, flags);
             break;
         case SPURIOUS_INT_VEC_OFFSET:
             apic->spurious_int.val = op_val;
@@ -1295,6 +1358,8 @@ static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, u
             apic->tmr_cur_cnt = op_val;
             break;
         case TMR_DIV_CFG_OFFSET:
+            PrintDebug("apic %u: core %u: setting tmr_div_cfg to 0x%x\n",
+                       apic->lapic_id.val, core->vcpu_id, op_val);
             apic->tmr_div_cfg.val = op_val;
             break;
 
@@ -1367,8 +1432,7 @@ static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, u
         }
         case INT_CMD_HI_OFFSET: {
             apic->int_cmd.hi = op_val;
-            V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->vcpu_id,apic->int_cmd.hi);
-
+            //V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->vcpu_id,apic->int_cmd.hi);
             break;
         }
         // Unhandled Registers
@@ -1395,11 +1459,15 @@ static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, u
 static int apic_intr_pending(struct guest_info * core, void * private_data) {
     struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
     struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+    int req_irq = 0;
+    int svc_irq = 0;
 
-    // drain irq QUEUE
+    // Activate all queued IRQ entries
+    drain_irq_entries(apic);
 
-    int req_irq = get_highest_irr(apic);
-    int svc_irq = get_highest_isr(apic);
+    // Check for newly activated entries
+    req_irq = get_highest_irr(apic);
+    svc_irq = get_highest_isr(apic);
 
     //    PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->vcpu_id,req_irq,svc_irq);
 
@@ -1454,25 +1522,16 @@ int v3_apic_raise_intr(struct v3_vm_info * vm, uint32_t irq, uint32_t dst, void
     struct apic_dev_state * apic_dev = (struct apic_dev_state *)
         (((struct vm_device*)dev_data)->private_data);
     struct apic_state * apic = &(apic_dev->apics[dst]);
-    int do_xcall;
 
     PrintDebug("apic %u core ?: raising interrupt IRQ %u (dst = %u).\n",
                apic->lapic_id.val, irq, dst);
 
-    do_xcall = activate_apic_irq(apic, irq);
+    add_apic_irq_entry(apic, irq);
 
-    if (do_xcall < 0) {
-        PrintError("Failed to activate apic irq\n");
-        return -1;
-    }
-
-    if (do_xcall > 0 && (V3_Get_CPU() != dst)) {
-#ifdef V3_CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
+    if ((V3_Get_CPU() != dst)) {
         v3_interrupt_cpu(vm, dst, 0);
-#else
-        V3_ASSERT(0);
-#endif
-
     }
+#endif
 
     return 0;
 }
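(Illustration, not part of the patch: the pattern the patch builds around struct v3_queue is that any core may enqueue a vector for a destination APIC (add_apic_irq_entry), while only the destination core drains the queue into its IRR (drain_irq_entries, called from apic_intr_pending). A self-contained sketch of that producer/consumer split, using pthreads and a toy ring buffer in place of v3_queue.)

#include <stdint.h>
#include <pthread.h>

#define QLEN 64

struct irq_queue {
    pthread_mutex_t lock;
    uint32_t entries[QLEN];
    unsigned head, tail;          /* tail - head = number of queued vectors */
};

/* Producer side: may run on any core (cf. add_apic_irq_entry / deliver_ipi). */
static void irq_enqueue(struct irq_queue * q, uint32_t vec) {
    pthread_mutex_lock(&q->lock);
    q->entries[q->tail % QLEN] = vec;
    q->tail++;
    pthread_mutex_unlock(&q->lock);
}

static int irq_dequeue(struct irq_queue * q, uint32_t * vec) {
    int ok = 0;

    pthread_mutex_lock(&q->lock);
    if (q->head != q->tail) {
        *vec = q->entries[q->head % QLEN];
        q->head++;
        ok = 1;
    }
    pthread_mutex_unlock(&q->lock);

    return ok;
}

/* Consumer side: only the APIC's own core calls this, right before it
 * scans for pending interrupts (cf. drain_irq_entries / apic_intr_pending). */
static void drain_into_irr(struct irq_queue * q, uint8_t irr[32]) {
    uint32_t vec;

    while (irq_dequeue(q, &vec)) {
        irr[vec >> 3] |= (uint8_t)(1u << (vec & 0x7));
    }
}

int main(void) {
    static struct irq_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
    uint8_t irr[32] = { 0 };

    irq_enqueue(&q, 0x31);        /* producer on some other core */
    drain_into_irr(&q, irr);      /* consumer on the owning core */

    return irr[6] == 0x02 ? 0 : 1;
}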
@@ -1503,10 +1562,33 @@ static int apic_begin_irq(struct guest_info * core, void * private_data, int irq
 }
 
 
+/* Timer Functions */
+
+static void apic_inject_timer_intr(struct guest_info *core,
+                                   void * priv_data) {
+    struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
+    struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+    // raise irq
+    PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d)\n",
+               apic->lapic_id.val, core->vcpu_id,
+               apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt);
+
+    if (apic_intr_pending(core, priv_data)) {
+        PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
+                   apic->lapic_id.val, core->vcpu_id,
+                   apic_get_intr_number(core, priv_data));
+    }
+
+    if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
+        PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
+                   apic->lapic_id.val, core->vcpu_id);
+    }
+
+    return;
+}
+
 
-/* Timer Functions */
 static void apic_update_time(struct guest_info * core,
                              uint64_t cpu_cycles, uint64_t cpu_freq,
@@ -1572,36 +1654,31 @@ static void apic_update_time(struct guest_info * core,
 
     if (tmr_ticks < apic->tmr_cur_cnt) {
         apic->tmr_cur_cnt -= tmr_ticks;
+#ifdef V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS
+        if (apic->missed_ints && !apic_intr_pending(core, priv_data)) {
+            PrintDebug("apic %u: core %u: Injecting queued APIC timer interrupt.\n",
+                       apic->lapic_id.val, core->vcpu_id);
+            apic_inject_timer_intr(core, priv_data);
+            apic->missed_ints--;
+        }
+#endif /* CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS */
     } else {
         tmr_ticks -= apic->tmr_cur_cnt;
         apic->tmr_cur_cnt = 0;
 
-        // raise irq
-        PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
-                   apic->lapic_id.val, core->vcpu_id,
-                   apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
-
-        if (apic_intr_pending(core, priv_data)) {
-            PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
-                       apic->lapic_id.val, core->vcpu_id,
-                       apic_get_intr_number(core, priv_data));
-        }
+        apic_inject_timer_intr(core, priv_data);
 
-        if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
-            PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
-                       apic->lapic_id.val, core->vcpu_id);
-        }
-
         if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
+            int queued_ints = tmr_ticks / apic->tmr_init_cnt;
             tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
             apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
+            apic->missed_ints += queued_ints;
         }
     }
 
     return;
 }
 
-
 static struct intr_ctrl_ops intr_ops = {
     .intr_pending = apic_intr_pending,
     .get_intr_number = apic_get_intr_number,
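(Illustration, not part of the patch: a worked example of the periodic-timer catch-up arithmetic added above. When more guest ticks elapse than one period, one interrupt is injected immediately and the surplus full periods are banked in missed_ints, to be re-injected one at a time when V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS is set. The numbers below are made up.)

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t tmr_init_cnt = 1000;   /* programmed period           */
    uint32_t tmr_cur_cnt  = 250;    /* counts left in this period  */
    uint32_t missed_ints  = 0;
    uint64_t tmr_ticks    = 3650;   /* ticks since the last update */

    if (tmr_ticks < tmr_cur_cnt) {
        tmr_cur_cnt -= tmr_ticks;
    } else {
        tmr_ticks -= tmr_cur_cnt;                 /* 3650 - 250 = 3400 */
        /* one interrupt is injected now; extra full periods are "missed" */
        missed_ints += tmr_ticks / tmr_init_cnt;  /* 3 full periods     */
        tmr_ticks    = tmr_ticks % tmr_init_cnt;  /* 400 ticks over     */
        tmr_cur_cnt  = tmr_init_cnt - tmr_ticks;  /* 600 counts left    */
    }

    printf("cur=%u missed=%u\n", tmr_cur_cnt, missed_ints);  /* cur=600 missed=3 */

    return 0;
}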
@@ -1642,9 +1719,109 @@ static int apic_free(struct apic_dev_state * apic_dev) {
     return 0;
 }
 
+#ifdef V3_CONFIG_CHECKPOINT
+static int apic_save(struct v3_chkpt_ctx * ctx, void * private_data) {
+    struct apic_dev_state * apic_state = (struct apic_dev_state *)private_data;
+    int i = 0;
+
+    V3_CHKPT_STD_SAVE(ctx, apic_state->num_apics);
+
+    //V3_CHKPT_STD_SAVE(ctx,apic_state->state_lock);
+
+    for (i = 0; i < apic_state->num_apics; i++) {
+
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].base_addr);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].base_addr_msr);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].lapic_id);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].apic_ver);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].ext_apic_ctrl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].local_vec_tbl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].tmr_vec_tbl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].tmr_div_cfg);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].lint0_vec_tbl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].lint1_vec_tbl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].perf_ctr_loc_vec_tbl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].therm_loc_vec_tbl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].err_vec_tbl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].err_status);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].spurious_int);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].int_cmd);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].log_dst);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].dst_fmt);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].arb_prio);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].task_prio);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].proc_prio);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].ext_apic_feature);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].spec_eoi);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].tmr_cur_cnt);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].tmr_init_cnt);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].ext_intr_vec_tbl);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].rem_rd_data);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].ipi_state);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].int_req_reg);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].int_svc_reg);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].int_en_reg);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].trig_mode_reg);
+        V3_CHKPT_STD_SAVE(ctx, apic_state->apics[i].eoi);
+
+    }
+
+    return 0;
+}
+
+static int apic_load(struct v3_chkpt_ctx * ctx, void * private_data) {
+    struct apic_dev_state *apic_state = (struct apic_dev_state *)private_data;
+    int i = 0;
+
+    V3_CHKPT_STD_LOAD(ctx,apic_state->num_apics);
+
+    for (i = 0; i < apic_state->num_apics; i++) {
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].base_addr);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].base_addr_msr);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].lapic_id);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].apic_ver);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].ext_apic_ctrl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].local_vec_tbl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].tmr_vec_tbl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].tmr_div_cfg);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].lint0_vec_tbl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].lint1_vec_tbl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].perf_ctr_loc_vec_tbl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].therm_loc_vec_tbl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].err_vec_tbl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].err_status);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].spurious_int);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].int_cmd);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].log_dst);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].dst_fmt);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].arb_prio);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].task_prio);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].proc_prio);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].ext_apic_feature);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].spec_eoi);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].tmr_cur_cnt);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].tmr_init_cnt);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].ext_intr_vec_tbl);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].rem_rd_data);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].ipi_state);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].int_req_reg);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].int_svc_reg);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].int_en_reg);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].trig_mode_reg);
+        V3_CHKPT_STD_LOAD(ctx, apic_state->apics[i].eoi);
+    }
+
+
+    return 0;
+}
+
+#endif
 
 static struct v3_device_ops dev_ops = {
     .free = (int (*)(void *))apic_free,
+#ifdef V3_CONFIG_CHECKPOINT
+    .save = apic_save,
+    .load = apic_load
+#endif
 };
 
@@ -1660,6 +1837,7 @@ static int apic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
                             sizeof(struct apic_state) * vm->num_cores);
 
     apic_dev->num_apics = vm->num_cores;
+    v3_lock_init(&(apic_dev->state_lock));
 
     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, apic_dev);