2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Authors: Jack Lange <jarusl@cs.northwestern.edu>
15 * Peter Dinda <pdinda@northwestern.edu> (SMP)
17 * This is free software. You are permitted to use,
18 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <devices/apic.h>
23 #include <devices/apic_regs.h>
24 #include <palacios/vmm.h>
25 #include <palacios/vmm_msr.h>
26 #include <palacios/vmm_sprintf.h>
27 #include <palacios/vm_guest.h>
28 #include <palacios/vmm_types.h>
31 #include <palacios/vmm_queue.h>
32 #include <palacios/vmm_lock.h>
34 /* The locking in this file is nasty.
35 * There are 3 different locking approaches that are taken, depending on the APIC operation
36 * 1. Queue locks. Actual irq insertions are done via queueing irq ops at the dest apic.
37 * The destination apic's core is responsible for draining the queue, and actually
38 * setting the vector table.
39 * 2. State lock. This is a standard lock taken when internal apic state is read/written.
40 * When an irq's destination is determined this lock is taken to examine the apic's
42 * 3. VM barrier lock. This is taken when actual VM core state is changed (via SIPI).
47 #ifndef V3_CONFIG_DEBUG_APIC
49 #define PrintDebug(fmt, args...)
52 static char * shorthand_str[] = {
59 static char * deliverymode_str[] = {
72 typedef enum { APIC_TMR_INT, APIC_THERM_INT, APIC_PERF_INT,
73 APIC_LINT0_INT, APIC_LINT1_INT, APIC_ERR_INT } apic_irq_type_t;
75 #define APIC_FIXED_DELIVERY 0x0
76 #define APIC_LOWEST_DELIVERY 0x1
77 #define APIC_SMI_DELIVERY 0x2
78 #define APIC_RES1_DELIVERY 0x3
79 #define APIC_NMI_DELIVERY 0x4
80 #define APIC_INIT_DELIVERY 0x5
81 #define APIC_SIPI_DELIVERY 0x6
82 #define APIC_EXTINT_DELIVERY 0x7
84 #define APIC_SHORTHAND_NONE 0x0
85 #define APIC_SHORTHAND_SELF 0x1
86 #define APIC_SHORTHAND_ALL 0x2
87 #define APIC_SHORTHAND_ALL_BUT_ME 0x3
89 #define APIC_DEST_PHYSICAL 0x0
90 #define APIC_DEST_LOGICAL 0x1
93 #define BASE_ADDR_MSR 0x0000001B
94 #define DEFAULT_BASE_ADDR 0xfee00000
96 #define APIC_ID_OFFSET 0x020
97 #define APIC_VERSION_OFFSET 0x030
98 #define TPR_OFFSET 0x080
99 #define APR_OFFSET 0x090
100 #define PPR_OFFSET 0x0a0
101 #define EOI_OFFSET 0x0b0
102 #define REMOTE_READ_OFFSET 0x0c0
103 #define LDR_OFFSET 0x0d0
104 #define DFR_OFFSET 0x0e0
105 #define SPURIOUS_INT_VEC_OFFSET 0x0f0
107 #define ISR_OFFSET0 0x100 // 0x100 - 0x170
108 #define ISR_OFFSET1 0x110 // 0x100 - 0x170
109 #define ISR_OFFSET2 0x120 // 0x100 - 0x170
110 #define ISR_OFFSET3 0x130 // 0x100 - 0x170
111 #define ISR_OFFSET4 0x140 // 0x100 - 0x170
112 #define ISR_OFFSET5 0x150 // 0x100 - 0x170
113 #define ISR_OFFSET6 0x160 // 0x100 - 0x170
114 #define ISR_OFFSET7 0x170 // 0x100 - 0x170
116 #define TRIG_OFFSET0 0x180 // 0x180 - 0x1f0
117 #define TRIG_OFFSET1 0x190 // 0x180 - 0x1f0
118 #define TRIG_OFFSET2 0x1a0 // 0x180 - 0x1f0
119 #define TRIG_OFFSET3 0x1b0 // 0x180 - 0x1f0
120 #define TRIG_OFFSET4 0x1c0 // 0x180 - 0x1f0
121 #define TRIG_OFFSET5 0x1d0 // 0x180 - 0x1f0
122 #define TRIG_OFFSET6 0x1e0 // 0x180 - 0x1f0
123 #define TRIG_OFFSET7 0x1f0 // 0x180 - 0x1f0
126 #define IRR_OFFSET0 0x200 // 0x200 - 0x270
127 #define IRR_OFFSET1 0x210 // 0x200 - 0x270
128 #define IRR_OFFSET2 0x220 // 0x200 - 0x270
129 #define IRR_OFFSET3 0x230 // 0x200 - 0x270
130 #define IRR_OFFSET4 0x240 // 0x200 - 0x270
131 #define IRR_OFFSET5 0x250 // 0x200 - 0x270
132 #define IRR_OFFSET6 0x260 // 0x200 - 0x270
133 #define IRR_OFFSET7 0x270 // 0x200 - 0x270
136 #define ESR_OFFSET 0x280
137 #define INT_CMD_LO_OFFSET 0x300
138 #define INT_CMD_HI_OFFSET 0x310
139 #define TMR_LOC_VEC_TBL_OFFSET 0x320
140 #define THERM_LOC_VEC_TBL_OFFSET 0x330
141 #define PERF_CTR_LOC_VEC_TBL_OFFSET 0x340
142 #define LINT0_VEC_TBL_OFFSET 0x350
143 #define LINT1_VEC_TBL_OFFSET 0x360
144 #define ERR_VEC_TBL_OFFSET 0x370
145 #define TMR_INIT_CNT_OFFSET 0x380
146 #define TMR_CUR_CNT_OFFSET 0x390
147 #define TMR_DIV_CFG_OFFSET 0x3e0
148 #define EXT_APIC_FEATURE_OFFSET 0x400
149 #define EXT_APIC_CMD_OFFSET 0x410
150 #define SEOI_OFFSET 0x420
152 #define IER_OFFSET0 0x480 // 0x480 - 0x4f0
153 #define IER_OFFSET1 0x490 // 0x480 - 0x4f0
154 #define IER_OFFSET2 0x4a0 // 0x480 - 0x4f0
155 #define IER_OFFSET3 0x4b0 // 0x480 - 0x4f0
156 #define IER_OFFSET4 0x4c0 // 0x480 - 0x4f0
157 #define IER_OFFSET5 0x4d0 // 0x480 - 0x4f0
158 #define IER_OFFSET6 0x4e0 // 0x480 - 0x4f0
159 #define IER_OFFSET7 0x4f0 // 0x480 - 0x4f0
161 #define EXT_INT_LOC_VEC_TBL_OFFSET0 0x500 // 0x500 - 0x530
162 #define EXT_INT_LOC_VEC_TBL_OFFSET1 0x510 // 0x500 - 0x530
163 #define EXT_INT_LOC_VEC_TBL_OFFSET2 0x520 // 0x500 - 0x530
164 #define EXT_INT_LOC_VEC_TBL_OFFSET3 0x530 // 0x500 - 0x530
171 uint8_t bootstrap_cpu : 1;
173 uint8_t apic_enable : 1;
174 uint64_t base_addr : 40;
176 } __attribute__((packed));
177 } __attribute__((packed));
178 } __attribute__((packed));
183 typedef enum {INIT_ST,
185 STARTED} ipi_state_t;
187 struct apic_dev_state;
193 struct apic_msr base_addr_msr;
196 /* memory map registers */
198 struct lapic_id_reg lapic_id;
199 struct apic_ver_reg apic_ver;
200 struct ext_apic_ctrl_reg ext_apic_ctrl;
201 struct local_vec_tbl_reg local_vec_tbl;
202 struct tmr_vec_tbl_reg tmr_vec_tbl;
203 struct tmr_div_cfg_reg tmr_div_cfg;
204 struct lint_vec_tbl_reg lint0_vec_tbl;
205 struct lint_vec_tbl_reg lint1_vec_tbl;
206 struct perf_ctr_loc_vec_tbl_reg perf_ctr_loc_vec_tbl;
207 struct therm_loc_vec_tbl_reg therm_loc_vec_tbl;
208 struct err_vec_tbl_reg err_vec_tbl;
209 struct err_status_reg err_status;
210 struct spurious_int_reg spurious_int;
211 struct int_cmd_reg int_cmd;
212 struct log_dst_reg log_dst;
213 struct dst_fmt_reg dst_fmt;
214 struct arb_prio_reg arb_prio;
215 struct task_prio_reg task_prio;
216 struct proc_prio_reg proc_prio;
217 struct ext_apic_feature_reg ext_apic_feature;
218 struct spec_eoi_reg spec_eoi;
221 uint32_t tmr_cur_cnt;
222 uint32_t tmr_init_cnt;
225 struct local_vec_tbl_reg ext_intr_vec_tbl[4];
227 uint32_t rem_rd_data;
230 ipi_state_t ipi_state;
232 uint8_t int_req_reg[32];
233 uint8_t int_svc_reg[32];
234 uint8_t int_en_reg[32];
235 uint8_t trig_mode_reg[32];
237 struct guest_info * core;
239 void * controller_handle;
241 struct v3_timer * timer;
244 struct v3_queue irq_queue;
// Per-VM APIC device container: one state lock plus a flexible array of
// per-core APICs (allocated as a single buffer; count field not visible in
// this extraction -- interior lines are missing).
254 struct apic_dev_state {
// Global lock protecting cross-APIC state reads/writes (see locking notes
// at the top of the file).
257 v3_lock_t state_lock;
// Trailing per-core APIC array; old-style [0] flexible array member.
259 struct apic_state apics[0];
260 } __attribute__((packed));
266 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data);
267 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data);
// Reset one virtual APIC to its power-on state for the core with the given id.
// NOTE(review): this chunk is a lossy extraction -- the BSP-vs-AP branch
// structure around the two MSR assignments below is missing, so only the
// visible assignments are documented here.
270 static void init_apic_state(struct apic_state * apic, uint32_t id) {
271 apic->base_addr = DEFAULT_BASE_ADDR;
274 // boot processor, enabled
275 apic->base_addr_msr.value = 0x0000000000000900LL;
277 // ap processor, enabled
278 apic->base_addr_msr.value = 0x0000000000000800LL;
281 // same base address regardless of ap or main
282 apic->base_addr_msr.value |= ((uint64_t)DEFAULT_BASE_ADDR);
284 PrintDebug("apic %u: (init_apic_state): msr=0x%llx\n",id, apic->base_addr_msr.value);
286 PrintDebug("apic %u: (init_apic_state): Sizeof Interrupt Request Register %d, should be 32\n",
287 id, (uint_t)sizeof(apic->int_req_reg));
// Clear the 256-bit IRR/ISR/TMR bitmaps; enable all vectors (IER = all 1s).
289 memset(apic->int_req_reg, 0, sizeof(apic->int_req_reg));
290 memset(apic->int_svc_reg, 0, sizeof(apic->int_svc_reg));
291 memset(apic->int_en_reg, 0xff, sizeof(apic->int_en_reg));
292 memset(apic->trig_mode_reg, 0, sizeof(apic->trig_mode_reg));
294 apic->eoi = 0x00000000;
295 apic->rem_rd_data = 0x00000000;
296 apic->tmr_init_cnt = 0x00000000;
297 apic->tmr_cur_cnt = 0x00000000;
299 apic->lapic_id.val = id;
// Start the INIT/SIPI state machine in its initial state.
301 apic->ipi_state = INIT_ST;
303 // The P6 has 6 LVT entries, so we set the value to (6-1)...
304 apic->apic_ver.val = 0x80050010;
306 apic->task_prio.val = 0x00000000;
307 apic->arb_prio.val = 0x00000000;
308 apic->proc_prio.val = 0x00000000;
309 apic->log_dst.val = 0x00000000;
// DFR all-ones = flat logical destination model at reset.
310 apic->dst_fmt.val = 0xffffffff;
311 apic->spurious_int.val = 0x000000ff;
312 apic->err_status.val = 0x00000000;
313 apic->int_cmd.val = 0x0000000000000000LL;
// LVT entries reset with the mask bit (bit 16) set.
314 apic->tmr_vec_tbl.val = 0x00010000;
315 apic->therm_loc_vec_tbl.val = 0x00010000;
316 apic->perf_ctr_loc_vec_tbl.val = 0x00010000;
317 apic->lint0_vec_tbl.val = 0x00010000;
318 apic->lint1_vec_tbl.val = 0x00010000;
319 apic->err_vec_tbl.val = 0x00010000;
320 apic->tmr_div_cfg.val = 0x00000000;
321 //apic->ext_apic_feature.val = 0x00000007;
322 apic->ext_apic_feature.val = 0x00040007;
323 apic->ext_apic_ctrl.val = 0x00000000;
324 apic->spec_eoi.val = 0x00000000;
// Queue used by other cores to post IRQs to this APIC (queue-lock scheme).
327 v3_init_queue(&(apic->irq_queue));
// MSR read handler for the APIC base address MSR (0x1B): returns this
// core's current APIC base address. NOTE(review): the return statement is
// missing from this extraction.
336 static int read_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t * dst, void * priv_data) {
337 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
// Each core indexes its own APIC by vcpu_id.
338 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
340 PrintDebug("apic %u: core %u: MSR read\n", apic->lapic_id.val, core->vcpu_id);
342 dst->value = apic->base_addr;
// MSR write handler for the APIC base address MSR: relocates the APIC MMIO
// region by deleting the old memory hook and installing a new full hook at
// the written address.
348 static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, void * priv_data) {
349 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
350 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
// Look up the memory hook currently covering the old base address.
351 struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, apic->base_addr);
354 PrintDebug("apic %u: core %u: MSR write\n", apic->lapic_id.val, core->vcpu_id);
356 if (old_reg == NULL) {
// NOTE(review): "exit" in the message below is a typo for "exist"
// (runtime string; left untouched in this documentation pass).
358 PrintError("apic %u: core %u: APIC Base address region does not exit...\n",
359 apic->lapic_id.val, core->vcpu_id);
// Remove the hook at the old location before re-hooking at the new one.
365 v3_delete_mem_region(core->vm_info, old_reg);
// NOTE(review): raw MSR value is used as the base; low flag bits are not
// masked off here -- TODO confirm this is intentional.
367 apic->base_addr = src.value;
369 if (v3_hook_full_mem(core->vm_info, core->vcpu_id, apic->base_addr,
370 apic->base_addr + PAGE_SIZE_4KB,
371 apic_read, apic_write, apic_dev) == -1) {
372 PrintError("apic %u: core %u: Could not hook new APIC Base address\n",
373 apic->lapic_id.val, core->vcpu_id);
386 // irq_num is the bit offset into a 256 bit buffer...
// Set the IRR bit for irq_num on this APIC, if the vector is enabled in the
// IER. Must run on the destination APIC's own core (called from the queue
// drain path). Coalesces if the bit is already pending.
387 static int activate_apic_irq(struct apic_state * apic, uint32_t irq_num) {
// major = byte index (irq_num / 8), minor = bit within that byte.
388 int major_offset = (irq_num & ~0x00000007) >> 3;
389 int minor_offset = irq_num & 0x00000007;
390 uint8_t * req_location = apic->int_req_reg + major_offset;
391 uint8_t * en_location = apic->int_en_reg + major_offset;
392 uint8_t flag = 0x1 << minor_offset;
395 PrintDebug("apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->vcpu_id, irq_num);
// Already pending: edge-coalesce, nothing more to record.
397 if (*req_location & flag) {
398 PrintDebug("Interrupt %d coallescing\n", irq_num);
// Only latch into the IRR if the vector is enabled in the IER.
402 if (*en_location & flag) {
403 *req_location |= flag;
406 PrintDebug("apic %u: core %d: Interrupt not enabled... %.2x\n",
407 apic->lapic_id.val, apic->core->vcpu_id, *en_location);
// Post an IRQ to this APIC's queue (the queue-lock delivery scheme); the
// destination core drains it later via drain_irq_entries(). Rejects invalid
// vectors (validation condition itself is missing from this extraction).
414 static int add_apic_irq_entry(struct apic_state * apic, uint8_t irq_num) {
417 PrintError("core %d: Attempting to raise an invalid interrupt: %d\n",
418 apic->core->vcpu_id, irq_num);
// Vector is enqueued as an addr_t-sized payload; dequeued in drain_irq_entries.
422 v3_enqueue(&(apic->irq_queue), (addr_t)irq_num);
// Drain all queued IRQ entries into this APIC's IRR. Runs on the APIC's own
// core (from apic_intr_pending). NOTE(review): a dequeued value of 0 is the
// loop terminator, so vector 0 cannot be queued -- presumably acceptable
// since vector 0 is not a deliverable interrupt; TODO confirm.
427 static void drain_irq_entries(struct apic_state * apic) {
430 while ((irq = (uint32_t)v3_dequeue(&(apic->irq_queue))) != 0) {
431 activate_apic_irq(apic, irq);
// Return the highest-numbered vector currently in-service (set in the
// 256-bit ISR bitmap), scanning from the top byte down. Return-on-empty
// path is missing from this extraction.
439 static int get_highest_isr(struct apic_state * apic) {
442 // We iterate backwards to find the highest priority
443 for (i = 31; i >= 0; i--) {
444 uint8_t * svc_major = apic->int_svc_reg + i;
// Skip empty bytes quickly; otherwise find the highest set bit.
446 if ((*svc_major) & 0xff) {
447 for (j = 7; j >= 0; j--) {
448 uint8_t flag = 0x1 << j;
449 if ((*svc_major) & flag) {
// Vector number = byte index * 8 + bit index.
450 return ((i * 8) + j);
// Return the highest-numbered vector currently requested (set in the
// 256-bit IRR bitmap); mirror image of get_highest_isr over int_req_reg.
461 static int get_highest_irr(struct apic_state * apic) {
464 // We iterate backwards to find the highest priority
465 for (i = 31; i >= 0; i--) {
466 uint8_t * req_major = apic->int_req_reg + i;
468 if ((*req_major) & 0xff) {
469 for (j = 7; j >= 0; j--) {
470 uint8_t flag = 0x1 << j;
471 if ((*req_major) & flag) {
// Vector number = byte index * 8 + bit index.
472 return ((i * 8) + j);
// Handle a guest EOI: clear the highest in-service vector's ISR bit.
// (Spurious-EOI branch and the Cray XT IOAPIC ack details are partially
// missing from this extraction.)
484 static int apic_do_eoi(struct apic_state * apic) {
485 int isr_irq = get_highest_isr(apic);
// Locate the byte/bit for the in-service vector, as in activate_apic_irq.
488 int major_offset = (isr_irq & ~0x00000007) >> 3;
489 int minor_offset = isr_irq & 0x00000007;
490 uint8_t flag = 0x1 << minor_offset;
491 uint8_t * svc_location = apic->int_svc_reg + major_offset;
493 PrintDebug("apic %u: core ?: Received APIC EOI for IRQ %d\n", apic->lapic_id.val,isr_irq);
// Retire the vector from the in-service register.
495 *svc_location &= ~flag;
497 #ifdef V3_CONFIG_CRAY_XT
// Cray XT platform quirk: certain vectors (e.g. 238) need an explicit ack.
499 if ((isr_irq == 238) ||
501 PrintDebug("apic %u: core ?: Acking IRQ %d\n", apic->lapic_id.val,isr_irq);
504 if (isr_irq == 238) {
509 //PrintError("apic %u: core ?: Spurious EOI...\n",apic->lapic_id.val);
// Raise one of the APIC's locally-generated interrupts (timer, thermal,
// perf counter, LINT0/1, error) by reading the vector/delivery-mode/mask
// out of the corresponding LVT entry and queueing it if unmasked and
// fixed-delivery. NOTE(review): the switch/case labels for int_type are
// missing from this extraction; the branches below are the case bodies.
516 static int activate_internal_irq(struct apic_state * apic, apic_irq_type_t int_type) {
517 uint32_t vec_num = 0;
518 uint32_t del_mode = 0;
// APIC_TMR_INT: timer LVT is always fixed delivery.
524 vec_num = apic->tmr_vec_tbl.vec;
525 del_mode = APIC_FIXED_DELIVERY;
526 masked = apic->tmr_vec_tbl.mask;
// APIC_THERM_INT
529 vec_num = apic->therm_loc_vec_tbl.vec;
530 del_mode = apic->therm_loc_vec_tbl.msg_type;
531 masked = apic->therm_loc_vec_tbl.mask;
// APIC_PERF_INT
534 vec_num = apic->perf_ctr_loc_vec_tbl.vec;
535 del_mode = apic->perf_ctr_loc_vec_tbl.msg_type;
536 masked = apic->perf_ctr_loc_vec_tbl.mask;
// APIC_LINT0_INT
539 vec_num = apic->lint0_vec_tbl.vec;
540 del_mode = apic->lint0_vec_tbl.msg_type;
541 masked = apic->lint0_vec_tbl.mask;
// APIC_LINT1_INT
544 vec_num = apic->lint1_vec_tbl.vec;
545 del_mode = apic->lint1_vec_tbl.msg_type;
546 masked = apic->lint1_vec_tbl.mask;
// APIC_ERR_INT: error LVT is always fixed delivery.
549 vec_num = apic->err_vec_tbl.vec;
550 del_mode = APIC_FIXED_DELIVERY;
551 masked = apic->err_vec_tbl.mask;
554 PrintError("apic %u: core ?: Invalid APIC interrupt type\n", apic->lapic_id.val);
558 // interrupt is masked, don't send
// NOTE(review): "Inerrupt" below is a typo for "Interrupt" (runtime
// string; left untouched in this documentation pass).
560 PrintDebug("apic %u: core ?: Inerrupt is masked\n", apic->lapic_id.val);
// Only fixed delivery is implemented for internal interrupts.
564 if (del_mode == APIC_FIXED_DELIVERY) {
565 //PrintDebug("Activating internal APIC IRQ %d\n", vec_num);
566 return add_apic_irq_entry(apic, vec_num);
568 PrintError("apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
// Cluster-model logical destination check: deliver iff the high nibble of
// the MDA matches the APIC's cluster id AND the low nibbles intersect.
// Caller (should_deliver_ipi) holds the state lock while log_dst is read.
575 static inline int should_deliver_cluster_ipi(struct apic_dev_state * apic_dev,
576 struct guest_info * dst_core,
577 struct apic_state * dst_apic, uint8_t mda) {
582 if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) && /* (I am in the cluster and */
583 ((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { /* I am in the set) */
591 PrintDebug("apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
592 dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
593 dst_apic->log_dst.dst_log_id);
595 PrintDebug("apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
596 dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
597 dst_apic->log_dst.dst_log_id);
// Flat-model logical destination check: deliver iff the MDA bitmask
// intersects this APIC's logical destination id. Caller holds the state
// lock while log_dst is read.
604 static inline int should_deliver_flat_ipi(struct apic_dev_state * apic_dev,
605 struct guest_info * dst_core,
606 struct apic_state * dst_apic, uint8_t mda) {
611 if ((dst_apic->log_dst.dst_log_id & mda) != 0) { // I am in the set
619 PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
620 dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
621 dst_apic->log_dst.dst_log_id);
623 PrintDebug("apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
624 dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
625 dst_apic->log_dst.dst_log_id);
// Decide whether a logical-mode IPI with the given message destination
// address (MDA) should be delivered to dst_apic, dispatching on the DFR
// model (0xf = flat, 0x0 = cluster). Takes the device state lock around
// the dst_fmt/log_dst reads. Returns 1/0 (deliver/skip) or error on an
// invalid DFR model. (Broadcast-MDA checks are missing from this extraction.)
634 static int should_deliver_ipi(struct apic_dev_state * apic_dev,
635 struct guest_info * dst_core,
636 struct apic_state * dst_apic, uint8_t mda) {
// Lock protects dst_fmt/log_dst against concurrent writes in apic_write.
640 flags = v3_lock_irqsave(apic_dev->state_lock);
642 if (dst_apic->dst_fmt.model == 0xf) {
645 /* always deliver broadcast */
648 ret = should_deliver_flat_ipi(apic_dev, dst_core, dst_apic, mda);
650 } else if (dst_apic->dst_fmt.model == 0x0) {
653 /* always deliver broadcast */
656 ret = should_deliver_cluster_ipi(apic_dev, dst_core, dst_apic, mda);
663 v3_unlock_irqrestore(apic_dev->state_lock, flags);
667 PrintError("apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
668 dst_apic->lapic_id.val, dst_core->vcpu_id, dst_apic->dst_fmt.model);
677 // Only the src_apic pointer is used
// Deliver a single already-routed IPI to dst_apic according to del_mode:
// fixed/lowest queue the vector (kicking the remote physical CPU if needed),
// INIT/SIPI drive the target core's startup state machine, EXTINT is left to
// the emulated 8259A, and SMI/NMI/reserved are unsupported. NOTE(review):
// the enclosing switch statement line and the break/return lines between
// cases are missing from this extraction.
678 static int deliver_ipi(struct apic_state * src_apic,
679 struct apic_state * dst_apic,
680 uint32_t vector, uint8_t del_mode) {
683 struct guest_info * dst_core = dst_apic->core;
688 case APIC_FIXED_DELIVERY:
689 case APIC_LOWEST_DELIVERY: {
691 // caller needs to have decided which apic to deliver to!
693 PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->vcpu_id);
// Queue on the destination APIC; its own core drains the queue.
695 add_apic_irq_entry(dst_apic, vector);
697 #ifdef V3_CONFIG_MULTITHREAD_OS
// Cross-core delivery: force the target physical CPU to exit so it
// notices the newly queued interrupt promptly.
698 if (dst_apic != src_apic) {
699 PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
700 v3_interrupt_cpu(dst_core->vm_info, dst_core->pcpu_id, 0);
707 case APIC_INIT_DELIVERY: {
709 PrintDebug(" INIT delivery to core %u\n", dst_core->vcpu_id);
711 // TODO: any APIC reset on dest core (shouldn't be needed, but not sure...)
714 if (dst_apic->ipi_state != INIT_ST) {
715 PrintError(" Warning: core %u is not in INIT state (mode = %d), ignored (assuming this is the deassert)\n",
716 dst_core->vcpu_id, dst_apic->ipi_state);
717 // Only a warning, since INIT INIT SIPI is common
721 // We transition the target core to SIPI state
722 dst_apic->ipi_state = SIPI; // note: locking should not be needed here
724 // That should be it since the target core should be
725 // waiting in host on this transition
726 // either it's on another core or on a different preemptive thread
727 // in both cases, it will quickly notice this transition
728 // in particular, we should not need to force an exit here
730 PrintDebug(" INIT delivery done\n");
734 case APIC_SIPI_DELIVERY: {
// SIPI is only meaningful after an INIT put the target in SIPI state.
737 if (dst_apic->ipi_state != SIPI) {
738 PrintError(" core %u is not in SIPI state (mode = %d), ignored!\n",
739 dst_core->vcpu_id, dst_apic->ipi_state);
// Reset the target vcore so it starts at the SIPI vector (real-mode
// segment vector<<8:0000).
743 v3_reset_vm_core(dst_core, vector);
745 PrintDebug(" SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
746 vector, dst_core->segments.cs.selector, dst_core->vcpu_id);
747 // Maybe need to adjust the APIC?
749 // We transition the target core to SIPI state
750 dst_core->core_run_state = CORE_RUNNING; // note: locking should not be needed here
751 dst_apic->ipi_state = STARTED;
753 // As with INIT, we should not need to do anything else
755 PrintDebug(" SIPI delivery done\n");
760 case APIC_EXTINT_DELIVERY: // EXTINT
761 /* Two possible things to do here:
762 * 1. Ignore the IPI and assume the 8259a (PIC) will handle it
763 * 2. Add 32 to the vector and inject it...
764 * We probably just want to do 1 here, and assume the raise_irq() will hit the 8259a.
768 case APIC_SMI_DELIVERY:
769 case APIC_RES1_DELIVERY: // reserved
770 case APIC_NMI_DELIVERY:
772 PrintError("IPI %d delivery is unsupported\n", del_mode);
// Map a physical destination APIC id to its apic_state. Fast path: if the
// APIC at array slot dst_idx still has that lapic id (the common case, since
// ids start equal to core ids but guests may rewrite them), use it directly;
// otherwise fall back to a linear scan. Returns NULL if no APIC matches.
// Holds the state lock across the lookup since lapic_id is guest-writable.
780 static struct apic_state * find_physical_apic(struct apic_dev_state * apic_dev, uint32_t dst_idx) {
781 struct apic_state * dst_apic = NULL;
785 flags = v3_lock_irqsave(apic_dev->state_lock);
// NOTE(review): dst_idx > 0 excludes slot 0 from the fast path; id 0 is
// still found by the fallback scan below, so this is slower but correct.
787 if ( (dst_idx > 0) && (dst_idx < apic_dev->num_apics) ) {
788 // see if it simply is the core id
789 if (apic_dev->apics[dst_idx].lapic_id.val == dst_idx) {
790 dst_apic = &(apic_dev->apics[dst_idx]);
794 for (i = 0; i < apic_dev->num_apics; i++) {
795 if (apic_dev->apics[i].lapic_id.val == dst_idx) {
796 dst_apic = &(apic_dev->apics[i]);
800 v3_unlock_irqrestore(apic_dev->state_lock, flags);
// Route an IPI described by the interrupt command register (ICR) to its
// destination APIC(s), honoring the shorthand field (none/self/all/
// all-but-me), the destination mode (physical/logical), and for logical
// non-broadcast delivery the lowest-priority arbitration. src_apic may be
// NULL for generic (non-APIC) senders. Returns 0 on success, -1 on error.
807 static int route_ipi(struct apic_dev_state * apic_dev,
808 struct apic_state * src_apic,
809 struct int_cmd_reg * icr) {
810 struct apic_state * dest_apic = NULL;
813 PrintDebug("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
814 deliverymode_str[icr->del_mode],
817 (icr->dst_mode == 0) ? "(physical)" : "(logical)",
818 shorthand_str[icr->dst_shorthand],
823 switch (icr->dst_shorthand) {
825 case APIC_SHORTHAND_NONE: // no shorthand
// Physical mode: single destination looked up by APIC id.
826 if (icr->dst_mode == APIC_DEST_PHYSICAL) {
828 dest_apic = find_physical_apic(apic_dev, icr->dst);
830 if (dest_apic == NULL) {
831 PrintError("apic: Attempted send to unregistered apic id=%u\n", icr->dst);
835 if (deliver_ipi(src_apic, dest_apic,
836 icr->vec, icr->del_mode) == -1) {
837 PrintError("apic: Could not deliver IPI\n");
842 PrintDebug("apic: done\n");
844 } else if (icr->dst_mode == APIC_DEST_LOGICAL) {
// Logical mode, not lowest-priority: deliver to every APIC whose
// logical destination matches the MDA.
846 if (icr->del_mode != APIC_LOWEST_DELIVERY) {
848 uint8_t mda = icr->dst;
850 // logical, but not lowest priority
851 // we immediately trigger
852 // fixed, smi, reserved, nmi, init, sipi, etc
855 for (i = 0; i < apic_dev->num_apics; i++) {
858 dest_apic = &(apic_dev->apics[i]);
860 del_flag = should_deliver_ipi(apic_dev, dest_apic->core, dest_apic, mda);
862 if (del_flag == -1) {
864 PrintError("apic: Error checking delivery mode\n");
866 } else if (del_flag == 1) {
868 if (deliver_ipi(src_apic, dest_apic,
869 icr->vec, icr->del_mode) == -1) {
870 PrintError("apic: Error: Could not deliver IPI\n");
875 } else { // APIC_LOWEST_DELIVERY
// Lowest-priority arbitration: scan matching APICs and pick the one
// with the lowest task priority register value.
876 struct apic_state * cur_best_apic = NULL;
877 uint8_t mda = icr->dst;
880 // logical, lowest priority
882 for (i = 0; i < apic_dev->num_apics; i++) {
885 dest_apic = &(apic_dev->apics[i]);
887 del_flag = should_deliver_ipi(apic_dev, dest_apic->core, dest_apic, mda);
889 if (del_flag == -1) {
890 PrintError("apic: Error checking delivery mode\n");
893 } else if (del_flag == 1) {
894 // update priority for lowest priority scan
// TPR comparison is done under the state lock since the guest
// can rewrite task_prio concurrently.
897 flags = v3_lock_irqsave(apic_dev->state_lock);
899 if (cur_best_apic == 0) {
900 cur_best_apic = dest_apic;
901 } else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
902 cur_best_apic = dest_apic;
905 v3_unlock_irqrestore(apic_dev->state_lock, flags);
910 // now we will deliver to the best one if it exists
911 if (!cur_best_apic) {
912 PrintDebug("apic: lowest priority deliver, but no destinations!\n");
914 if (deliver_ipi(src_apic, cur_best_apic,
915 icr->vec, icr->del_mode) == -1) {
916 PrintError("apic: Error: Could not deliver IPI\n");
919 //V3_Print("apic: logical, lowest priority delivery to apic %u\n",cur_best_apic->lapic_id.val);
926 case APIC_SHORTHAND_SELF: // self
928 if (src_apic == NULL) { /* this is not an apic, but it's trying to send to itself??? */
929 PrintError("apic: Sending IPI to self from generic IPI sender\n");
935 if (icr->dst_mode == APIC_DEST_PHYSICAL) { /* physical delivery */
936 if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
937 PrintError("apic: Could not deliver IPI to self (physical)\n");
940 } else if (icr->dst_mode == APIC_DEST_LOGICAL) { /* logical delivery */
941 PrintError("apic: use of logical delivery in self (untested)\n");
943 if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
944 PrintError("apic: Could not deliver IPI to self (logical)\n");
951 case APIC_SHORTHAND_ALL:
952 case APIC_SHORTHAND_ALL_BUT_ME: { /* all and all-but-me */
953 /* assuming that logical verus physical doesn't matter
954 although it is odd that both are used */
957 for (i = 0; i < apic_dev->num_apics; i++) {
958 dest_apic = &(apic_dev->apics[i]);
// ALL includes the sender; ALL_BUT_ME skips src_apic.
960 if ((dest_apic != src_apic) || (icr->dst_shorthand == APIC_SHORTHAND_ALL)) {
961 if (deliver_ipi(src_apic, dest_apic, icr->vec, icr->del_mode) == -1) {
962 PrintError("apic: Error: Could not deliver IPI\n");
971 PrintError("apic: Error routing IPI, invalid Mode (%d)\n", icr->dst_shorthand);
979 // External function, expected to acquire lock on apic
// MMIO read handler for the APIC register page. Decodes the dword-aligned
// register offset, fetches the register value into `val`, then copies the
// requested 1/2/4 bytes out to `dst`. Rejects reads while the APIC is
// disabled in the base MSR. NOTE(review): `break` statements between cases
// and several case labels (e.g. TPR/APR/PPR/LDR/DFR/ESR, IER/ISR/TMR/IRR
// offsets) are missing from this lossy extraction.
980 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
981 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
982 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
// Register offset within the 4K APIC page.
983 addr_t reg_addr = guest_addr - apic->base_addr;
984 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
988 PrintDebug("apic %u: core %u: at %p: Read apic address space (%p)\n",
989 apic->lapic_id.val, core->vcpu_id, apic, (void *)guest_addr);
991 if (msr->apic_enable == 0) {
992 PrintError("apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
993 apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
998 /* Because "May not be supported" doesn't matter to Linux developers... */
999 /* if (length != 4) { */
1000 /* PrintError("Invalid apic read length (%d)\n", length); */
// Dispatch on the dword-aligned register offset.
1004 switch (reg_addr & ~0x3) {
1006 // Well, only an idiot would read from a architectural write only register
1008 // PrintError("Attempting to read from write only register\n");
1013 case APIC_ID_OFFSET:
1014 val = apic->lapic_id.val;
1016 case APIC_VERSION_OFFSET:
1017 val = apic->apic_ver.val;
1020 val = apic->task_prio.val;
1023 val = apic->arb_prio.val;
1026 val = apic->proc_prio.val;
1028 case REMOTE_READ_OFFSET:
1029 val = apic->rem_rd_data;
1032 val = apic->log_dst.val;
1035 val = apic->dst_fmt.val;
1037 case SPURIOUS_INT_VEC_OFFSET:
1038 val = apic->spurious_int.val;
1041 val = apic->err_status.val;
1043 case TMR_LOC_VEC_TBL_OFFSET:
1044 val = apic->tmr_vec_tbl.val;
1046 case LINT0_VEC_TBL_OFFSET:
1047 val = apic->lint0_vec_tbl.val;
1049 case LINT1_VEC_TBL_OFFSET:
1050 val = apic->lint1_vec_tbl.val;
1052 case ERR_VEC_TBL_OFFSET:
1053 val = apic->err_vec_tbl.val;
1055 case TMR_INIT_CNT_OFFSET:
1056 val = apic->tmr_init_cnt;
1058 case TMR_DIV_CFG_OFFSET:
1059 val = apic->tmr_div_cfg.val;
// IER (interrupt enable) bitmap, 32 bits per register slot.
1063 val = *(uint32_t *)(apic->int_en_reg);
1066 val = *(uint32_t *)(apic->int_en_reg + 4);
1069 val = *(uint32_t *)(apic->int_en_reg + 8);
1072 val = *(uint32_t *)(apic->int_en_reg + 12);
1075 val = *(uint32_t *)(apic->int_en_reg + 16);
1078 val = *(uint32_t *)(apic->int_en_reg + 20);
1081 val = *(uint32_t *)(apic->int_en_reg + 24);
1084 val = *(uint32_t *)(apic->int_en_reg + 28);
// ISR (in-service) bitmap.
1088 val = *(uint32_t *)(apic->int_svc_reg);
1091 val = *(uint32_t *)(apic->int_svc_reg + 4);
1094 val = *(uint32_t *)(apic->int_svc_reg + 8);
1097 val = *(uint32_t *)(apic->int_svc_reg + 12);
1100 val = *(uint32_t *)(apic->int_svc_reg + 16);
1103 val = *(uint32_t *)(apic->int_svc_reg + 20);
1106 val = *(uint32_t *)(apic->int_svc_reg + 24);
1109 val = *(uint32_t *)(apic->int_svc_reg + 28);
// TMR (trigger mode) bitmap.
1113 val = *(uint32_t *)(apic->trig_mode_reg);
1116 val = *(uint32_t *)(apic->trig_mode_reg + 4);
1119 val = *(uint32_t *)(apic->trig_mode_reg + 8);
1122 val = *(uint32_t *)(apic->trig_mode_reg + 12);
1125 val = *(uint32_t *)(apic->trig_mode_reg + 16);
1128 val = *(uint32_t *)(apic->trig_mode_reg + 20);
1131 val = *(uint32_t *)(apic->trig_mode_reg + 24);
1134 val = *(uint32_t *)(apic->trig_mode_reg + 28);
// IRR (interrupt request) bitmap.
1138 val = *(uint32_t *)(apic->int_req_reg);
1141 val = *(uint32_t *)(apic->int_req_reg + 4);
1144 val = *(uint32_t *)(apic->int_req_reg + 8);
1147 val = *(uint32_t *)(apic->int_req_reg + 12);
1150 val = *(uint32_t *)(apic->int_req_reg + 16);
1153 val = *(uint32_t *)(apic->int_req_reg + 20);
1156 val = *(uint32_t *)(apic->int_req_reg + 24);
1159 val = *(uint32_t *)(apic->int_req_reg + 28);
1161 case TMR_CUR_CNT_OFFSET:
1162 val = apic->tmr_cur_cnt;
1165 // We are not going to implement these....
1166 case THERM_LOC_VEC_TBL_OFFSET:
1167 val = apic->therm_loc_vec_tbl.val;
1169 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1170 val = apic->perf_ctr_loc_vec_tbl.val;
1175 // handled registers
1176 case INT_CMD_LO_OFFSET:
1177 val = apic->int_cmd.lo;
1179 case INT_CMD_HI_OFFSET:
1180 val = apic->int_cmd.hi;
1183 // handle current timer count
1185 // Unhandled Registers
1186 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1187 val = apic->ext_intr_vec_tbl[0].val;
1189 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1190 val = apic->ext_intr_vec_tbl[1].val;
1192 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1193 val = apic->ext_intr_vec_tbl[2].val;
1195 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1196 val = apic->ext_intr_vec_tbl[3].val;
1200 case EXT_APIC_FEATURE_OFFSET:
1201 case EXT_APIC_CMD_OFFSET:
1205 PrintError("apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
1206 apic->lapic_id.val, core->vcpu_id, (uint32_t)reg_addr);
// Sub-dword reads: extract the addressed byte/word from the 32-bit value.
1212 uint_t byte_addr = reg_addr & 0x3;
1213 uint8_t * val_ptr = (uint8_t *)dst;
1215 *val_ptr = *(((uint8_t *)&val) + byte_addr);
1217 } else if ((length == 2) &&
1218 ((reg_addr & 0x3) != 0x3)) {
1219 uint_t byte_addr = reg_addr & 0x3;
1220 uint16_t * val_ptr = (uint16_t *)dst;
// NOTE(review): indexing a uint16_t* by byte_addr scales by 2 bytes, so
// a 2-byte read at offset 2 reads past `val` -- looks like a bug; a byte
// pointer cast before the add would match the 1-byte case. TODO confirm.
1221 *val_ptr = *(((uint16_t *)&val) + byte_addr);
1223 } else if (length == 4) {
1224 uint32_t * val_ptr = (uint32_t *)dst;
1228 PrintError("apic %u: core %u: Invalid apic read length (%d)\n",
1229 apic->lapic_id.val, core->vcpu_id, length);
1233 PrintDebug("apic %u: core %u: Read finished (val=%x)\n",
1234 apic->lapic_id.val, core->vcpu_id, *(uint32_t *)dst);
// MMIO write handler for the APIC register page. Rejects writes while the
// APIC is disabled, validates the access length (only dword writes reach
// the switch), dispatches on the register offset, and for ICR-low writes
// triggers IPI routing. Writes to read-only registers are errors; unknown
// registers are ignored. NOTE(review): `break` statements, several case
// labels, and the read-only register list are partially missing from this
// lossy extraction.
1243 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data) {
1244 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1245 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1246 addr_t reg_addr = guest_addr - apic->base_addr;
1247 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
1248 uint32_t op_val = *(uint32_t *)src;
1251 PrintDebug("apic %u: core %u: at %p and priv_data is at %p\n",
1252 apic->lapic_id.val, core->vcpu_id, apic, priv_data);
1254 PrintDebug("apic %u: core %u: write to address space (%p) (val=%x)\n",
1255 apic->lapic_id.val, core->vcpu_id, (void *)guest_addr, *(uint32_t *)src);
1257 if (msr->apic_enable == 0) {
1258 PrintError("apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
1259 apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
// NOTE(review): the argument order below is swapped relative to the
// format string -- it prints length where the core id belongs and vice
// versa. Arguments should be (lapic_id.val, core->vcpu_id, length).
1265 PrintError("apic %u: core %u: Invalid apic write length (%d)\n",
1266 apic->lapic_id.val, length, core->vcpu_id);
// Read-only registers: writing them is a guest error.
1271 case REMOTE_READ_OFFSET:
1272 case APIC_VERSION_OFFSET:
1299 case EXT_APIC_FEATURE_OFFSET:
1301 PrintError("apic %u: core %u: Attempting to write to read only register %p (error)\n",
1302 apic->lapic_id.val, core->vcpu_id, (void *)reg_addr);
1307 case APIC_ID_OFFSET:
1308 //V3_Print("apic %u: core %u: my id is being changed to %u\n",
1309 // apic->lapic_id.val, core->vcpu_id, op_val);
1311 apic->lapic_id.val = op_val;
1314 apic->task_prio.val = op_val;
// LDR/DFR writes are taken under the state lock because IPI routing on
// other cores reads them (should_deliver_ipi).
1317 PrintDebug("apic %u: core %u: setting log_dst.val to 0x%x\n",
1318 apic->lapic_id.val, core->vcpu_id, op_val);
1319 flags = v3_lock_irqsave(apic_dev->state_lock);
1320 apic->log_dst.val = op_val;
1321 v3_unlock_irqrestore(apic_dev->state_lock, flags);
1324 flags = v3_lock_irqsave(apic_dev->state_lock);
1325 apic->dst_fmt.val = op_val;
1326 v3_unlock_irqrestore(apic_dev->state_lock, flags);
1328 case SPURIOUS_INT_VEC_OFFSET:
1329 apic->spurious_int.val = op_val;
1332 apic->err_status.val = op_val;
1334 case TMR_LOC_VEC_TBL_OFFSET:
1335 apic->tmr_vec_tbl.val = op_val;
1337 case THERM_LOC_VEC_TBL_OFFSET:
1338 apic->therm_loc_vec_tbl.val = op_val;
1340 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1341 apic->perf_ctr_loc_vec_tbl.val = op_val;
1343 case LINT0_VEC_TBL_OFFSET:
1344 apic->lint0_vec_tbl.val = op_val;
1346 case LINT1_VEC_TBL_OFFSET:
1347 apic->lint1_vec_tbl.val = op_val;
1349 case ERR_VEC_TBL_OFFSET:
1350 apic->err_vec_tbl.val = op_val;
// Writing the initial count also reloads the current count (timer restart).
1352 case TMR_INIT_CNT_OFFSET:
1353 apic->tmr_init_cnt = op_val;
1354 apic->tmr_cur_cnt = op_val;
1356 case TMR_CUR_CNT_OFFSET:
1357 apic->tmr_cur_cnt = op_val;
1359 case TMR_DIV_CFG_OFFSET:
1360 PrintDebug("apic %u: core %u: setting tmr_div_cfg to 0x%x\n",
1361 apic->lapic_id.val, core->vcpu_id, op_val);
1362 apic->tmr_div_cfg.val = op_val;
1366 // Enable mask (256 bits)
1368 *(uint32_t *)(apic->int_en_reg) = op_val;
1371 *(uint32_t *)(apic->int_en_reg + 4) = op_val;
1374 *(uint32_t *)(apic->int_en_reg + 8) = op_val;
1377 *(uint32_t *)(apic->int_en_reg + 12) = op_val;
1380 *(uint32_t *)(apic->int_en_reg + 16) = op_val;
1383 *(uint32_t *)(apic->int_en_reg + 20) = op_val;
1386 *(uint32_t *)(apic->int_en_reg + 24) = op_val;
1389 *(uint32_t *)(apic->int_en_reg + 28) = op_val;
1392 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1393 apic->ext_intr_vec_tbl[0].val = op_val;
1395 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1396 apic->ext_intr_vec_tbl[1].val = op_val;
1398 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1399 apic->ext_intr_vec_tbl[2].val = op_val;
1401 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1402 apic->ext_intr_vec_tbl[3].val = op_val;
// ICR low write: latches the command and fires the IPI. A local copy of
// the ICR is taken so routing works on a stable snapshot.
1412 case INT_CMD_LO_OFFSET: {
1415 struct int_cmd_reg tmp_icr;
1417 apic->int_cmd.lo = op_val;
1419 tmp_icr = apic->int_cmd;
1421 // V3_Print("apic %u: core %u: sending cmd 0x%llx to apic %u\n",
1422 // apic->lapic_id.val, core->vcpu_id,
1423 // apic->int_cmd.val, apic->int_cmd.dst);
1425 if (route_ipi(apic_dev, apic, &tmp_icr) == -1) {
1426 PrintError("IPI Routing failure\n");
// ICR high write only stores the destination field; no IPI is sent.
1432 case INT_CMD_HI_OFFSET: {
1433 apic->int_cmd.hi = op_val;
1434 V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->vcpu_id,apic->int_cmd.hi);
1438 // Unhandled Registers
1439 case EXT_APIC_CMD_OFFSET:
1442 PrintError("apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
1443 apic->lapic_id.val, core->vcpu_id, (uint32_t)reg_addr);
1448 PrintDebug("apic %u: core %u: Write finished\n", apic->lapic_id.val, core->vcpu_id);
1456 /* Interrupt Controller Functions */
/*
 * intr_ctrl_ops callback: decide whether this core's local APIC has a
 * deliverable interrupt.  Runs on the destination core itself, so this is
 * where cross-core queued IRQ insertions are drained into the local vector
 * tables (locking approach #1 from the header comment).
 * Deliverable means: some IRR vector exists and its priority is strictly
 * higher than the highest in-service (ISR) vector.
 * NOTE(review): the declarations of req_irq/svc_irq and the return
 * statements fall in lines elided from this view (1462-1483) -- confirm
 * against the full file.
 */
1459 static int apic_intr_pending(struct guest_info * core, void * private_data) {
1460     struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1461     struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1465     // Activate all queued IRQ entries
1466     drain_irq_entries(apic);
1468     // Check for newly activated entries
1469     req_irq = get_highest_irr(apic);     // highest requested (IRR) vector, -1 if none
1470     svc_irq = get_highest_isr(apic);     // highest in-service (ISR) vector, -1 if none
1472     //    PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->vcpu_id,req_irq,svc_irq);
1474     if ((req_irq >= 0) &&
1475 	(req_irq > svc_irq)) {
/*
 * intr_ctrl_ops callback: return the vector number to inject on this core.
 * Picks the highest requested (IRR) vector, but only if it outranks the
 * highest vector already in service (ISR) -- standard local-APIC priority
 * arbitration.
 * NOTE(review): the actual return statements sit in elided lines
 * (1491, 1493-1499); presumably req_irq is returned in both visible
 * branches -- confirm against the full file.
 */
1484 static int apic_get_intr_number(struct guest_info * core, void * private_data) {
1485     struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1486     struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1487     int req_irq = get_highest_irr(apic);   // -1 when nothing is requested
1488     int svc_irq = get_highest_isr(apic);   // -1 when nothing is in service
1490     if (svc_irq == -1) {
1492     } else if (svc_irq < req_irq) {
/*
 * Public entry point: build an Interrupt Command Register image from a
 * generic IPI descriptor and hand it to route_ipi() for delivery.
 * The src apic argument to route_ipi() is NULL because this IPI does not
 * originate from a guest-visible local APIC (e.g. device-injected).
 * Returns route_ipi()'s result (-1 on routing failure).
 */
1501 int v3_apic_send_ipi(struct v3_vm_info * vm, struct v3_gen_ipi * ipi, void * dev_data) {
1502     struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1503 	(((struct vm_device *)dev_data)->private_data);
1504     struct int_cmd_reg tmp_icr;
1506     // zero out all the fields
1509     tmp_icr.vec = ipi->vector;
1510     tmp_icr.del_mode = ipi->mode;
1511     tmp_icr.dst_mode = ipi->logical;          // physical vs. logical destination mode
1512     tmp_icr.trig_mode = ipi->trigger_mode;    // edge vs. level
1513     tmp_icr.dst_shorthand = ipi->dst_shorthand;
1514     tmp_icr.dst = ipi->dst;
1517     return route_ipi(apic_dev, NULL, &tmp_icr);
/*
 * Public entry point: raise IRQ 'irq' on the APIC belonging to vcpu 'dst'.
 * The IRQ is enqueued on the destination APIC's queue (locking approach #1
 * in the header comment); the destination core drains it the next time
 * apic_intr_pending() runs there.  Under a multithreaded OS we also kick
 * the destination physical CPU so it notices the new entry promptly.
 * NOTE(review): 'dst' indexes apics[] unchecked here -- presumably callers
 * guarantee dst < num_apics; confirm.  Also note V3_Get_CPU() returns a
 * *physical* CPU id while dst is a vcpu id; the comparison assumes a 1:1
 * core mapping -- worst case is a spurious (harmless) kick.
 */
1521 int v3_apic_raise_intr(struct v3_vm_info * vm, uint32_t irq, uint32_t dst, void * dev_data) {
1522     struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1523 	(((struct vm_device*)dev_data)->private_data);
1524     struct apic_state * apic = &(apic_dev->apics[dst]);
1526     PrintDebug("apic %u core ?: raising interrupt IRQ %u (dst = %u).\n", apic->lapic_id.val, irq, dst);
1528     add_apic_irq_entry(apic, irq);
1530 #ifdef V3_CONFIG_MULTITHREAD_OS
1531     if ((V3_Get_CPU() != dst)) {
1532 	v3_interrupt_cpu(vm, dst, 0);
/*
 * intr_ctrl_ops callback: the guest is about to take vector 'irq'.
 * Move the vector's bit from the Interrupt Request Register (IRR) to the
 * In-Service Register (ISR).  Both registers are 256-bit bitmaps stored as
 * byte arrays: major_offset selects the byte (irq / 8), minor_offset the
 * bit within it (irq % 8).
 * If the IRR bit is not set we deliberately do nothing -- the injection
 * was not initiated by this APIC (e.g. raced with EOI), so we must not
 * fabricate an in-service entry.
 */
1541 static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
1542     struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1543     struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1544     int major_offset = (irq & ~0x00000007) >> 3;   // byte index: irq / 8
1545     int minor_offset = irq & 0x00000007;           // bit index: irq % 8
1546     uint8_t *req_location = apic->int_req_reg + major_offset;
1547     uint8_t *svc_location = apic->int_svc_reg + major_offset;
1548     uint8_t flag = 0x01 << minor_offset;
1550     if (*req_location & flag) {
1551 	// we will only pay attention to a begin irq if we
1552 	// know that we initiated it!
1553 	*svc_location |= flag;    // mark in-service
1554 	*req_location &= ~flag;   // clear the request
1557 	//PrintDebug("apic %u: core %u: begin irq for %d ignored since I don't own it\n",
1558 	//	   apic->lapic_id.val, core->vcpu_id, irq);
1568 /* Timer Functions */
/*
 * v3_timer_ops callback: advance the virtual APIC timer by 'cpu_cycles'
 * guest cycles.  Converts cycles to timer ticks via the divide
 * configuration register (a power-of-two shift), decrements the current
 * count, and raises the APIC timer interrupt (APIC_TMR_INT) when the count
 * reaches zero.  In periodic mode the counter is reloaded from the initial
 * count, and ticks that overshoot a whole period are tracked as missed
 * interrupts.
 * NOTE(review): the switch over tmr_div and several early returns sit in
 * elided lines (1594-1628 partially visible) -- shift_num is presumably
 * set per divider case; confirm against the full file.
 */
1570 static void apic_update_time(struct guest_info * core,
1571 			     uint64_t cpu_cycles, uint64_t cpu_freq,
1573     struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1574     struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1576     // The 32 bit GCC runtime is a pile of shit
1578     uint64_t tmr_ticks = 0;
1580     uint32_t tmr_ticks = 0;
1583     uint8_t tmr_div = *(uint8_t *)&(apic->tmr_div_cfg.val);   // low byte encodes the divider
1584     uint_t shift_num = 0;
1587     // Check whether this is true:
1588     //   -> If the Init count is zero then the timer is disabled
1589     //      and doesn't just blitz interrupts to the CPU
1590     if ((apic->tmr_init_cnt == 0) ||
1591 	( (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_ONESHOT) &&
1592 	  (apic->tmr_cur_cnt == 0))) {
1593 	//PrintDebug("apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->vcpu_id);
1611 	case APIC_TMR_DIV16:
1614 	case APIC_TMR_DIV32:
1617 	case APIC_TMR_DIV64:
1620 	case APIC_TMR_DIV128:
1624 	    PrintError("apic %u: core %u: Invalid Timer Divider configuration\n",
1625 		       apic->lapic_id.val, core->vcpu_id);
1629     tmr_ticks = cpu_cycles >> shift_num;   // divider is a power of two, so shift
1630     //    PrintDebug("Timer Ticks: %p\n", (void *)tmr_ticks);
1632     if (tmr_ticks < apic->tmr_cur_cnt) {
1633 	apic->tmr_cur_cnt -= tmr_ticks;    // timer has not expired yet
1635 	tmr_ticks -= apic->tmr_cur_cnt;    // leftover ticks past the expiry point
1636 	apic->tmr_cur_cnt = 0;
1639 	PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
1640 		   apic->lapic_id.val, core->vcpu_id,
1641 		   apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
1643 	if (apic_intr_pending(core, priv_data)) {
1644 	    PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
1645 		       apic->lapic_id.val, core->vcpu_id,
1646 		       apic_get_intr_number(core, priv_data));
1649 	if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
1650 	    PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
1651 		       apic->lapic_id.val, core->vcpu_id);
1654 	if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
	    // NOTE(review): function-local statics shared by ALL cores'
	    // timers -- stats only, but racy on SMP; confirm acceptable.
1655 	    static unsigned int nexits = 0;
1656 	    static unsigned int missed_ints = 0;
1659 	    missed_ints += tmr_ticks / apic->tmr_init_cnt;   // whole periods skipped this update
1661 	    if ((missed_ints > 0) && (nexits >= 5000)) {
1662 		V3_Print("apic %u: core %u: missed %u timer interrupts total in last %u exits.\n",
1663 			 apic->lapic_id.val, core->vcpu_id, missed_ints, nexits);
1668 	    tmr_ticks = tmr_ticks % apic->tmr_init_cnt;      // remainder inside the new period
1669 	    apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;   // reload, minus ticks already elapsed
/* Interrupt-controller interface registered with each core: pending check,
 * vector selection, and IRR->ISR transition on injection. */
1677 static struct intr_ctrl_ops intr_ops = {
1678     .intr_pending = apic_intr_pending,
1679     .get_intr_number = apic_get_intr_number,
1680     .begin_irq = apic_begin_irq,
/* Timer interface registered with each core; invoked per guest time update. */
1684 static struct v3_timer_ops timer_ops = {
1685     .update_timer = apic_update_time,
/*
 * Device teardown: for every per-core APIC, detach its interrupt controller
 * and timer hooks, then unhook the APIC base-address MSR for the VM.
 * NOTE(review): in the visible lines 'vm' is still NULL when passed to
 * v3_unhook_msr(); presumably an elided line in the loop sets
 * vm = core->vm_info -- confirm against the full file.  Loop index 'i'
 * is also declared in an elided line.
 */
1691 static int apic_free(struct apic_dev_state * apic_dev) {
1693     struct v3_vm_info * vm = NULL;
1695     for (i = 0; i < apic_dev->num_apics; i++) {
1696 	struct apic_state * apic = &(apic_dev->apics[i]);
1697 	struct guest_info * core = apic->core;
1701 	v3_remove_intr_controller(core, apic->controller_handle);
1704 	v3_remove_timer(core, apic->timer);
1711     v3_unhook_msr(vm, BASE_ADDR_MSR);
/* Device framework hooks; only teardown is provided for the LAPIC device. */
1718 static struct v3_device_ops dev_ops = {
1719     .free = (int (*)(void *))apic_free,
/*
 * Device constructor: allocate one apic_dev_state with a trailing array of
 * one apic_state per guest core (flexible-array style single allocation),
 * register the device, then for each core: initialize APIC state, register
 * the interrupt controller and timer callbacks, and hook the 4KB APIC MMIO
 * page.  Finally hook the APIC base-address MSR for the whole VM.
 * Returns 0 on success, -1 on failure (device attach / timer add).
 * NOTE(review): V3_Malloc's result is used unchecked -- an OOM here
 * dereferences NULL in v3_lock_init(); worth a guard.  Return values of
 * v3_register_intr_controller / v3_hook_full_mem / v3_hook_msr are also
 * unchecked in the visible lines.
 */
1724 static int apic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1725     char * dev_id = v3_cfg_val(cfg, "ID");
1726     struct apic_dev_state * apic_dev = NULL;
1729     PrintDebug("apic: creating an APIC for each core\n");
1731     apic_dev = (struct apic_dev_state *)V3_Malloc(sizeof(struct apic_dev_state) +
1732 						  sizeof(struct apic_state) * vm->num_cores);
1734     apic_dev->num_apics = vm->num_cores;
1735     v3_lock_init(&(apic_dev->state_lock));
1737     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, apic_dev);
1740 	PrintError("apic: Could not attach device %s\n", dev_id);
1746     for (i = 0; i < vm->num_cores; i++) {
1747 	struct apic_state * apic = &(apic_dev->apics[i]);
1748 	struct guest_info * core = &(vm->cores[i]);
1752 	init_apic_state(apic, i);
1754 	apic->controller_handle = v3_register_intr_controller(core, &intr_ops, apic_dev);
1756 	apic->timer = v3_add_timer(core, &timer_ops, apic_dev);
1758 	if (apic->timer == NULL) {
1759 	    PrintError("APIC: Failed to attach timer to core %d\n", i);
1760 	    v3_remove_device(dev);   // also frees apic_dev via dev_ops.free
1764 	v3_hook_full_mem(vm, core->vcpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
1766 	PrintDebug("apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
1769 #ifdef V3_CONFIG_DEBUG_APIC
1770     for (i = 0; i < vm->num_cores; i++) {
1771 	struct apic_state * apic = &(apic_dev->apics[i]);
1772 	PrintDebug("apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
1773 		   i, apic, apic->lapic_id.val, apic->base_addr_msr.value,apic->core);
1778     PrintDebug("apic: priv_data is at %p\n", apic_dev);
1780     v3_hook_msr(vm, BASE_ADDR_MSR, read_apic_msr, write_apic_msr, apic_dev);
// Register this device with the Palacios config system under the name
// "LAPIC", using apic_init as its constructor.
1787 device_register("LAPIC", apic_init)