2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Authors: Jack Lange <jarusl@cs.northwestern.edu>
15 * Peter Dinda <pdinda@northwestern.edu> (SMP)
17 * This is free software. You are permitted to use,
18 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <devices/apic.h>
23 #include <devices/apic_regs.h>
24 #include <palacios/vmm.h>
25 #include <palacios/vmm_msr.h>
26 #include <palacios/vmm_sprintf.h>
27 #include <palacios/vm_guest.h>
28 #include <palacios/vmm_types.h>
32 // MUST DO APIC SCAN FOR PHYSICAL DELIVERY
37 #ifndef CONFIG_DEBUG_APIC
39 #define PrintDebug(fmt, args...)
42 static char * shorthand_str[] = {
49 static char * deliverymode_str[] = {
59 #ifdef CONFIG_DEBUG_APIC
63 #define v3_lock(p) p=p
64 #define v3_unlock(p) p=p
67 typedef enum { APIC_TMR_INT, APIC_THERM_INT, APIC_PERF_INT,
68 APIC_LINT0_INT, APIC_LINT1_INT, APIC_ERR_INT } apic_irq_type_t;
70 #define APIC_FIXED_DELIVERY 0x0
71 #define APIC_LOWEST_DELIVERY 0x1
72 #define APIC_SMI_DELIVERY 0x2
73 #define APIC_RES1_DELIVERY 0x3
74 #define APIC_NMI_DELIVERY 0x4
75 #define APIC_INIT_DELIVERY 0x5
76 #define APIC_SIPI_DELIVERY 0x6
77 #define APIC_EXTINT_DELIVERY 0x7
79 #define APIC_SHORTHAND_NONE 0x0
80 #define APIC_SHORTHAND_SELF 0x1
81 #define APIC_SHORTHAND_ALL 0x2
82 #define APIC_SHORTHAND_ALL_BUT_ME 0x3
84 #define APIC_DEST_PHYSICAL 0x0
85 #define APIC_DEST_LOGICAL 0x1
88 #define BASE_ADDR_MSR 0x0000001B
89 #define DEFAULT_BASE_ADDR 0xfee00000
91 #define APIC_ID_OFFSET 0x020
92 #define APIC_VERSION_OFFSET 0x030
93 #define TPR_OFFSET 0x080
94 #define APR_OFFSET 0x090
95 #define PPR_OFFSET 0x0a0
96 #define EOI_OFFSET 0x0b0
97 #define REMOTE_READ_OFFSET 0x0c0
98 #define LDR_OFFSET 0x0d0
99 #define DFR_OFFSET 0x0e0
100 #define SPURIOUS_INT_VEC_OFFSET 0x0f0
102 #define ISR_OFFSET0 0x100 // 0x100 - 0x170
103 #define ISR_OFFSET1 0x110 // 0x100 - 0x170
104 #define ISR_OFFSET2 0x120 // 0x100 - 0x170
105 #define ISR_OFFSET3 0x130 // 0x100 - 0x170
106 #define ISR_OFFSET4 0x140 // 0x100 - 0x170
107 #define ISR_OFFSET5 0x150 // 0x100 - 0x170
108 #define ISR_OFFSET6 0x160 // 0x100 - 0x170
109 #define ISR_OFFSET7 0x170 // 0x100 - 0x170
111 #define TRIG_OFFSET0 0x180 // 0x180 - 0x1f0
112 #define TRIG_OFFSET1 0x190 // 0x180 - 0x1f0
113 #define TRIG_OFFSET2 0x1a0 // 0x180 - 0x1f0
114 #define TRIG_OFFSET3 0x1b0 // 0x180 - 0x1f0
115 #define TRIG_OFFSET4 0x1c0 // 0x180 - 0x1f0
116 #define TRIG_OFFSET5 0x1d0 // 0x180 - 0x1f0
117 #define TRIG_OFFSET6 0x1e0 // 0x180 - 0x1f0
118 #define TRIG_OFFSET7 0x1f0 // 0x180 - 0x1f0
121 #define IRR_OFFSET0 0x200 // 0x200 - 0x270
122 #define IRR_OFFSET1 0x210 // 0x200 - 0x270
123 #define IRR_OFFSET2 0x220 // 0x200 - 0x270
124 #define IRR_OFFSET3 0x230 // 0x200 - 0x270
125 #define IRR_OFFSET4 0x240 // 0x200 - 0x270
126 #define IRR_OFFSET5 0x250 // 0x200 - 0x270
127 #define IRR_OFFSET6 0x260 // 0x200 - 0x270
128 #define IRR_OFFSET7 0x270 // 0x200 - 0x270
131 #define ESR_OFFSET 0x280
132 #define INT_CMD_LO_OFFSET 0x300
133 #define INT_CMD_HI_OFFSET 0x310
134 #define TMR_LOC_VEC_TBL_OFFSET 0x320
135 #define THERM_LOC_VEC_TBL_OFFSET 0x330
136 #define PERF_CTR_LOC_VEC_TBL_OFFSET 0x340
137 #define LINT0_VEC_TBL_OFFSET 0x350
138 #define LINT1_VEC_TBL_OFFSET 0x360
139 #define ERR_VEC_TBL_OFFSET 0x370
140 #define TMR_INIT_CNT_OFFSET 0x380
141 #define TMR_CUR_CNT_OFFSET 0x390
142 #define TMR_DIV_CFG_OFFSET 0x3e0
143 #define EXT_APIC_FEATURE_OFFSET 0x400
144 #define EXT_APIC_CMD_OFFSET 0x410
145 #define SEOI_OFFSET 0x420
147 #define IER_OFFSET0 0x480 // 0x480 - 0x4f0
148 #define IER_OFFSET1 0x490 // 0x480 - 0x4f0
149 #define IER_OFFSET2 0x4a0 // 0x480 - 0x4f0
150 #define IER_OFFSET3 0x4b0 // 0x480 - 0x4f0
151 #define IER_OFFSET4 0x4c0 // 0x480 - 0x4f0
152 #define IER_OFFSET5 0x4d0 // 0x480 - 0x4f0
153 #define IER_OFFSET6 0x4e0 // 0x480 - 0x4f0
154 #define IER_OFFSET7 0x4f0 // 0x480 - 0x4f0
156 #define EXT_INT_LOC_VEC_TBL_OFFSET0 0x500 // 0x500 - 0x530
157 #define EXT_INT_LOC_VEC_TBL_OFFSET1 0x510 // 0x500 - 0x530
158 #define EXT_INT_LOC_VEC_TBL_OFFSET2 0x520 // 0x500 - 0x530
159 #define EXT_INT_LOC_VEC_TBL_OFFSET3 0x530 // 0x500 - 0x530
166 uint8_t bootstrap_cpu : 1;
168 uint8_t apic_enable : 1;
169 uint64_t base_addr : 40;
171 } __attribute__((packed));
172 } __attribute__((packed));
173 } __attribute__((packed));
177 typedef enum {INIT_ST,
179 STARTED} ipi_state_t;
181 struct apic_dev_state;
187 struct apic_msr base_addr_msr;
190 /* memory map registers */
192 struct lapic_id_reg lapic_id;
193 struct apic_ver_reg apic_ver;
194 struct ext_apic_ctrl_reg ext_apic_ctrl;
195 struct local_vec_tbl_reg local_vec_tbl;
196 struct tmr_vec_tbl_reg tmr_vec_tbl;
197 struct tmr_div_cfg_reg tmr_div_cfg;
198 struct lint_vec_tbl_reg lint0_vec_tbl;
199 struct lint_vec_tbl_reg lint1_vec_tbl;
200 struct perf_ctr_loc_vec_tbl_reg perf_ctr_loc_vec_tbl;
201 struct therm_loc_vec_tbl_reg therm_loc_vec_tbl;
202 struct err_vec_tbl_reg err_vec_tbl;
203 struct err_status_reg err_status;
204 struct spurious_int_reg spurious_int;
205 struct int_cmd_reg int_cmd;
206 struct log_dst_reg log_dst;
207 struct dst_fmt_reg dst_fmt;
208 struct arb_prio_reg arb_prio;
209 struct task_prio_reg task_prio;
210 struct proc_prio_reg proc_prio;
211 struct ext_apic_feature_reg ext_apic_feature;
212 struct spec_eoi_reg spec_eoi;
215 uint32_t tmr_cur_cnt;
216 uint32_t tmr_init_cnt;
219 struct local_vec_tbl_reg ext_intr_vec_tbl[4];
221 uint32_t rem_rd_data;
224 ipi_state_t ipi_state;
226 uint8_t int_req_reg[32];
227 uint8_t int_svc_reg[32];
228 uint8_t int_en_reg[32];
229 uint8_t trig_mode_reg[32];
231 struct guest_info * core;
233 void * controller_handle;
235 struct v3_timer * timer;
// Top-level device state: one struct shared by all cores, with a
// trailing variable-length array of per-core local APICs.
248 struct apic_dev_state {
250 // v3_lock_t ipi_lock; // acquired by route_ipi - only one IPI active at a time
// NOTE(review): listing has dropped line(s) here; route_ipi below reads
// apic_dev->num_apics, so an apic-count field presumably lives in this
// gap -- confirm against the complete source.
252 struct apic_state apics[0]; // per-core APIC array (old-style flexible array member)
253 } __attribute__((packed));
// Forward declarations for the memory-hook handlers registered on the
// APIC MMIO page (used by write_apic_msr when re-hooking the base address).
259 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data);
260 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data);
// Set one virtual local APIC to its power-on register state.
// NOTE(review): this listing has dropped lines -- the BSP-vs-AP branch that
// selects between the two base_addr_msr assignments (bit 8 = BSP flag set
// for apic 0) is missing its if/else lines, as is the closing brace.
// Verify control flow against the complete source.
263 static void init_apic_state(struct apic_state * apic, uint32_t id) {
264 apic->base_addr = DEFAULT_BASE_ADDR;
267 // boot processor, enabled
268 apic->base_addr_msr.value = 0x0000000000000900LL;
270 // ap processor, enabled
271 apic->base_addr_msr.value = 0x0000000000000800LL;
274 // same base address regardless of ap or main
275 apic->base_addr_msr.value |= ((uint64_t)DEFAULT_BASE_ADDR);
277 PrintDebug("apic %u: (init_apic_state): msr=0x%llx\n",id, apic->base_addr_msr.value);
279 PrintDebug("apic %u: (init_apic_state): Sizeof Interrupt Request Register %d, should be 32\n",
280 id, (uint_t)sizeof(apic->int_req_reg));
// Clear the 256-bit IRR/ISR/TMR bitmaps; enable (IER) all vectors by default.
282 memset(apic->int_req_reg, 0, sizeof(apic->int_req_reg));
283 memset(apic->int_svc_reg, 0, sizeof(apic->int_svc_reg));
284 memset(apic->int_en_reg, 0xff, sizeof(apic->int_en_reg));
285 memset(apic->trig_mode_reg, 0, sizeof(apic->trig_mode_reg));
287 apic->eoi = 0x00000000;
288 apic->rem_rd_data = 0x00000000;
289 apic->tmr_init_cnt = 0x00000000;
290 apic->tmr_cur_cnt = 0x00000000;
// The APIC ID doubles as the core index throughout this device.
292 apic->lapic_id.val = id;
294 apic->ipi_state = INIT_ST;
296 // The P6 has 6 LVT entries, so we set the value to (6-1)...
297 apic->apic_ver.val = 0x80050010;
299 apic->task_prio.val = 0x00000000;
300 apic->arb_prio.val = 0x00000000;
301 apic->proc_prio.val = 0x00000000;
302 apic->log_dst.val = 0x00000000;
// DFR all-ones = flat model (model field 0xf) at reset.
303 apic->dst_fmt.val = 0xffffffff;
304 apic->spurious_int.val = 0x000000ff;
305 apic->err_status.val = 0x00000000;
306 apic->int_cmd.val = 0x0000000000000000LL;
// All LVT entries reset with the mask bit (bit 16) set.
307 apic->tmr_vec_tbl.val = 0x00010000;
308 apic->therm_loc_vec_tbl.val = 0x00010000;
309 apic->perf_ctr_loc_vec_tbl.val = 0x00010000;
310 apic->lint0_vec_tbl.val = 0x00010000;
311 apic->lint1_vec_tbl.val = 0x00010000;
312 apic->err_vec_tbl.val = 0x00010000;
313 apic->tmr_div_cfg.val = 0x00000000;
314 //apic->ext_apic_feature.val = 0x00000007;
315 apic->ext_apic_feature.val = 0x00040007;
316 apic->ext_apic_ctrl.val = 0x00000000;
317 apic->spec_eoi.val = 0x00000000;
319 v3_lock_init(&(apic->lock));
328 // MSR handler - locks apic itself
// Handle a guest RDMSR of the APIC base MSR (0x1B): return this core's
// APIC base address.
// NOTE(review): the matching v3_lock() call and the return statement are
// not visible in this listing (the v3_unlock below implies a lock was
// taken) -- confirm against the complete source.
329 static int read_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t * dst, void * priv_data) {
330 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
331 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]); // one APIC per core, indexed by cpu_id
333 PrintDebug("apic %u: core %u: MSR read\n", apic->lapic_id.val, core->cpu_id);
335 dst->value = apic->base_addr;
336 v3_unlock(apic->lock);
340 // MSR handler - locks apic itself
// Handle a guest WRMSR of the APIC base MSR: relocate the APIC MMIO page by
// deleting the memory hook at the old base and installing a new full hook
// (apic_read/apic_write) at the address the guest wrote.
// NOTE(review): the v3_lock() call, error-path returns, and closing brace
// are not visible in this listing -- confirm against the complete source.
341 static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, void * priv_data) {
342 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
343 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
344 struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id, apic->base_addr);
347 PrintDebug("apic %u: core %u: MSR write\n", apic->lapic_id.val, core->cpu_id);
349 if (old_reg == NULL) {
// The previous base must already be hooked; otherwise state is inconsistent.
351 PrintError("apic %u: core %u: APIC Base address region does not exit...\n",
352 apic->lapic_id.val, core->cpu_id);
358 v3_delete_mem_region(core->vm_info, old_reg);
// NOTE(review): src.value is used verbatim as the new base; no masking of
// the MSR's flag bits (BSP/enable) is visible here -- verify intended.
360 apic->base_addr = src.value;
362 if (v3_hook_full_mem(core->vm_info, core->cpu_id, apic->base_addr,
363 apic->base_addr + PAGE_SIZE_4KB,
364 apic_read, apic_write, apic_dev) == -1) {
365 PrintError("apic %u: core %u: Could not hook new APIC Base address\n",
366 apic->lapic_id.val, core->cpu_id);
367 v3_unlock(apic->lock);
371 v3_unlock(apic->lock);
376 // irq_num is the bit offset into a 256 bit buffer...
379 // 0 = OK, no interrupt needed now
380 // 1 = OK, interrupt needed now
381 // the caller is expeced to have locked the apic
// Raise vector irq_num on this APIC: set its bit in the 256-bit IRR
// (int_req_reg) if the corresponding IER (int_en_reg) bit allows it.
// Byte index = irq_num / 8, bit index = irq_num % 8.
// NOTE(review): listing has dropped lines (returns, an in_icr check around
// line 408, closing braces) -- confirm exact paths in the complete source.
382 static int activate_apic_irq_nolock(struct apic_state * apic, uint32_t irq_num) {
383 int major_offset = (irq_num & ~0x00000007) >> 3;
384 int minor_offset = irq_num & 0x00000007;
385 uint8_t * req_location = apic->int_req_reg + major_offset;
386 uint8_t * en_location = apic->int_en_reg + major_offset;
387 uint8_t flag = 0x1 << minor_offset;
// Vectors 0-15 are architecturally reserved; anything above 255 cannot exist.
390 if (irq_num <= 15 || irq_num>255) {
391 PrintError("apic %u: core %d: Attempting to raise an invalid interrupt: %d\n",
392 apic->lapic_id.val, apic->core->cpu_id, irq_num);
397 PrintDebug("apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
399 if (*req_location & flag) {
// Already pending: the second raise coalesces into the existing IRR bit.
400 PrintDebug("Interrupt %d coallescing\n", irq_num);
404 if (*en_location & flag) {
405 *req_location |= flag;
408 PrintError("apic %u: core %d: activate_apic_irq_nolock to deliver irq 0x%x when in_icr=1\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
414 PrintDebug("apic %u: core %d: Interrupt not enabled... %.2x\n",
415 apic->lapic_id.val, apic->core->cpu_id,*en_location);
422 // Caller is expected to have locked the apic
// Return the highest-numbered vector currently in-service (set in the
// 256-bit ISR bitmap), scanning bytes and bits from high to low.
// NOTE(review): the "not found" return path (presumably -1) and closing
// braces are not visible in this listing.
423 static int get_highest_isr(struct apic_state * apic) {
426 // We iterate backwards to find the highest priority
427 for (i = 31; i >= 0; i--) {
428 uint8_t * svc_major = apic->int_svc_reg + i;
430 if ((*svc_major) & 0xff) {
431 for (j = 7; j >= 0; j--) {
432 uint8_t flag = 0x1 << j;
433 if ((*svc_major) & flag) {
434 return ((i * 8) + j); // vector = byte*8 + bit
444 // Caller is expected to have locked the apic
// Return the highest-numbered vector currently requested (set in the
// 256-bit IRR bitmap); mirror image of get_highest_isr over int_req_reg.
// NOTE(review): the "not found" return path and closing braces are not
// visible in this listing.
445 static int get_highest_irr(struct apic_state * apic) {
448 // We iterate backwards to find the highest priority
449 for (i = 31; i >= 0; i--) {
450 uint8_t * req_major = apic->int_req_reg + i;
452 if ((*req_major) & 0xff) {
453 for (j = 7; j >= 0; j--) {
454 uint8_t flag = 0x1 << j;
455 if ((*req_major) & flag) {
456 return ((i * 8) + j); // vector = byte*8 + bit
467 // Caller is expected to have locked the apic
// Complete the highest-priority in-service interrupt: clear its ISR bit.
// A special case exists for IRQ 238 under CONFIG_CRAY_XT (Seastar-related,
// presumably -- TODO confirm what the dropped lines around 483-490 do).
468 static int apic_do_eoi(struct apic_state * apic) {
469 int isr_irq = get_highest_isr(apic);
472 int major_offset = (isr_irq & ~0x00000007) >> 3;
473 int minor_offset = isr_irq & 0x00000007;
474 uint8_t flag = 0x1 << minor_offset;
475 uint8_t * svc_location = apic->int_svc_reg + major_offset;
477 PrintDebug("apic %u: core ?: Received APIC EOI for IRQ %d\n", apic->lapic_id.val,isr_irq);
479 *svc_location &= ~flag; // retire the interrupt from service
481 #ifdef CONFIG_CRAY_XT
483 if ((isr_irq == 238) ||
485 PrintDebug("apic %u: core ?: Acking IRQ %d\n", apic->lapic_id.val,isr_irq);
488 if (isr_irq == 238) {
// If get_highest_isr found nothing, the EOI is spurious (else-branch,
// dropped from this listing).
493 //PrintError("apic %u: core ?: Spurious EOI...\n",apic->lapic_id.val);
499 // Caller is expected to have locked the apic
// Raise an interrupt sourced from one of this APIC's Local Vector Table
// entries (timer, thermal, perf counter, LINT0/1, error). Reads the vector,
// delivery mode, and mask bit from the matching LVT register, then injects
// via activate_apic_irq_nolock for fixed delivery.
// NOTE(review): the switch(int_type) header, case labels, and break lines
// were dropped from this listing -- the vec_num/del_mode/masked triples
// below are the bodies of the individual cases.
500 static int activate_internal_irq_nolock(struct apic_state * apic, apic_irq_type_t int_type) {
501 uint32_t vec_num = 0;
502 uint32_t del_mode = 0;
// APIC_TMR_INT: timer LVT always uses fixed delivery.
508 vec_num = apic->tmr_vec_tbl.vec;
509 del_mode = APIC_FIXED_DELIVERY;
510 masked = apic->tmr_vec_tbl.mask;
// APIC_THERM_INT
513 vec_num = apic->therm_loc_vec_tbl.vec;
514 del_mode = apic->therm_loc_vec_tbl.msg_type;
515 masked = apic->therm_loc_vec_tbl.mask;
// APIC_PERF_INT
518 vec_num = apic->perf_ctr_loc_vec_tbl.vec;
519 del_mode = apic->perf_ctr_loc_vec_tbl.msg_type;
520 masked = apic->perf_ctr_loc_vec_tbl.mask;
// APIC_LINT0_INT
523 vec_num = apic->lint0_vec_tbl.vec;
524 del_mode = apic->lint0_vec_tbl.msg_type;
525 masked = apic->lint0_vec_tbl.mask;
// APIC_LINT1_INT
528 vec_num = apic->lint1_vec_tbl.vec;
529 del_mode = apic->lint1_vec_tbl.msg_type;
530 masked = apic->lint1_vec_tbl.mask;
// APIC_ERR_INT: error LVT always uses fixed delivery.
533 vec_num = apic->err_vec_tbl.vec;
534 del_mode = APIC_FIXED_DELIVERY;
535 masked = apic->err_vec_tbl.mask;
538 PrintError("apic %u: core ?: Invalid APIC interrupt type\n", apic->lapic_id.val);
542 // interrupt is masked, don't send
544 PrintDebug("apic %u: core ?: Inerrupt is masked\n", apic->lapic_id.val);
548 if (del_mode == APIC_FIXED_DELIVERY) {
549 //PrintDebug("Activating internal APIC IRQ %d\n", vec_num);
550 return activate_apic_irq_nolock(apic, vec_num);
// Only fixed delivery is implemented for LVT sources.
552 PrintError("apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
558 // Caller is expected to have locked the destination apic
// Cluster-model logical match: high nibble of the message destination
// address (mda) selects the cluster (exact match), low nibble is a bitmask
// over the four members of that cluster (any-bit match).
559 static inline int should_deliver_cluster_ipi(struct guest_info * dst_core,
560 struct apic_state * dst_apic, uint8_t mda) {
562 if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) && /* (I am in the cluster and */
563 ((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { /* I am in the set) */
565 PrintDebug("apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
566 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
567 dst_apic->log_dst.dst_log_id);
// (accept/reject returns dropped from this listing)
571 PrintDebug("apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
572 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
573 dst_apic->log_dst.dst_log_id);
578 // Caller is expected to have locked the destiation apic
// Flat-model logical match: the 8-bit mda is a bitmask; deliver if it
// intersects this APIC's logical destination ID.
579 static inline int should_deliver_flat_ipi(struct guest_info * dst_core,
580 struct apic_state * dst_apic, uint8_t mda) {
582 if (dst_apic->log_dst.dst_log_id & mda) { // I am in the set
584 PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
585 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
586 dst_apic->log_dst.dst_log_id);
// (accept/reject returns dropped from this listing)
592 PrintDebug("apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
593 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
594 dst_apic->log_dst.dst_log_id);
600 // Caller is expected to have locked the destiation apic
// Dispatch logical-destination matching by DFR model: 0xf = flat model,
// 0x0 = cluster model; anything else is an error. Broadcast (mda == 0xff,
// per the dropped condition lines) is always delivered.
601 static int should_deliver_ipi(struct guest_info * dst_core,
602 struct apic_state * dst_apic, uint8_t mda) {
605 if (dst_apic->dst_fmt.model == 0xf) {
608 /* always deliver broadcast */
612 return should_deliver_flat_ipi(dst_core, dst_apic, mda);
614 } else if (dst_apic->dst_fmt.model == 0x0) {
617 /* always deliver broadcast */
621 return should_deliver_cluster_ipi(dst_core, dst_apic, mda);
624 PrintError("apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
625 dst_apic->lapic_id.val, dst_core->cpu_id, dst_apic->dst_fmt.model);
630 // Caller is expected to have locked the destination apic
631 // Only the src_apic pointer is used
// Deliver a single IPI to dst_apic according to del_mode:
//   FIXED/LOWEST -> inject the vector into the destination IRR and kick the
//                   destination core if it is a different physical core;
//   INIT         -> move the destination through the INIT->SIPI ipi_state;
//   SIPI         -> set the destination's CS:RIP from the vector and mark
//                   the core CORE_RUNNING (standard INIT-SIPI-SIPI startup);
//   SMI/NMI/ExtInt/reserved -> unsupported, error.
// NOTE(review): the switch(del_mode) header, break statements, and several
// braces were dropped from this listing.
632 static int deliver_ipi(struct apic_state * src_apic,
633 struct apic_state * dst_apic,
634 uint32_t vector, uint8_t del_mode) {
637 struct guest_info * dst_core = dst_apic->core;
642 case APIC_FIXED_DELIVERY:
643 case APIC_LOWEST_DELIVERY:
645 // caller needs to have decided which apic to deliver to!
647 PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->cpu_id);
// Returns 1 when the destination now has a deliverable interrupt pending.
649 do_xcall=activate_apic_irq_nolock(dst_apic, vector);
652 PrintError("Failed to activate apic irq!\n");
656 if (do_xcall && (dst_apic != src_apic)) {
657 // Assume core # is same as logical processor for now
658 // TODO FIX THIS FIX THIS
659 // THERE SHOULD BE: guestapicid->virtualapicid map,
660 // cpu_id->logical processor map
661 // host maitains logical proc->phsysical proc
662 PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
664 #ifdef CONFIG_MULTITHREAD_OS
// Force a VM exit on the target core so it notices the new interrupt.
665 v3_interrupt_cpu(dst_core->vm_info, dst_core->cpu_id, 0);
673 case APIC_INIT_DELIVERY: {
675 PrintDebug(" INIT delivery to core %u\n", dst_core->cpu_id);
677 // TODO: any APIC reset on dest core (shouldn't be needed, but not sure...)
680 if (dst_apic->ipi_state != INIT_ST) {
681 PrintError(" Warning: core %u is not in INIT state (mode = %d), ignored (assuming this is the deassert)\n",
682 dst_core->cpu_id, dst_apic->ipi_state);
683 // Only a warning, since INIT INIT SIPI is common
687 // We transition the target core to SIPI state
688 dst_apic->ipi_state = SIPI; // note: locking should not be needed here
690 // That should be it since the target core should be
691 // waiting in host on this transition
692 // either it's on another core or on a different preemptive thread
693 // in both cases, it will quickly notice this transition
694 // in particular, we should not need to force an exit here
696 PrintDebug(" INIT delivery done\n");
700 case APIC_SIPI_DELIVERY: {
// SIPI is only honored when the core is in the SIPI ipi_state (i.e. an
// INIT was seen first); otherwise it is ignored with an error message.
703 if (dst_apic->ipi_state != SIPI) {
704 PrintError(" core %u is not in SIPI state (mode = %d), ignored!\n",
705 dst_core->cpu_id, dst_apic->ipi_state);
709 // Write the RIP, CS, and descriptor
710 // assume the rest is already good to go
712 // vector VV -> rip at 0
714 // This means we start executing at linear address VV000
716 // So the selector needs to be VV00
717 // and the base needs to be VV000
720 dst_core->segments.cs.selector = vector << 8;
721 dst_core->segments.cs.limit = 0xffff;
722 dst_core->segments.cs.base = vector << 12;
724 PrintDebug(" SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
725 vector, dst_core->segments.cs.selector, dst_core->cpu_id);
726 // Maybe need to adjust the APIC?
728 // We transition the target core to SIPI state
729 dst_core->core_run_state = CORE_RUNNING; // note: locking should not be needed here
730 dst_apic->ipi_state = STARTED;
732 // As with INIT, we should not need to do anything else
734 PrintDebug(" SIPI delivery done\n");
738 case APIC_SMI_DELIVERY:
739 case APIC_RES1_DELIVERY: // reserved
740 case APIC_NMI_DELIVERY:
741 case APIC_EXTINT_DELIVERY: // ExtInt
743 PrintError("IPI %d delivery is unsupported\n", del_mode);
751 // route_ipi is responsible for all locking
752 // the assumption is that you enter with no locks
753 // there is a global lock for the icc bus, so only
754 // one route_ipi progresses at any time
755 // destination apics are locked as needed
756 // if multiple apic locks are acquired at any point,
757 // this is done in the order of the array, so no
758 // deadlock should be possible
// Route an IPI described by the ICR to its destination APIC(s):
//   SHORTHAND_NONE + physical  -> single destination, indexed by icr->dst;
//   SHORTHAND_NONE + logical   -> match every APIC against the mda
//                                 (icr->dst); for lowest-priority delivery,
//                                 scan for the matching APIC with the lowest
//                                 task priority, keeping that one locked;
//   SHORTHAND_SELF             -> deliver back to src_apic;
//   SHORTHAND_ALL / ALL_BUT_ME -> loop over every APIC (skipping src for
//                                 ALL_BUT_ME).
// NOTE(review): this listing has dropped lines (PrintDebug argument lines,
// some braces/breaks, and the good/bad return paths at the end) -- verify
// exact control flow against the complete source.
759 static int route_ipi(struct apic_dev_state * apic_dev,
760 struct apic_state * src_apic,
761 struct int_cmd_reg * icr) {
762 struct apic_state * dest_apic = NULL;
765 //v3_lock(apic_dev->ipi_lock); // this may not be needed
766 // now I know only one IPI is being routed, this one
767 // also, I do not have any apic locks
768 // I need to acquire locks on pairs of src/dest apics
769 // and I will do that using the total order
770 // given by their cores
773 PrintDebug("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
774 deliverymode_str[icr->del_mode],
777 (icr->dst_mode == 0) ? "(physical)" : "(logical)",
778 shorthand_str[icr->dst_shorthand],
784 V3_Print("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
785 deliverymode_str[icr->del_mode],
788 (icr->dst_mode == 0) ? "(physical)" : "(logical)",
789 shorthand_str[icr->dst_shorthand],
797 switch (icr->dst_shorthand) {
799 case APIC_SHORTHAND_NONE: // no shorthand
800 if (icr->dst_mode == APIC_DEST_PHYSICAL) {
// Physical mode: the destination field is a direct APIC index.
802 if (icr->dst >= apic_dev->num_apics) {
803 PrintError("apic: Attempted send to unregistered apic id=%u\n", icr->dst);
804 goto route_ipi_out_bad;
808 dest_apic = &(apic_dev->apics[icr->dst]);
810 V3_Print("apic: phsyical destination of %u (apic %u at 0x%p)\n", icr->dst,dest_apic->lapic_id.val,dest_apic);
812 v3_lock(dest_apic->lock);
814 if (deliver_ipi(src_apic, dest_apic,
815 icr->vec, icr->del_mode) == -1) {
816 PrintError("apic: Could not deliver IPI\n");
817 v3_unlock(dest_apic->lock);
818 goto route_ipi_out_bad;
821 v3_unlock(dest_apic->lock);
823 V3_Print("apic: done\n");
825 } else if (icr->dst_mode == APIC_DEST_LOGICAL) {
827 if (icr->del_mode!=APIC_LOWEST_DELIVERY ) {
828 // logical, but not lowest priority
829 // we immediately trigger
830 // fixed, smi, reserved, nmi, init, sipi, etc
833 uint8_t mda = icr->dst; // message destination address
835 for (i = 0; i < apic_dev->num_apics; i++) {
837 dest_apic = &(apic_dev->apics[i]);
839 v3_lock(dest_apic->lock);
841 int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
843 if (del_flag == -1) {
844 PrintError("apic: Error checking delivery mode\n");
845 v3_unlock(dest_apic->lock);
846 goto route_ipi_out_bad;
847 } else if (del_flag == 1) {
848 if (deliver_ipi(src_apic, dest_apic,
849 icr->vec, icr->del_mode) == -1) {
850 PrintError("apic: Error: Could not deliver IPI\n");
851 v3_unlock(dest_apic->lock);
852 goto route_ipi_out_bad;
856 v3_unlock(dest_apic->lock);
858 } else { //APIC_LOWEST_DELIVERY
859 // logical, lowest priority
860 // scan, keeping a lock on the current best, then trigger
863 struct apic_state * cur_best_apic = NULL;
865 uint8_t mda = icr->dst;
870 // Note that even if there are multiple concurrent
871 // copies of this loop executing, they are all
872 // locking in the same order
874 for (i = 0; i < apic_dev->num_apics; i++) {
876 dest_apic = &(apic_dev->apics[i]);
878 v3_lock(dest_apic->lock);
881 int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
883 if (del_flag == -1) {
884 PrintError("apic: Error checking delivery mode\n");
885 v3_unlock(dest_apic->lock);
// On error, also release the lock still held on the current best candidate.
886 if (cur_best_apic && cur_best_apic!=dest_apic) {
887 v3_unlock(cur_best_apic->lock);
889 goto route_ipi_out_bad;
890 } else if (del_flag == 1) {
891 // update priority for lowest priority scan
892 if (!cur_best_apic) {
893 cur_best_apic=dest_apic; // note we leave it locked
894 have_cur_lock=0; // we will unlock as cur_best_apic
895 } else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
896 // we now unlock the current best one and then switch
897 // so in the end we have a lock on the new cur_best_apic
898 v3_unlock(cur_best_apic->lock);
899 cur_best_apic=dest_apic;
900 have_cur_lock=0; // will unlock as cur_best_apic
904 v3_unlock(dest_apic->lock);
908 // now we will deliver to the best one if it exists
910 if (!cur_best_apic) {
911 PrintDebug("apic: lowest priority deliver, but no destinations!\n");
913 if (deliver_ipi(src_apic, cur_best_apic,
914 icr->vec, icr->del_mode) == -1) {
915 PrintError("apic: Error: Could not deliver IPI\n");
916 v3_unlock(cur_best_apic->lock);
917 goto route_ipi_out_bad;
919 v3_unlock(cur_best_apic->lock);
921 //V3_Print("apic: logical, lowest priority delivery to apic %u\n",cur_best_apic->lapic_id.val);
928 case APIC_SHORTHAND_SELF: // self
930 /* I assume I am already locked! */
932 if (src_apic == NULL) { /* this is not an apic, but it's trying to send to itself??? */
933 PrintError("apic: Sending IPI to self from generic IPI sender\n");
937 v3_lock(src_apic->lock);
939 if (icr->dst_mode == APIC_DEST_PHYSICAL) { /* physical delivery */
940 if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
941 PrintError("apic: Could not deliver IPI to self (physical)\n");
942 v3_unlock(src_apic->lock);
943 goto route_ipi_out_bad;
945 } else if (icr->dst_mode == APIC_DEST_LOGICAL) { /* logical delivery */
946 PrintError("apic: use of logical delivery in self (untested)\n");
947 if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
948 PrintError("apic: Could not deliver IPI to self (logical)\n");
949 v3_unlock(src_apic->lock);
950 goto route_ipi_out_bad;
953 v3_unlock(src_apic->lock);
956 case APIC_SHORTHAND_ALL:
957 case APIC_SHORTHAND_ALL_BUT_ME: { /* all and all-but-me */
958 /* assuming that logical verus physical doesn't matter
959 although it is odd that both are used */
963 for (i = 0; i < apic_dev->num_apics; i++) {
964 dest_apic = &(apic_dev->apics[i]);
// Skip src_apic only for ALL_BUT_ME; ALL includes the sender.
967 if ((dest_apic != src_apic) || (icr->dst_shorthand == APIC_SHORTHAND_ALL)) {
968 v3_lock(dest_apic->lock);
969 if (deliver_ipi(src_apic, dest_apic, icr->vec, icr->del_mode) == -1) {
970 PrintError("apic: Error: Could not deliver IPI\n");
971 v3_unlock(dest_apic->lock);
972 goto route_ipi_out_bad;
974 v3_unlock(dest_apic->lock);
981 PrintError("apic: Error routing IPI, invalid Mode (%d)\n", icr->dst_shorthand);
982 goto route_ipi_out_bad;
986 // route_ipi_out_good:
987 //v3_unlock(apic_dev->ipi_lock);
991 //v3_unlock(apic_dev->ipi_lock);
996 // External function, expected to acquire lock on apic
// MMIO read handler for this core's APIC page. Computes the register offset
// from the base address, looks up the 32-bit register value in the big
// switch, then copies 1, 2, or 4 bytes of it into dst depending on the
// access length and alignment.
// NOTE(review): this listing has dropped lines (the val declaration, many
// case labels and break statements, and some return paths); the case-label
// groupings shown are therefore incomplete -- verify against the full source.
997 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
998 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
999 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1000 addr_t reg_addr = guest_addr - apic->base_addr;
1001 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
1004 v3_lock(apic->lock);
1006 PrintDebug("apic %u: core %u: at %p: Read apic address space (%p)\n",
1007 apic->lapic_id.val, core->cpu_id, apic, (void *)guest_addr);
1009 if (msr->apic_enable == 0) {
// Reads while the APIC is disabled in the base MSR are rejected.
1010 PrintError("apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
1011 apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
1013 goto apic_read_out_bad;
1017 /* Because "May not be supported" doesn't matter to Linux developers... */
1018 /* if (length != 4) { */
1019 /* PrintError("Invalid apic read length (%d)\n", length); */
// Registers are 16-byte spaced but addressed here at 4-byte granularity.
1023 switch (reg_addr & ~0x3) {
1025 // Well, only an idiot would read from a architectural write only register
1027 // PrintError("Attempting to read from write only register\n");
1032 case APIC_ID_OFFSET:
1033 val = apic->lapic_id.val;
1035 case APIC_VERSION_OFFSET:
1036 val = apic->apic_ver.val;
1039 val = apic->task_prio.val;
1042 val = apic->arb_prio.val;
1045 val = apic->proc_prio.val;
1047 case REMOTE_READ_OFFSET:
1048 val = apic->rem_rd_data;
1051 val = apic->log_dst.val;
1054 val = apic->dst_fmt.val;
1056 case SPURIOUS_INT_VEC_OFFSET:
1057 val = apic->spurious_int.val;
1060 val = apic->err_status.val;
1062 case TMR_LOC_VEC_TBL_OFFSET:
1063 val = apic->tmr_vec_tbl.val;
1065 case LINT0_VEC_TBL_OFFSET:
1066 val = apic->lint0_vec_tbl.val;
1068 case LINT1_VEC_TBL_OFFSET:
1069 val = apic->lint1_vec_tbl.val;
1071 case ERR_VEC_TBL_OFFSET:
1072 val = apic->err_vec_tbl.val;
1074 case TMR_INIT_CNT_OFFSET:
1075 val = apic->tmr_init_cnt;
1077 case TMR_DIV_CFG_OFFSET:
1078 val = apic->tmr_div_cfg.val;
// IER0..IER7: 32-bit windows into the 256-bit enable bitmap.
1082 val = *(uint32_t *)(apic->int_en_reg);
1085 val = *(uint32_t *)(apic->int_en_reg + 4);
1088 val = *(uint32_t *)(apic->int_en_reg + 8);
1091 val = *(uint32_t *)(apic->int_en_reg + 12);
1094 val = *(uint32_t *)(apic->int_en_reg + 16);
1097 val = *(uint32_t *)(apic->int_en_reg + 20);
1100 val = *(uint32_t *)(apic->int_en_reg + 24);
1103 val = *(uint32_t *)(apic->int_en_reg + 28);
// ISR0..ISR7: 32-bit windows into the 256-bit in-service bitmap.
1107 val = *(uint32_t *)(apic->int_svc_reg);
1110 val = *(uint32_t *)(apic->int_svc_reg + 4);
1113 val = *(uint32_t *)(apic->int_svc_reg + 8);
1116 val = *(uint32_t *)(apic->int_svc_reg + 12);
1119 val = *(uint32_t *)(apic->int_svc_reg + 16);
1122 val = *(uint32_t *)(apic->int_svc_reg + 20);
1125 val = *(uint32_t *)(apic->int_svc_reg + 24);
1128 val = *(uint32_t *)(apic->int_svc_reg + 28);
// TMR0..TMR7: 32-bit windows into the 256-bit trigger-mode bitmap.
1132 val = *(uint32_t *)(apic->trig_mode_reg);
1135 val = *(uint32_t *)(apic->trig_mode_reg + 4);
1138 val = *(uint32_t *)(apic->trig_mode_reg + 8);
1141 val = *(uint32_t *)(apic->trig_mode_reg + 12);
1144 val = *(uint32_t *)(apic->trig_mode_reg + 16);
1147 val = *(uint32_t *)(apic->trig_mode_reg + 20);
1150 val = *(uint32_t *)(apic->trig_mode_reg + 24);
1153 val = *(uint32_t *)(apic->trig_mode_reg + 28);
// IRR0..IRR7: 32-bit windows into the 256-bit request bitmap.
1157 val = *(uint32_t *)(apic->int_req_reg);
1160 val = *(uint32_t *)(apic->int_req_reg + 4);
1163 val = *(uint32_t *)(apic->int_req_reg + 8);
1166 val = *(uint32_t *)(apic->int_req_reg + 12);
1169 val = *(uint32_t *)(apic->int_req_reg + 16);
1172 val = *(uint32_t *)(apic->int_req_reg + 20);
1175 val = *(uint32_t *)(apic->int_req_reg + 24);
1178 val = *(uint32_t *)(apic->int_req_reg + 28);
1180 case TMR_CUR_CNT_OFFSET:
1181 val = apic->tmr_cur_cnt;
1184 // We are not going to implement these....
1185 case THERM_LOC_VEC_TBL_OFFSET:
1186 val = apic->therm_loc_vec_tbl.val;
1188 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1189 val = apic->perf_ctr_loc_vec_tbl.val;
1194 // handled registers
1195 case INT_CMD_LO_OFFSET:
1196 val = apic->int_cmd.lo;
1198 case INT_CMD_HI_OFFSET:
1199 val = apic->int_cmd.hi;
1202 // handle current timer count
1204 // Unhandled Registers
1205 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1206 val = apic->ext_intr_vec_tbl[0].val;
1208 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1209 val = apic->ext_intr_vec_tbl[1].val;
1211 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1212 val = apic->ext_intr_vec_tbl[2].val;
1214 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1215 val = apic->ext_intr_vec_tbl[3].val;
1219 case EXT_APIC_FEATURE_OFFSET:
1220 case EXT_APIC_CMD_OFFSET:
// default: unknown offset -> error out (value reported as zero).
1224 PrintError("apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
1225 apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
1226 goto apic_read_out_bad;
// Copy out 1, 2, or 4 bytes of val according to access length/alignment.
1231 uint_t byte_addr = reg_addr & 0x3;
1232 uint8_t * val_ptr = (uint8_t *)dst;
1234 *val_ptr = *(((uint8_t *)&val) + byte_addr);
1236 } else if ((length == 2) &&
1237 ((reg_addr & 0x3) == 0x3)) {
1238 uint_t byte_addr = reg_addr & 0x3;
1239 uint16_t * val_ptr = (uint16_t *)dst;
// NOTE(review): this indexes a uint16_t* by byte_addr (a byte offset), so
// the effective offset is byte_addr*2 -- looks suspicious; verify against
// the complete source before changing.
1240 *val_ptr = *(((uint16_t *)&val) + byte_addr);
1242 } else if (length == 4) {
1243 uint32_t * val_ptr = (uint32_t *)dst;
1247 PrintError("apic %u: core %u: Invalid apic read length (%d)\n",
1248 apic->lapic_id.val, core->cpu_id, length);
1249 goto apic_read_out_bad;
1252 PrintDebug("apic %u: core %u: Read finished (val=%x)\n",
1253 apic->lapic_id.val, core->cpu_id, *(uint32_t *)dst);
1256 // apic_read_out_good:
1257 v3_unlock(apic->lock);
1261 v3_unlock(apic->lock);
// Handle a guest write into this core's local APIC MMIO page.
// guest_addr is converted to a register offset relative to the per-core
// base address; most offsets are plain stores into the shadow register
// state, while a write to ICR-low dispatches an IPI via route_ipi().
// The per-apic lock is taken on entry and released on both the good and
// bad exit paths (sampled out of this view).
1269 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data) {
1270 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1271 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1272 addr_t reg_addr = guest_addr - apic->base_addr;
1273 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
1274 uint32_t op_val = *(uint32_t *)src;
1277 v3_lock(apic->lock);
1279 PrintDebug("apic %u: core %u: at %p and priv_data is at %p\n",
1280 apic->lapic_id.val, core->cpu_id, apic, priv_data);
1282 PrintDebug("apic %u: core %u: write to address space (%p) (val=%x)\n",
1283 apic->lapic_id.val, core->cpu_id, (void *)guest_addr, *(uint32_t *)src);
1285 if (msr->apic_enable == 0) {
1286 PrintError("apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
1287 apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
1288 goto apic_write_out_bad;
// NOTE(review): argument order below looks swapped relative to the format
// string — 'length' is printed where the core id belongs and vice versa.
1293 PrintError("apic %u: core %u: Invalid apic write length (%d)\n",
1294 apic->lapic_id.val, length, core->cpu_id);
1295 goto apic_write_out_bad;
// Read-only registers: a guest write is reported but deliberately ignored
// (the error exit is commented out below).
1299 case REMOTE_READ_OFFSET:
1300 case APIC_VERSION_OFFSET:
1327 case EXT_APIC_FEATURE_OFFSET:
1329 PrintError("apic %u: core %u: Attempting to write to read only register %p (error)\n",
1330 apic->lapic_id.val, core->cpu_id, (void *)reg_addr);
1331 // goto apic_write_out_bad;
// Writable registers: straight stores into the shadow state.
1336 case APIC_ID_OFFSET:
1337 V3_Print("apic %u: core %u: my id is being changed to %u\n",
1338 apic->lapic_id.val, core->cpu_id, op_val);
1340 apic->lapic_id.val = op_val;
1343 apic->task_prio.val = op_val;
1346 PrintDebug("apic %u: core %u: setting log_dst.val to 0x%x\n",
1347 apic->lapic_id.val, core->cpu_id, op_val);
1348 apic->log_dst.val = op_val;
1351 apic->dst_fmt.val = op_val;
1353 case SPURIOUS_INT_VEC_OFFSET:
1354 apic->spurious_int.val = op_val;
1357 apic->err_status.val = op_val;
1359 case TMR_LOC_VEC_TBL_OFFSET:
1360 apic->tmr_vec_tbl.val = op_val;
1362 case THERM_LOC_VEC_TBL_OFFSET:
1363 apic->therm_loc_vec_tbl.val = op_val;
1365 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1366 apic->perf_ctr_loc_vec_tbl.val = op_val;
1368 case LINT0_VEC_TBL_OFFSET:
1369 apic->lint0_vec_tbl.val = op_val;
1371 case LINT1_VEC_TBL_OFFSET:
1372 apic->lint1_vec_tbl.val = op_val;
1374 case ERR_VEC_TBL_OFFSET:
1375 apic->err_vec_tbl.val = op_val;
// Writing the initial count also resets the current count (restarts timer).
1377 case TMR_INIT_CNT_OFFSET:
1378 apic->tmr_init_cnt = op_val;
1379 apic->tmr_cur_cnt = op_val;
1381 case TMR_CUR_CNT_OFFSET:
1382 apic->tmr_cur_cnt = op_val;
1384 case TMR_DIV_CFG_OFFSET:
1385 apic->tmr_div_cfg.val = op_val;
1389 // Enable mask (256 bits)
// Each case below stores one 32-bit chunk of the 256-bit IER bitmap.
1391 *(uint32_t *)(apic->int_en_reg) = op_val;
1394 *(uint32_t *)(apic->int_en_reg + 4) = op_val;
1397 *(uint32_t *)(apic->int_en_reg + 8) = op_val;
1400 *(uint32_t *)(apic->int_en_reg + 12) = op_val;
1403 *(uint32_t *)(apic->int_en_reg + 16) = op_val;
1406 *(uint32_t *)(apic->int_en_reg + 20) = op_val;
1409 *(uint32_t *)(apic->int_en_reg + 24) = op_val;
1412 *(uint32_t *)(apic->int_en_reg + 28) = op_val;
1415 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1416 apic->ext_intr_vec_tbl[0].val = op_val;
1418 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1419 apic->ext_intr_vec_tbl[1].val = op_val;
1421 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1422 apic->ext_intr_vec_tbl[2].val = op_val;
1424 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1425 apic->ext_intr_vec_tbl[3].val = op_val;
1431 // do eoi (we already have the lock)
1435 case INT_CMD_LO_OFFSET: {
1436 // execute command (we already have the lock)
// Snapshot the ICR before dropping the lock so route_ipi() sees a
// consistent command even if the guest races another write.
1438 struct int_cmd_reg tmp_icr;
1440 apic->int_cmd.lo = op_val;
1442 tmp_icr=apic->int_cmd;
1444 // V3_Print("apic %u: core %u: sending cmd 0x%llx to apic %u\n",
1445 // apic->lapic_id.val, core->cpu_id,
1446 // apic->int_cmd.val, apic->int_cmd.dst);
1450 v3_unlock(apic->lock);
1452 // route_ipi is responsible for locking apics, so we go in unlocked
1453 if (route_ipi(apic_dev, apic, &tmp_icr) == -1) {
1454 PrintError("IPI Routing failure\n");
// NOTE(review): the lock was already released above, but the bad-exit
// path unlocks again — this looks like a double unlock; verify.
1455 goto apic_write_out_bad;
1458 // v3_lock(apic->lock); // expected for leaving this function
1463 case INT_CMD_HI_OFFSET: {
1464 // already have the lock
// Warn if ICR-high is rewritten while a command is still in flight.
1466 PrintError("apic %u: core %u: writing command high=0x%x while in_icr=1\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
1469 apic->int_cmd.hi = op_val;
1470 //V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
1476 // Unhandled Registers
1477 case EXT_APIC_CMD_OFFSET:
1480 PrintError("apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
1481 apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
1483 goto apic_write_out_bad;
1486 PrintDebug("apic %u: core %u: Write finished\n", apic->lapic_id.val, core->cpu_id);
1488 // apic_write_out_good:
1489 v3_unlock(apic->lock);
1493 v3_unlock(apic->lock);
1499 /* Interrupt Controller Functions */
1501 // internally used, expects caller to lock
// Internal pending-interrupt check; caller must already hold apic->lock.
// An interrupt is deliverable when the highest requested vector (IRR)
// exists and outranks the highest in-service vector (ISR).
1502 static int apic_intr_pending_nolock(struct guest_info * core, void * private_data) {
1503 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1504 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1505 int req_irq = get_highest_irr(apic);
1506 int svc_irq = get_highest_isr(apic);
1508 // PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->cpu_id,req_irq,svc_irq);
// req_irq == -1 means nothing requested; svc_irq == -1 means nothing in service.
1510 if ((req_irq >= 0) &&
1511 (req_irq > svc_irq)) {
1518 // externally visible, so must lock itself
// External entry point for the intr_ctrl_ops table: takes the per-apic
// lock, then delegates to the _nolock variant.
1519 static int apic_intr_pending(struct guest_info * core, void * private_data) {
1520 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1521 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1524 v3_lock(apic->lock);
1526 rc=apic_intr_pending_nolock(core,private_data);
1528 v3_unlock(apic->lock);
1533 // Internal - no lock
// Internal: return the vector number to deliver next; caller holds the lock.
// Delivers the highest requested vector only if it outranks (or nothing is)
// currently in service.
1534 static int apic_get_intr_number_nolock(struct guest_info * core, void * private_data) {
1535 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1536 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1537 int req_irq = get_highest_irr(apic);
1538 int svc_irq = get_highest_isr(apic);
// No vector in service: the requested one (if any) wins outright.
1540 if (svc_irq == -1) {
1542 } else if (svc_irq < req_irq) {
1550 // Externally visible, so must lock itself
// External entry point for the intr_ctrl_ops table: locks the apic and
// delegates to apic_get_intr_number_nolock().
1551 static int apic_get_intr_number(struct guest_info * core, void * private_data) {
1552 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1553 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1556 v3_lock(apic->lock);
1558 rc=apic_get_intr_number_nolock(core,private_data);
1560 v3_unlock(apic->lock);
1567 // Here there is no source APIC, so there is no need to lock it
1568 // Furthermore, the expectation is that route_ipi will lock the destination apic
// Public API: build an ICR from the generic IPI descriptor and hand it to
// route_ipi().  There is no source apic (src arg is NULL), so no source
// lock is needed; route_ipi locks the destination apic(s) itself.
1569 int v3_apic_send_ipi(struct v3_vm_info * vm, struct v3_gen_ipi * ipi, void * dev_data) {
1570 struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1571 (((struct vm_device *)dev_data)->private_data);
1572 struct int_cmd_reg tmp_icr;
1574 // zero out all the fields
// Translate the generic descriptor into ICR fields one by one.
1577 tmp_icr.vec = ipi->vector;
1578 tmp_icr.del_mode = ipi->mode;
1579 tmp_icr.dst_mode = ipi->logical;
1580 tmp_icr.trig_mode = ipi->trigger_mode;
1581 tmp_icr.dst_shorthand = ipi->dst_shorthand;
1582 tmp_icr.dst = ipi->dst;
1584 // route_ipi is responsible for locking the destination apic
1585 return route_ipi(apic_dev, NULL, &tmp_icr);
// Public API: raise IRQ 'irq' on the apic belonging to core 'dst'.
// Activates the vector under the destination apic's lock; if the
// activation indicates a cross-call is needed and we are not already on
// the destination's physical CPU, kick that CPU so it notices the IRQ.
1589 int v3_apic_raise_intr(struct v3_vm_info * vm, uint32_t irq, uint32_t dst, void * dev_data) {
1590 struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1591 (((struct vm_device*)dev_data)->private_data);
1592 struct apic_state * apic = &(apic_dev->apics[dst]);
1595 PrintDebug("apic %u core ?: raising interrupt IRQ %u (dst = %u).\n", apic->lapic_id.val, irq, dst);
1597 v3_lock(apic->lock);
1599 do_xcall=activate_apic_irq_nolock(apic, irq);
1602 PrintError("Failed to activate apic irq\n");
1603 v3_unlock(apic->lock);
// Cross-call only when needed and only in multithreaded-OS builds.
1607 if (do_xcall>0 && (V3_Get_CPU() != dst)) {
1608 #ifdef CONFIG_MULTITHREAD_OS
1609 v3_interrupt_cpu(vm, dst, 0);
1615 v3_unlock(apic->lock);
1620 // internal - caller must lock
// Internal: mark vector 'irq' as in-service; caller must hold apic->lock.
// The 256-bit IRR/ISR bitmaps are addressed as byte (irq / 8) and bit
// (irq % 8).  The transition only happens if the request bit is set,
// i.e. this apic actually initiated the interrupt.
1621 static int apic_begin_irq_nolock(struct guest_info * core, void * private_data, int irq) {
1622 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1623 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1624 int major_offset = (irq & ~0x00000007) >> 3;
1625 int minor_offset = irq & 0x00000007;
1626 uint8_t *req_location = apic->int_req_reg + major_offset;
1627 uint8_t *svc_location = apic->int_svc_reg + major_offset;
1628 uint8_t flag = 0x01 << minor_offset;
1630 if (*req_location & flag) {
1631 // we will only pay attention to a begin irq if we
1632 // know that we initiated it!
// Move the vector from "requested" (IRR) to "in service" (ISR).
1633 *svc_location |= flag;
1634 *req_location &= ~flag;
1637 //PrintDebug("apic %u: core %u: begin irq for %d ignored since I don't own it\n",
1638 // apic->lapic_id.val, core->cpu_id, irq);
1644 // Since this is called externally, it must lock the apic itself
// External entry point for the intr_ctrl_ops table: locks the apic and
// delegates to apic_begin_irq_nolock().
1645 static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
1646 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1647 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1650 v3_lock(apic->lock);
1652 rc=apic_begin_irq_nolock(core,private_data,irq);
1654 v3_unlock(apic->lock);
1661 /* Timer Functions */
1662 // Caller will lock the apic
// Internal timer tick; caller must hold apic->lock.  Converts elapsed
// guest cpu_cycles into APIC timer ticks using the divide configuration,
// decrements the current count, and raises the timer interrupt when the
// count reaches zero (re-arming it in periodic mode).
1663 static void apic_update_time_nolock(struct guest_info * core,
1664 uint64_t cpu_cycles, uint64_t cpu_freq,
1666 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1667 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1669 // 64-bit arithmetic is unreliable in the 32-bit GCC runtime, so the
1669 // 32-bit build falls back to a 32-bit tick counter.
1671 uint64_t tmr_ticks = 0;
1673 uint32_t tmr_ticks = 0;
// Divider is encoded in the low byte of the divide-configuration register.
1676 uint8_t tmr_div = *(uint8_t *)&(apic->tmr_div_cfg.val);
1677 uint_t shift_num = 0;
1680 // Check whether this is true:
1681 // -> If the Init count is zero then the timer is disabled
1682 // and doesn't just blitz interrupts to the CPU
1683 if ((apic->tmr_init_cnt == 0) ||
1684 ( (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_ONESHOT) &&
1685 (apic->tmr_cur_cnt == 0))) {
1686 //PrintDebug("apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->cpu_id);
// Map the divide configuration onto a right-shift amount (power-of-two divide).
1704 case APIC_TMR_DIV16:
1707 case APIC_TMR_DIV32:
1710 case APIC_TMR_DIV64:
1713 case APIC_TMR_DIV128:
1717 PrintError("apic %u: core %u: Invalid Timer Divider configuration\n",
1718 apic->lapic_id.val, core->cpu_id);
1722 tmr_ticks = cpu_cycles >> shift_num;
1723 // PrintDebug("Timer Ticks: %p\n", (void *)tmr_ticks);
// Not yet expired: just consume the ticks.
1725 if (tmr_ticks < apic->tmr_cur_cnt) {
1726 apic->tmr_cur_cnt -= tmr_ticks;
// Expired: account for the remainder past zero, then fire.
1728 tmr_ticks -= apic->tmr_cur_cnt;
1729 apic->tmr_cur_cnt = 0;
1732 PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
1733 apic->lapic_id.val, core->cpu_id,
1734 apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
1736 if (apic_intr_pending_nolock(core, priv_data)) {
1737 PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
1738 apic->lapic_id.val, core->cpu_id,
1739 apic_get_intr_number(core, priv_data));
1742 if (activate_internal_irq_nolock(apic, APIC_TMR_INT) == -1) {
1743 PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
1744 apic->lapic_id.val, core->cpu_id);
// Periodic mode: reload the counter, preserving leftover ticks.
1747 if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
1748 tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
1749 apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
// External entry point for the timer_ops table: locks the apic and
// delegates to apic_update_time_nolock().
1757 static void apic_update_time(struct guest_info * core,
1758 uint64_t cpu_cycles, uint64_t cpu_freq,
1760 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1761 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1763 v3_lock(apic->lock);
1765 apic_update_time_nolock(core,cpu_cycles,cpu_freq,priv_data);
1767 v3_unlock(apic->lock);
// Interrupt-controller callbacks registered with each core; every entry
// is the locking variant, so callers need not hold the apic lock.
1772 static struct intr_ctrl_ops intr_ops = {
1773 .intr_pending = apic_intr_pending,
1774 .get_intr_number = apic_get_intr_number,
1775 .begin_irq = apic_begin_irq,
// Timer callback table: drives the APIC timer from guest cycle accounting.
1779 static struct v3_timer_ops timer_ops = {
1780 .update_timer = apic_update_time,
// Device teardown: detach each per-core apic's interrupt controller and
// timer, then unhook the APIC base-address MSR from the VM.
// NOTE(review): the apic_dev allocation itself is not visibly freed here —
// confirm against the sampled-out lines.
1786 static int apic_free(struct apic_dev_state * apic_dev) {
1788 struct v3_vm_info * vm = NULL;
1790 for (i = 0; i < apic_dev->num_apics; i++) {
1791 struct apic_state * apic = &(apic_dev->apics[i]);
1792 struct guest_info * core = apic->core;
1796 v3_remove_intr_controller(core, apic->controller_handle);
1799 v3_remove_timer(core, apic->timer);
1806 v3_unhook_msr(vm, BASE_ADDR_MSR);
// Device-framework operations; only teardown is provided.
1813 static struct v3_device_ops dev_ops = {
1814 .free = (int (*)(void *))apic_free,
// Device init: allocate one apic_state per guest core (in a single
// allocation trailing the device state), register each with the core's
// interrupt and timer frameworks, hook the 4KB APIC MMIO page, and hook
// the APIC base-address MSR.  Returns via the sampled-out success/error
// paths; on timer failure the whole device is removed.
1819 static int apic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1820 char * dev_id = v3_cfg_val(cfg, "ID");
1821 struct apic_dev_state * apic_dev = NULL;
1824 PrintDebug("apic: creating an APIC for each core\n");
// NOTE(review): V3_Malloc result is used without a visible NULL check.
1826 apic_dev = (struct apic_dev_state *)V3_Malloc(sizeof(struct apic_dev_state) +
1827 sizeof(struct apic_state) * vm->num_cores);
1829 apic_dev->num_apics = vm->num_cores;
1831 //v3_lock_init(&(apic_dev->ipi_lock));
1833 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, apic_dev);
1836 PrintError("apic: Could not attach device %s\n", dev_id);
// Per-core setup: state init, interrupt controller, timer, MMIO hook.
1842 for (i = 0; i < vm->num_cores; i++) {
1843 struct apic_state * apic = &(apic_dev->apics[i]);
1844 struct guest_info * core = &(vm->cores[i]);
1848 init_apic_state(apic, i);
1850 apic->controller_handle = v3_register_intr_controller(core, &intr_ops, apic_dev);
1852 apic->timer = v3_add_timer(core, &timer_ops, apic_dev);
1854 if (apic->timer == NULL) {
1855 PrintError("APIC: Failed to attach timer to core %d\n", i);
1856 v3_remove_device(dev);
1860 v3_hook_full_mem(vm, core->cpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
1862 PrintDebug("apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
// Debug-only sanity pass over the freshly initialized apics.
1865 #ifdef CONFIG_DEBUG_APIC
1866 for (i = 0; i < vm->num_cores; i++) {
1867 struct apic_state * apic = &(apic_dev->apics[i]);
1868 PrintDebug("apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
1869 i, apic, apic->lapic_id.val, apic->base_addr_msr.value,apic->core);
1874 PrintDebug("apic: priv_data is at %p\n", apic_dev);
1876 v3_hook_msr(vm, BASE_ADDR_MSR, read_apic_msr, write_apic_msr, apic_dev);
// Register this device with the framework: apic_init runs for each
// "LAPIC" entry in the VM configuration.
1883 device_register("LAPIC", apic_init)