2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Authors: Jack Lange <jarusl@cs.northwestern.edu>
15 * Peter Dinda <pdinda@northwestern.edu> (SMP)
17 * This is free software. You are permitted to use,
18 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <devices/apic.h>
23 #include <devices/apic_regs.h>
24 #include <palacios/vmm.h>
25 #include <palacios/vmm_msr.h>
26 #include <palacios/vmm_sprintf.h>
27 #include <palacios/vm_guest.h>
28 #include <palacios/vmm_types.h>
// Debug plumbing: PrintDebug compiles away entirely when APIC debugging is
// disabled.  (NOTE(review): the matching #endif lines are not visible in this
// sampled view of the file.)
31 #ifndef CONFIG_DEBUG_APIC
33 #define PrintDebug(fmt, args...)
// Human-readable names for the ICR shorthand and delivery-mode fields; used
// only by debug output, so only compiled in when debugging is on.
36 #ifdef CONFIG_DEBUG_APIC
37 static char * shorthand_str[] = {
44 static char * deliverymode_str[] = {
// The local interrupt sources this model can raise internally; consumed by
// activate_internal_irq_nolock() to select the matching LVT entry.
56 typedef enum { APIC_TMR_INT, APIC_THERM_INT, APIC_PERF_INT,
57 APIC_LINT0_INT, APIC_LINT1_INT, APIC_ERR_INT } apic_irq_type_t;
// ICR delivery-mode encodings (see Intel SDM Vol. 3A, local APIC chapter).
59 #define APIC_FIXED_DELIVERY 0x0
60 #define APIC_SMI_DELIVERY 0x2
61 #define APIC_NMI_DELIVERY 0x4
62 #define APIC_INIT_DELIVERY 0x5
63 #define APIC_EXTINT_DELIVERY 0x7
// IA32_APIC_BASE MSR number and the architectural default MMIO base address.
66 #define BASE_ADDR_MSR 0x0000001B
67 #define DEFAULT_BASE_ADDR 0xfee00000
// Register offsets within the 4KB APIC MMIO page.  Registers are 32 bits
// wide on 16-byte boundaries; the 256-bit ISR/TMR/IRR/IER sets are split
// across eight such slots each.
69 #define APIC_ID_OFFSET 0x020
70 #define APIC_VERSION_OFFSET 0x030
71 #define TPR_OFFSET 0x080
72 #define APR_OFFSET 0x090
73 #define PPR_OFFSET 0x0a0
74 #define EOI_OFFSET 0x0b0
75 #define REMOTE_READ_OFFSET 0x0c0
76 #define LDR_OFFSET 0x0d0
77 #define DFR_OFFSET 0x0e0
78 #define SPURIOUS_INT_VEC_OFFSET 0x0f0
// In-Service Register (interrupts accepted but not yet EOI'd).
80 #define ISR_OFFSET0 0x100 // 0x100 - 0x170
81 #define ISR_OFFSET1 0x110 // 0x100 - 0x170
82 #define ISR_OFFSET2 0x120 // 0x100 - 0x170
83 #define ISR_OFFSET3 0x130 // 0x100 - 0x170
84 #define ISR_OFFSET4 0x140 // 0x100 - 0x170
85 #define ISR_OFFSET5 0x150 // 0x100 - 0x170
86 #define ISR_OFFSET6 0x160 // 0x100 - 0x170
87 #define ISR_OFFSET7 0x170 // 0x100 - 0x170
// Trigger Mode Register (level vs. edge per vector).
89 #define TRIG_OFFSET0 0x180 // 0x180 - 0x1f0
90 #define TRIG_OFFSET1 0x190 // 0x180 - 0x1f0
91 #define TRIG_OFFSET2 0x1a0 // 0x180 - 0x1f0
92 #define TRIG_OFFSET3 0x1b0 // 0x180 - 0x1f0
93 #define TRIG_OFFSET4 0x1c0 // 0x180 - 0x1f0
94 #define TRIG_OFFSET5 0x1d0 // 0x180 - 0x1f0
95 #define TRIG_OFFSET6 0x1e0 // 0x180 - 0x1f0
96 #define TRIG_OFFSET7 0x1f0 // 0x180 - 0x1f0
// Interrupt Request Register (interrupts raised but not yet accepted).
99 #define IRR_OFFSET0 0x200 // 0x200 - 0x270
100 #define IRR_OFFSET1 0x210 // 0x200 - 0x270
101 #define IRR_OFFSET2 0x220 // 0x200 - 0x270
102 #define IRR_OFFSET3 0x230 // 0x200 - 0x270
103 #define IRR_OFFSET4 0x240 // 0x200 - 0x270
104 #define IRR_OFFSET5 0x250 // 0x200 - 0x270
105 #define IRR_OFFSET6 0x260 // 0x200 - 0x270
106 #define IRR_OFFSET7 0x270 // 0x200 - 0x270
109 #define ESR_OFFSET 0x280
110 #define INT_CMD_LO_OFFSET 0x300
111 #define INT_CMD_HI_OFFSET 0x310
// Local Vector Table entries (timer, thermal, perf counter, LINT0/1, error).
112 #define TMR_LOC_VEC_TBL_OFFSET 0x320
113 #define THERM_LOC_VEC_TBL_OFFSET 0x330
114 #define PERF_CTR_LOC_VEC_TBL_OFFSET 0x340
115 #define LINT0_VEC_TBL_OFFSET 0x350
116 #define LINT1_VEC_TBL_OFFSET 0x360
117 #define ERR_VEC_TBL_OFFSET 0x370
118 #define TMR_INIT_CNT_OFFSET 0x380
119 #define TMR_CUR_CNT_OFFSET 0x390
120 #define TMR_DIV_CFG_OFFSET 0x3e0
// AMD extended-APIC register space (0x400+); see AMD APM Vol. 2.
121 #define EXT_APIC_FEATURE_OFFSET 0x400
122 #define EXT_APIC_CMD_OFFSET 0x410
123 #define SEOI_OFFSET 0x420
// Interrupt Enable Register (extended-APIC per-vector enable mask).
125 #define IER_OFFSET0 0x480 // 0x480 - 0x4f0
126 #define IER_OFFSET1 0x490 // 0x480 - 0x4f0
127 #define IER_OFFSET2 0x4a0 // 0x480 - 0x4f0
128 #define IER_OFFSET3 0x4b0 // 0x480 - 0x4f0
129 #define IER_OFFSET4 0x4c0 // 0x480 - 0x4f0
130 #define IER_OFFSET5 0x4d0 // 0x480 - 0x4f0
131 #define IER_OFFSET6 0x4e0 // 0x480 - 0x4f0
132 #define IER_OFFSET7 0x4f0 // 0x480 - 0x4f0
// Extended interrupt local vector table (4 entries).
134 #define EXT_INT_LOC_VEC_TBL_OFFSET0 0x500 // 0x500 - 0x530
135 #define EXT_INT_LOC_VEC_TBL_OFFSET1 0x510 // 0x500 - 0x530
136 #define EXT_INT_LOC_VEC_TBL_OFFSET2 0x520 // 0x500 - 0x530
137 #define EXT_INT_LOC_VEC_TBL_OFFSET3 0x530 // 0x500 - 0x530
// IA32_APIC_BASE MSR layout fragment: BSP flag (bit 8), global enable
// (bit 11), and the 40-bit base address field.  (NOTE(review): the enclosing
// struct/union header lines are not visible in this sampled view.)
144 uint8_t bootstrap_cpu : 1;
146 uint8_t apic_enable : 1;
147 uint64_t base_addr : 40;
149 } __attribute__((packed));
150 } __attribute__((packed));
151 } __attribute__((packed));
// Per-core INIT/SIPI startup handshake state.  (NOTE(review): a SIPI member
// is referenced elsewhere in this file but its enumerator line is not visible
// here.)
155 typedef enum {INIT_ST,
157 STARTED} ipi_state_t;
159 struct apic_dev_state;
// Per-virtual-core local APIC state: the base-address MSR shadow, every
// memory-mapped register, and the 256-bit per-vector bitmaps (32 bytes each):
// IRR (requested), ISR (in service), IER (enabled), TMR (trigger mode).
165 struct apic_msr base_addr_msr;
170 struct lapic_id_reg lapic_id;
171 struct apic_ver_reg apic_ver;
172 struct ext_apic_ctrl_reg ext_apic_ctrl;
173 struct local_vec_tbl_reg local_vec_tbl;
174 struct tmr_vec_tbl_reg tmr_vec_tbl;
175 struct tmr_div_cfg_reg tmr_div_cfg;
176 struct lint_vec_tbl_reg lint0_vec_tbl;
177 struct lint_vec_tbl_reg lint1_vec_tbl;
178 struct perf_ctr_loc_vec_tbl_reg perf_ctr_loc_vec_tbl;
179 struct therm_loc_vec_tbl_reg therm_loc_vec_tbl;
180 struct err_vec_tbl_reg err_vec_tbl;
181 struct err_status_reg err_status;
182 struct spurious_int_reg spurious_int;
183 struct int_cmd_reg int_cmd;
184 struct log_dst_reg log_dst;
185 struct dst_fmt_reg dst_fmt;
186 struct arb_prio_reg arb_prio;
187 struct task_prio_reg task_prio;
188 struct proc_prio_reg proc_prio;
189 struct ext_apic_feature_reg ext_apic_feature;
190 struct spec_eoi_reg spec_eoi;
// Timer counters (initial and current count registers).
193 uint32_t tmr_cur_cnt;
194 uint32_t tmr_init_cnt;
197 struct local_vec_tbl_reg ext_intr_vec_tbl[4];
199 uint32_t rem_rd_data;
202 ipi_state_t ipi_state;
// 256-bit vector bitmaps, one bit per interrupt vector.
204 uint8_t int_req_reg[32];
205 uint8_t int_svc_reg[32];
206 uint8_t int_en_reg[32];
207 uint8_t trig_mode_reg[32];
// Back-pointer to the guest core this APIC belongs to.
209 struct guest_info * core;
211 void * controller_handle;
213 struct v3_timer * timer;
// Device-level state: one apic_state per guest core, allocated as a
// trailing array (apics[0] is the old GNU zero-length-array idiom; C99
// would spell it apics[]).
226 struct apic_dev_state {
228 v3_lock_t ipi_lock; // acquired by route_ipi - only one IPI active at a time
230 struct apic_state apics[0];
231 } __attribute__((packed));
// Forward declarations: the MMIO hook handlers, needed before they are
// referenced by the MSR write handler's re-hooking path.
237 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data);
238 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data);
// Reset one virtual local APIC to its power-on state.
// id selects whether this is the bootstrap core (BSP bit set in the base MSR)
// or an application processor.  All vector bitmaps are cleared except the
// enable mask, which starts all-ones (every vector enabled).
241 static void init_apic_state(struct apic_state * apic, uint32_t id) {
242 apic->base_addr = DEFAULT_BASE_ADDR;
245 // boot processor, enabled
246 apic->base_addr_msr.value = 0x0000000000000900LL;
248 // ap processor, enabled
249 apic->base_addr_msr.value = 0x0000000000000800LL;
252 // same base address regardless of ap or main
253 apic->base_addr_msr.value |= ((uint64_t)DEFAULT_BASE_ADDR);
255 PrintDebug("apic %u: (init_apic_state): msr=0x%llx\n",id, apic->base_addr_msr.value);
257 PrintDebug("apic %u: (init_apic_state): Sizeof Interrupt Request Register %d, should be 32\n",
258 id, (uint_t)sizeof(apic->int_req_reg));
// Clear IRR/ISR/TMR; enable every vector by default (IER all-ones).
260 memset(apic->int_req_reg, 0, sizeof(apic->int_req_reg));
261 memset(apic->int_svc_reg, 0, sizeof(apic->int_svc_reg));
262 memset(apic->int_en_reg, 0xff, sizeof(apic->int_en_reg));
263 memset(apic->trig_mode_reg, 0, sizeof(apic->trig_mode_reg));
265 apic->eoi = 0x00000000;
266 apic->rem_rd_data = 0x00000000;
267 apic->tmr_init_cnt = 0x00000000;
268 apic->tmr_cur_cnt = 0x00000000;
270 apic->lapic_id.val = id;
// Core starts waiting for the INIT/SIPI startup sequence.
272 apic->ipi_state = INIT_ST;
274 // The P6 has 6 LVT entries, so we set the value to (6-1)...
275 apic->apic_ver.val = 0x80050010;
277 apic->task_prio.val = 0x00000000;
278 apic->arb_prio.val = 0x00000000;
279 apic->proc_prio.val = 0x00000000;
280 apic->log_dst.val = 0x00000000;
// DFR all-ones = flat model (architectural reset value).
281 apic->dst_fmt.val = 0xffffffff;
282 apic->spurious_int.val = 0x000000ff;
283 apic->err_status.val = 0x00000000;
284 apic->int_cmd.val = 0x0000000000000000LL;
// LVT entries reset to 0x00010000 -- the architectural reset value with the
// mask bit set, so no local interrupts fire until the guest programs them.
285 apic->tmr_vec_tbl.val = 0x00010000;
286 apic->therm_loc_vec_tbl.val = 0x00010000;
287 apic->perf_ctr_loc_vec_tbl.val = 0x00010000;
288 apic->lint0_vec_tbl.val = 0x00010000;
289 apic->lint1_vec_tbl.val = 0x00010000;
290 apic->err_vec_tbl.val = 0x00010000;
291 apic->tmr_div_cfg.val = 0x00000000;
292 //apic->ext_apic_feature.val = 0x00000007;
293 apic->ext_apic_feature.val = 0x00040007;
294 apic->ext_apic_ctrl.val = 0x00000000;
295 apic->spec_eoi.val = 0x00000000;
297 v3_lock_init(&(apic->lock));
307 // MSR handler - locks apic itself
// Handle a guest rdmsr of IA32_APIC_BASE: return this core's base address.
// NOTE(review): the matching v3_lock() call is not visible in this sampled
// view, but the unlock below and the header comment imply it exists upstream.
308 static int read_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t * dst, void * priv_data) {
309 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
310 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
312 PrintDebug("apic %u: core %u: MSR read\n", apic->lapic_id.val, core->cpu_id);
314 dst->value = apic->base_addr;
315 v3_unlock(apic->lock);
319 // MSR handler - locks apic itself
// Handle a guest wrmsr of IA32_APIC_BASE: move the APIC MMIO window by
// deleting the memory hook at the old base and installing a full hook
// (apic_read/apic_write) over the 4KB page at the new base.
320 static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, void * priv_data) {
321 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
322 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
// Look up the region currently hooked at the old base so we can remove it.
323 struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id, apic->base_addr);
326 PrintDebug("apic %u: core %u: MSR write\n", apic->lapic_id.val, core->cpu_id);
328 if (old_reg == NULL) {
330 PrintError("apic %u: core %u: APIC Base address region does not exit...\n",
331 apic->lapic_id.val, core->cpu_id);
// Tear down the old mapping before switching to the new base address.
337 v3_delete_mem_region(core->vm_info, old_reg);
339 apic->base_addr = src.value;
341 if (v3_hook_full_mem(core->vm_info, core->cpu_id, apic->base_addr,
342 apic->base_addr + PAGE_SIZE_4KB,
343 apic_read, apic_write, apic_dev) == -1) {
344 PrintError("apic %u: core %u: Could not hook new APIC Base address\n",
345 apic->lapic_id.val, core->cpu_id);
346 v3_unlock(apic->lock);
350 v3_unlock(apic->lock);
355 // irq_num is the bit offset into a 256 bit buffer...
// Raise interrupt vector irq_num on this APIC by setting its IRR bit,
// provided the vector's IER (enable) bit is set.  Returns:
358 // 0 = OK, no interrupt needed now
359 // 1 = OK, interrupt needed now
360 // the caller is expected to have locked the apic
361 static int activate_apic_irq_nolock(struct apic_state * apic, uint32_t irq_num) {
// Split the vector into byte index (bits 7:3) and bit index (bits 2:0)
// within the 32-byte / 256-bit IRR and IER bitmaps.
362 int major_offset = (irq_num & ~0x00000007) >> 3;
363 int minor_offset = irq_num & 0x00000007;
364 uint8_t * req_location = apic->int_req_reg + major_offset;
365 uint8_t * en_location = apic->int_en_reg + major_offset;
366 uint8_t flag = 0x1 << minor_offset;
// Vectors 0-15 are architecturally reserved; anything above 255 cannot fit
// in the bitmaps.
369 if (irq_num <= 15 || irq_num>255) {
370 PrintError("apic %u: core %d: Attempting to raise an invalid interrupt: %d\n",
371 apic->lapic_id.val, apic->core->cpu_id, irq_num);
376 PrintDebug("apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
// Already pending: the new request coalesces into the existing IRR bit.
378 if (*req_location & flag) {
379 PrintDebug("Interrupt %d coallescing\n", irq_num);
383 if (*en_location & flag) {
384 *req_location |= flag;
387 PrintError("apic %u: core %d: activate_apic_irq_nolock to deliver irq 0x%x when in_icr=1\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
// Vector disabled in the IER: drop the request.
393 PrintDebug("apic %u: core %d: Interrupt not enabled... %.2x\n",
394 apic->lapic_id.val, apic->core->cpu_id,*en_location);
401 // Caller is expected to have locked the apic
// Return the highest-numbered vector currently set in the In-Service
// Register, scanning the 256-bit bitmap from the top (highest priority).
402 static int get_highest_isr(struct apic_state * apic) {
405 // We iterate backwards to find the highest priority
406 for (i = 31; i >= 0; i--) {
407 uint8_t * svc_major = apic->int_svc_reg + i;
409 if ((*svc_major) & 0xff) {
410 for (j = 7; j >= 0; j--) {
411 uint8_t flag = 0x1 << j;
412 if ((*svc_major) & flag) {
// Vector number = byte index * 8 + bit index.
413 return ((i * 8) + j);
423 // Caller is expected to have locked the apic
// Return the highest-numbered vector currently pending in the Interrupt
// Request Register; mirrors get_highest_isr() but scans the IRR bitmap.
424 static int get_highest_irr(struct apic_state * apic) {
427 // We iterate backwards to find the highest priority
428 for (i = 31; i >= 0; i--) {
429 uint8_t * req_major = apic->int_req_reg + i;
431 if ((*req_major) & 0xff) {
432 for (j = 7; j >= 0; j--) {
433 uint8_t flag = 0x1 << j;
434 if ((*req_major) & flag) {
// Vector number = byte index * 8 + bit index.
435 return ((i * 8) + j);
446 // Caller is expected to have locked the apic
// Guest wrote the EOI register: retire the highest-priority in-service
// interrupt by clearing its ISR bit.
447 static int apic_do_eoi(struct apic_state * apic) {
448 int isr_irq = get_highest_isr(apic);
451 int major_offset = (isr_irq & ~0x00000007) >> 3;
452 int minor_offset = isr_irq & 0x00000007;
453 uint8_t flag = 0x1 << minor_offset;
454 uint8_t * svc_location = apic->int_svc_reg + major_offset;
456 PrintDebug("apic %u: core ?: Received APIC EOI for IRQ %d\n", apic->lapic_id.val,isr_irq);
458 *svc_location &= ~flag;
// Cray XT platform quirk: vector 238 requires an explicit controller ack.
460 #ifdef CONFIG_CRAY_XT
462 if ((isr_irq == 238) ||
464 PrintDebug("apic %u: core ?: Acking IRQ %d\n", apic->lapic_id.val,isr_irq);
467 if (isr_irq == 238) {
// EOI with nothing in service: spurious, silently ignored.
472 //PrintError("apic %u: core ?: Spurious EOI...\n",apic->lapic_id.val);
478 // Caller is expected to have locked the apic
// Raise a locally-generated interrupt (timer, thermal, perf, LINT0/1, error)
// by reading the matching LVT entry for its vector, delivery mode, and mask.
// Masked entries are dropped; only fixed delivery is implemented.
479 static int activate_internal_irq_nolock(struct apic_state * apic, apic_irq_type_t int_type) {
480 uint32_t vec_num = 0;
481 uint32_t del_mode = 0;
// Timer and error LVTs always use fixed delivery; the others carry their
// delivery mode in the msg_type field of the LVT entry.
487 vec_num = apic->tmr_vec_tbl.vec;
488 del_mode = APIC_FIXED_DELIVERY;
489 masked = apic->tmr_vec_tbl.mask;
492 vec_num = apic->therm_loc_vec_tbl.vec;
493 del_mode = apic->therm_loc_vec_tbl.msg_type;
494 masked = apic->therm_loc_vec_tbl.mask;
497 vec_num = apic->perf_ctr_loc_vec_tbl.vec;
498 del_mode = apic->perf_ctr_loc_vec_tbl.msg_type;
499 masked = apic->perf_ctr_loc_vec_tbl.mask;
502 vec_num = apic->lint0_vec_tbl.vec;
503 del_mode = apic->lint0_vec_tbl.msg_type;
504 masked = apic->lint0_vec_tbl.mask;
507 vec_num = apic->lint1_vec_tbl.vec;
508 del_mode = apic->lint1_vec_tbl.msg_type;
509 masked = apic->lint1_vec_tbl.mask;
512 vec_num = apic->err_vec_tbl.vec;
513 del_mode = APIC_FIXED_DELIVERY;
514 masked = apic->err_vec_tbl.mask;
517 PrintError("apic %u: core ?: Invalid APIC interrupt type\n", apic->lapic_id.val);
521 // interrupt is masked, don't send
523 PrintDebug("apic %u: core ?: Inerrupt is masked\n", apic->lapic_id.val);
527 if (del_mode == APIC_FIXED_DELIVERY) {
528 //PrintDebug("Activating internal APIC IRQ %d\n", vec_num);
529 return activate_apic_irq_nolock(apic, vec_num);
// Non-fixed modes (SMI/NMI/etc.) from LVT entries are not implemented.
531 PrintError("apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
537 // Caller is expected to have locked the destination apic
// Cluster-model logical destination check: the high nibble of the message
// destination address (MDA) selects the cluster, the low nibble is a bitmask
// of members within it.  Deliver iff cluster matches AND the member bit hits.
538 static inline int should_deliver_cluster_ipi(struct guest_info * dst_core,
539 struct apic_state * dst_apic, uint8_t mda) {
541 if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) && /* (I am in the cluster and */
542 ((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { /* I am in the set) */
544 PrintDebug("apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
545 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
546 dst_apic->log_dst.dst_log_id);
550 PrintDebug("apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
551 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
552 dst_apic->log_dst.dst_log_id);
557 // Caller is expected to have locked the destination apic
// Flat-model logical destination check: the MDA is a plain bitmask; deliver
// iff it intersects this APIC's logical destination ID.
558 static inline int should_deliver_flat_ipi(struct guest_info * dst_core,
559 struct apic_state * dst_apic, uint8_t mda) {
561 if (dst_apic->log_dst.dst_log_id & mda) { // I am in the set
563 PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
564 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
565 dst_apic->log_dst.dst_log_id);
571 PrintDebug("apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
572 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
573 dst_apic->log_dst.dst_log_id);
579 // Caller is expected to have locked the destination apic
// Decide whether a logically-addressed IPI should reach dst_apic, dispatching
// on the destination format register: model 0xF = flat, 0x0 = cluster.
// Returns 1 to deliver, 0 to skip, -1 on an invalid DFR model.
580 static int should_deliver_ipi(struct guest_info * dst_core,
581 struct apic_state * dst_apic, uint8_t mda) {
584 if (dst_apic->dst_fmt.model == 0xf) {
587 /* always deliver broadcast */
591 return should_deliver_flat_ipi(dst_core, dst_apic, mda);
593 } else if (dst_apic->dst_fmt.model == 0x0) {
596 /* always deliver broadcast */
600 return should_deliver_cluster_ipi(dst_core, dst_apic, mda);
603 PrintError("apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
604 dst_apic->lapic_id.val, dst_core->cpu_id, dst_apic->dst_fmt.model);
609 // Caller is expected to have locked the source apic (if any) and destination apic
// Deliver a single IPI to one destination APIC according to del_mode.
// Fixed/lowest-priority set the IRR bit and may kick the destination core;
// INIT and SIPI drive the multiprocessor startup handshake (ipi_state).
610 static int deliver_ipi(struct apic_state * src_apic,
611 struct apic_state * dst_apic,
612 uint32_t vector, uint8_t del_mode) {
615 struct guest_info * dst_core = dst_apic->core;
621 case 1: // lowest priority - caller needs to have decided which apic to deliver to!
622 PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->cpu_id);
// do_xcall becomes 1 when the destination now has a deliverable interrupt
// and may need to be forced out of guest context to notice it.
624 do_xcall=activate_apic_irq_nolock(dst_apic, vector);
627 PrintError("Failed to activate apic irq!\n");
631 if (do_xcall && (dst_apic != src_apic)) {
632 // Assume core # is same as logical processor for now
633 // TODO FIX THIS FIX THIS
634 // THERE SHOULD BE: guestapicid->virtualapicid map,
635 // cpu_id->logical processor map
636 // host maintains logical proc->physical proc
637 PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
639 #ifdef CONFIG_MULTITHREAD_OS
640 v3_interrupt_cpu(dst_core->vm_info, dst_core->cpu_id, 0);
// --- INIT delivery: move the target core into the SIPI-wait state ---
649 PrintDebug(" INIT delivery to core %u\n", dst_core->cpu_id);
651 // TODO: any APIC reset on dest core (shouldn't be needed, but not sure...)
654 if (dst_apic->ipi_state != INIT_ST) {
655 PrintError(" Warning: core %u is not in INIT state (mode = %d), ignored (assuming this is the deassert)\n",
656 dst_core->cpu_id, dst_apic->ipi_state);
657 // Only a warning, since INIT INIT SIPI is common
661 // We transition the target core to SIPI state
662 dst_apic->ipi_state = SIPI; // note: locking should not be needed here
664 // That should be it since the target core should be
665 // waiting in host on this transition
666 // either it's on another core or on a different preemptive thread
667 // in both cases, it will quickly notice this transition
668 // in particular, we should not need to force an exit here
670 PrintDebug(" INIT delivery done\n");
// --- SIPI delivery: set the startup CS:IP and release the core to run ---
677 if (dst_apic->ipi_state != SIPI) {
678 PrintError(" core %u is not in SIPI state (mode = %d), ignored!\n",
679 dst_core->cpu_id, dst_apic->ipi_state);
683 // Write the RIP, CS, and descriptor
684 // assume the rest is already good to go
686 // vector VV -> rip at 0
688 // This means we start executing at linear address VV000
690 // So the selector needs to be VV00
691 // and the base needs to be VV000
694 dst_core->segments.cs.selector = vector << 8;
695 dst_core->segments.cs.limit = 0xffff;
696 dst_core->segments.cs.base = vector << 12;
698 PrintDebug(" SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
699 vector, dst_core->segments.cs.selector, dst_core->cpu_id);
700 // Maybe need to adjust the APIC?
702 // We transition the target core to SIPI state
703 dst_core->core_run_state = CORE_RUNNING; // note: locking should not be needed here
704 dst_apic->ipi_state = STARTED;
706 // As with INIT, we should not need to do anything else
708 PrintDebug(" SIPI delivery done\n");
// SMI/NMI/ExtInt and other modes are not implemented.
717 PrintError("IPI %d delivery is unsupported\n", del_mode);
725 // Caller is expected to have locked the source apic, if any
726 // route_ipi will lock the destination apics
729 Note that this model introduces a potential deadlock:
731 APIC A-> APIC B while APIC B -> APIC A
736 This deadlock condition is not currently handled.
737 A good way of handling it might be to check to see if the
738 destination apic is currently sending an IPI, and,
739 if so, back out and ask the caller to drop the sender lock
740 reacquire it, and then try route_ipi again. However,
741 logical delivery complicates this considerably since
742 we can hit the above situation in the middle of sending
743 the ipi to a group of destination apics.
// Route an ICR-described IPI to its destination set, dispatching on the
// shorthand field: 0 = explicit destination (physical or logical, with a
// lowest-priority arbitration scan), 1 = self, 2/3 = all / all-but-self.
// Locks destination APICs as needed; deliver_ipi() does the actual work.
747 static int route_ipi(struct apic_dev_state * apic_dev,
748 struct apic_state * src_apic,
749 struct int_cmd_reg * icr) {
750 struct apic_state * dest_apic = NULL;
// Serialize IPI routing device-wide so only one IPI is in flight at a time.
753 v3_lock(apic_dev->ipi_lock);
754 // now I know only one IPI is being routed, this one
755 // also, I do not have any apic locks
756 // I need to acquire locks on pairs of src/dest apics
757 // and I will do that using the total order
758 // given by their cores
761 PrintDebug("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
762 deliverymode_str[icr->del_mode],
765 (icr->dst_mode == 0) ? "(physical)" : "(logical)",
766 shorthand_str[icr->dst_shorthand],
772 V3_Print("apic: IPI %u from apic %p to %s %u (icr=0x%llx)\n",
775 (icr->dst_mode == 0) ? "(physical)" : "(logical)",
782 /* Locking issue: we hold src_apic already. We will acquire dest_apic if needed */
783 /* But this could lead to deadlock - we really need to have a total ordering */
785 switch (icr->dst_shorthand) {
787 case 0: // no shorthand
788 if (icr->dst_mode == 0) {
// Physical destination: the ICR dst field is the target APIC index.
791 if (icr->dst >= apic_dev->num_apics) {
792 PrintError("apic: Attempted send to unregistered apic id=%u\n", icr->dst);
797 dest_apic = &(apic_dev->apics[icr->dst]);
799 v3_lock(dest_apic->lock);
801 if (deliver_ipi(src_apic, dest_apic,
802 icr->vec, icr->del_mode) == -1) {
803 PrintError("apic: Could not deliver IPI\n");
804 v3_unlock(dest_apic->lock);
808 v3_unlock(dest_apic->lock);
812 if (icr->del_mode!=1) {
813 // logical, but not lowest priority
814 // we immediately trigger
815 // fixed, smi, reserved, nmi, init, sipi, etc
// mda = message destination address, matched against each APIC's LDR/DFR.
819 uint8_t mda = icr->dst;
821 for (i = 0; i < apic_dev->num_apics; i++) {
823 dest_apic = &(apic_dev->apics[i]);
// Only lock destinations other than the (already-locked) source.
826 if (src_apic==0 || dest_apic!=src_apic) {
827 v3_lock(dest_apic->lock);
833 int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
835 if (del_flag == -1) {
836 PrintError("apic: Error checking delivery mode\n");
838 v3_unlock(dest_apic->lock);
841 } else if (del_flag == 1) {
842 if (deliver_ipi(src_apic, dest_apic,
843 icr->vec, icr->del_mode) == -1) {
844 PrintError("apic: Error: Could not deliver IPI\n");
846 v3_unlock(dest_apic->lock);
853 v3_unlock(dest_apic->lock);
857 // logical, lowest priority
858 // scan, then trigger
// Two-phase: scan all matching APICs holding the lock of the current best
// (lowest task priority) candidate, then deliver to the winner.
860 int have_cur_lock; // do we have a lock on the one we are now considering?
861 struct apic_state * cur_best_apic = NULL;
863 uint8_t mda = icr->dst;
865 for (i = 0; i < apic_dev->num_apics; i++) {
867 dest_apic = &(apic_dev->apics[i]);
870 if (src_apic==0 || dest_apic!=src_apic) {
871 v3_lock(dest_apic->lock);
877 int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
879 if (del_flag == -1) {
880 PrintError("apic: Error checking delivery mode\n");
882 v3_unlock(dest_apic->lock);
884 if (cur_best_apic && cur_best_apic!=src_apic) {
885 v3_unlock(cur_best_apic->lock);
889 } else if (del_flag == 1) {
890 // update priority for lowest priority scan
891 if (!cur_best_apic) {
892 cur_best_apic=dest_apic;
893 have_cur_lock=0; // will unlock as cur_best_apic
894 } else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
895 // we now unlock the current best one and then switch
896 // so in the end we have a lock on the new cur_best_apic
897 if (cur_best_apic!=src_apic) {
898 v3_unlock(cur_best_apic->lock);
900 cur_best_apic=dest_apic;
905 v3_unlock(dest_apic->lock);
909 // now we will deliver to the best one if it exists
910 if (!cur_best_apic) {
911 PrintDebug("apic: lowest priority deliver, but no destinations!\n");
913 if (deliver_ipi(src_apic, cur_best_apic,
914 icr->vec, icr->del_mode) == -1) {
915 PrintError("apic: Error: Could not deliver IPI\n");
916 if (cur_best_apic!=src_apic) {
917 v3_unlock(cur_best_apic->lock);
921 if (cur_best_apic!=src_apic) {
922 v3_unlock(cur_best_apic->lock);
924 //V3_Print("apic: logical, lowest priority delivery to apic %u\n",cur_best_apic->lapic_id.val);
// Shorthand 1: self-IPI -- source APIC is already locked by the caller.
934 /* I assume I am already locked! */
936 if (src_apic == NULL) { /* this is not an apic, but it's trying to send to itself??? */
937 PrintError("apic: Sending IPI to self from generic IPI sender\n")
938 goto err_placeholder_unused; /* NOTE(review): line not in sample */
941 if (icr->dst_mode == 0) { /* physical delivery */
942 if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
943 PrintError("apic: Could not deliver IPI to self (physical)\n");
946 } else { /* logical delivery */
947 PrintError("apic: use of logical delivery in self (untested)\n");
948 if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
949 PrintError("apic: Could not deliver IPI to self (logical)\n");
956 case 3: { /* all and all-but-me */
957 /* assuming that logical versus physical doesn't matter
958 although it is odd that both are used */
963 for (i = 0; i < apic_dev->num_apics; i++) {
964 dest_apic = &(apic_dev->apics[i]);
// Shorthand 2 (all incl. self) always delivers; shorthand 3 skips source.
967 if ((dest_apic != src_apic) || (icr->dst_shorthand == 2)) {
968 if (src_apic==0 || dest_apic!=src_apic) {
969 v3_lock(dest_apic->lock);
974 if (deliver_ipi(src_apic, dest_apic, icr->vec, icr->del_mode) == -1) {
975 PrintError("apic: Error: Could not deliver IPI\n");
977 v3_unlock(dest_apic->lock);
982 v3_unlock(dest_apic->lock);
990 PrintError("apic: Error routing IPI, invalid Mode (%d)\n", icr->dst_shorthand);
999 // External function, expected to acquire lock on apic
// MMIO read handler for the APIC page.  Decodes the register offset, loads
// the 32-bit register value into val, then copies 1/2/4 bytes out to the
// guest depending on the access length.  Returns via the good/bad exit
// paths, unlocking the APIC either way.
1000 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
1001 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1002 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1003 addr_t reg_addr = guest_addr - apic->base_addr;
1004 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
1007 v3_lock(apic->lock);
1009 PrintDebug("apic %u: core %u: at %p: Read apic address space (%p)\n",
1010 apic->lapic_id.val, core->cpu_id, apic, (void *)guest_addr);
// Reject accesses while the APIC is globally disabled via the base MSR.
1012 if (msr->apic_enable == 0) {
1013 PrintError("apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
1014 apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
1016 goto apic_read_out_bad;
1020 /* Because "May not be supported" doesn't matter to Linux developers... */
1021 /* if (length != 4) { */
1022 /* PrintError("Invalid apic read length (%d)\n", length); */
// Dispatch on the 4-byte-aligned register offset.
1026 switch (reg_addr & ~0x3) {
1028 // Well, only an idiot would read from a architectural write only register
1030 // PrintError("Attempting to read from write only register\n");
1035 case APIC_ID_OFFSET:
1036 val = apic->lapic_id.val;
1038 case APIC_VERSION_OFFSET:
1039 val = apic->apic_ver.val;
1042 val = apic->task_prio.val;
1045 val = apic->arb_prio.val;
1048 val = apic->proc_prio.val;
1050 case REMOTE_READ_OFFSET:
1051 val = apic->rem_rd_data;
1054 val = apic->log_dst.val;
1057 val = apic->dst_fmt.val;
1059 case SPURIOUS_INT_VEC_OFFSET:
1060 val = apic->spurious_int.val;
1063 val = apic->err_status.val;
1065 case TMR_LOC_VEC_TBL_OFFSET:
1066 val = apic->tmr_vec_tbl.val;
1068 case LINT0_VEC_TBL_OFFSET:
1069 val = apic->lint0_vec_tbl.val;
1071 case LINT1_VEC_TBL_OFFSET:
1072 val = apic->lint1_vec_tbl.val;
1074 case ERR_VEC_TBL_OFFSET:
1075 val = apic->err_vec_tbl.val;
1077 case TMR_INIT_CNT_OFFSET:
1078 val = apic->tmr_init_cnt;
1080 case TMR_DIV_CFG_OFFSET:
1081 val = apic->tmr_div_cfg.val;
// 256-bit bitmaps read back 32 bits at a time (IER slots below).
1085 val = *(uint32_t *)(apic->int_en_reg);
1088 val = *(uint32_t *)(apic->int_en_reg + 4);
1091 val = *(uint32_t *)(apic->int_en_reg + 8);
1094 val = *(uint32_t *)(apic->int_en_reg + 12);
1097 val = *(uint32_t *)(apic->int_en_reg + 16);
1100 val = *(uint32_t *)(apic->int_en_reg + 20);
1103 val = *(uint32_t *)(apic->int_en_reg + 24);
1106 val = *(uint32_t *)(apic->int_en_reg + 28);
// ISR slots.
1110 val = *(uint32_t *)(apic->int_svc_reg);
1113 val = *(uint32_t *)(apic->int_svc_reg + 4);
1116 val = *(uint32_t *)(apic->int_svc_reg + 8);
1119 val = *(uint32_t *)(apic->int_svc_reg + 12);
1122 val = *(uint32_t *)(apic->int_svc_reg + 16);
1125 val = *(uint32_t *)(apic->int_svc_reg + 20);
1128 val = *(uint32_t *)(apic->int_svc_reg + 24);
1131 val = *(uint32_t *)(apic->int_svc_reg + 28);
// TMR slots.
1135 val = *(uint32_t *)(apic->trig_mode_reg);
1138 val = *(uint32_t *)(apic->trig_mode_reg + 4);
1141 val = *(uint32_t *)(apic->trig_mode_reg + 8);
1144 val = *(uint32_t *)(apic->trig_mode_reg + 12);
1147 val = *(uint32_t *)(apic->trig_mode_reg + 16);
1150 val = *(uint32_t *)(apic->trig_mode_reg + 20);
1153 val = *(uint32_t *)(apic->trig_mode_reg + 24);
1156 val = *(uint32_t *)(apic->trig_mode_reg + 28);
// IRR slots.
1160 val = *(uint32_t *)(apic->int_req_reg);
1163 val = *(uint32_t *)(apic->int_req_reg + 4);
1166 val = *(uint32_t *)(apic->int_req_reg + 8);
1169 val = *(uint32_t *)(apic->int_req_reg + 12);
1172 val = *(uint32_t *)(apic->int_req_reg + 16);
1175 val = *(uint32_t *)(apic->int_req_reg + 20);
1178 val = *(uint32_t *)(apic->int_req_reg + 24);
1181 val = *(uint32_t *)(apic->int_req_reg + 28);
1183 case TMR_CUR_CNT_OFFSET:
1184 val = apic->tmr_cur_cnt;
1187 // We are not going to implement these....
1188 case THERM_LOC_VEC_TBL_OFFSET:
1189 val = apic->therm_loc_vec_tbl.val;
1191 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1192 val = apic->perf_ctr_loc_vec_tbl.val;
1197 // handled registers
1198 case INT_CMD_LO_OFFSET:
1199 val = apic->int_cmd.lo;
1201 case INT_CMD_HI_OFFSET:
1202 val = apic->int_cmd.hi;
1205 // handle current timer count
1207 // Unhandled Registers
1208 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1209 val = apic->ext_intr_vec_tbl[0].val;
1211 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1212 val = apic->ext_intr_vec_tbl[1].val;
1214 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1215 val = apic->ext_intr_vec_tbl[2].val;
1217 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1218 val = apic->ext_intr_vec_tbl[3].val;
1222 case EXT_APIC_FEATURE_OFFSET:
1223 case EXT_APIC_CMD_OFFSET:
1227 PrintError("apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
1228 apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
1229 goto apic_read_out_bad;
// Sub-word accesses: copy the requested byte(s) out of val.
1234 uint_t byte_addr = reg_addr & 0x3;
1235 uint8_t * val_ptr = (uint8_t *)dst;
1237 *val_ptr = *(((uint8_t *)&val) + byte_addr);
// NOTE(review): the condition below (2-byte read only when the offset is 3,
// which would straddle the register) looks suspect -- confirm the intended
// offsets against callers.
1239 } else if ((length == 2) &&
1240 ((reg_addr & 0x3) == 0x3)) {
1241 uint_t byte_addr = reg_addr & 0x3;
1242 uint16_t * val_ptr = (uint16_t *)dst;
// NOTE(review): byte_addr is a BYTE offset but is added to a uint16_t*,
// scaling it by 2 and reading past val -- looks like it should be
// (uint16_t *)(((uint8_t *)&val) + byte_addr). Flagging, not changing.
1243 *val_ptr = *(((uint16_t *)&val) + byte_addr);
1245 } else if (length == 4) {
1246 uint32_t * val_ptr = (uint32_t *)dst;
1250 PrintError("apic %u: core %u: Invalid apic read length (%d)\n",
1251 apic->lapic_id.val, core->cpu_id, length);
1252 goto apic_read_out_bad;
1255 PrintDebug("apic %u: core %u: Read finished (val=%x)\n",
1256 apic->lapic_id.val, core->cpu_id, *(uint32_t *)dst);
1259 // apic_read_out_good:
1260 v3_unlock(apic->lock);
// Error exit path: unlock before returning failure.
1264 v3_unlock(apic->lock);
1272 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data) {
1273 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1274 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
// (review) Fragment of apic_write(): MMIO write handler for the LAPIC page.
// The function header and a number of interior lines (breaks, braces, some
// cases) are elided from this view; comments annotate only what is visible.
// reg_addr is the register offset within the 4KB APIC page.
1275 addr_t reg_addr = guest_addr - apic->base_addr;
1276 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
// All visible register writes are performed at 32-bit granularity.
1277 uint32_t op_val = *(uint32_t *)src;
// Per-APIC lock held for the duration of the write, except around
// route_ipi() in the ICR-low case below.
1280 v3_lock(apic->lock);
1282 PrintDebug("apic %u: core %u: at %p and priv_data is at %p\n",
1283 apic->lapic_id.val, core->cpu_id, apic, priv_data);
1285 PrintDebug("apic %u: core %u: write to address space (%p) (val=%x)\n",
1286 apic->lapic_id.val, core->cpu_id, (void *)guest_addr, *(uint32_t *)src);
// Reject writes while the APIC is disabled via the IA32_APIC_BASE MSR.
1288 if (msr->apic_enable == 0) {
1289 PrintError("apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
1290 apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
1291 goto apic_write_out_bad;
// NOTE(review): arguments are in the wrong order for this format string —
// the format expects (apic id, core id, length) but the call passes
// (apic id, length, core id).  Harmless except for a misleading log line.
1296 PrintError("apic %u: core %u: Invalid apic write length (%d)\n",
1297 apic->lapic_id.val, length, core->cpu_id);
1298 goto apic_write_out_bad;
// Read-only registers: the write is logged as an error but then ignored
// (the bailout below is deliberately commented out), presumably falling
// through to the normal exit — TODO confirm against the elided lines.
1302 case REMOTE_READ_OFFSET:
1303 case APIC_VERSION_OFFSET:
1330 case EXT_APIC_FEATURE_OFFSET:
1332 PrintError("apic %u: core %u: Attempting to write to read only register %p (error)\n",
1333 apic->lapic_id.val, core->cpu_id, (void *)reg_addr);
1334 // goto apic_write_out_bad;
// The guest is allowed to rewrite this APIC's ID; log loudly since other
// code indexes APICs by core, not by this ID.
1339 case APIC_ID_OFFSET:
1340 V3_Print("apic %u: core %u: my id is being changed to %u\n",
1341 apic->lapic_id.val, core->cpu_id, op_val);
1343 apic->lapic_id.val = op_val;
1346 apic->task_prio.val = op_val;
1349 PrintDebug("apic %u: core %u: setting log_dst.val to 0x%x\n",
1350 apic->lapic_id.val, core->cpu_id, op_val);
1351 apic->log_dst.val = op_val;
1354 apic->dst_fmt.val = op_val;
1356 case SPURIOUS_INT_VEC_OFFSET:
1357 apic->spurious_int.val = op_val;
1360 apic->err_status.val = op_val;
// Local vector table entries: stored verbatim.
1362 case TMR_LOC_VEC_TBL_OFFSET:
1363 apic->tmr_vec_tbl.val = op_val;
1365 case THERM_LOC_VEC_TBL_OFFSET:
1366 apic->therm_loc_vec_tbl.val = op_val;
1368 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1369 apic->perf_ctr_loc_vec_tbl.val = op_val;
1371 case LINT0_VEC_TBL_OFFSET:
1372 apic->lint0_vec_tbl.val = op_val;
1374 case LINT1_VEC_TBL_OFFSET:
1375 apic->lint1_vec_tbl.val = op_val;
1377 case ERR_VEC_TBL_OFFSET:
1378 apic->err_vec_tbl.val = op_val;
// Writing the initial count also restarts the current count, per the
// architected timer behavior.
1380 case TMR_INIT_CNT_OFFSET:
1381 apic->tmr_init_cnt = op_val;
1382 apic->tmr_cur_cnt = op_val;
1384 case TMR_CUR_CNT_OFFSET:
1385 apic->tmr_cur_cnt = op_val;
1387 case TMR_DIV_CFG_OFFSET:
1388 apic->tmr_div_cfg.val = op_val;
// 256-bit interrupt enable mask, written one 32-bit slice at a time
// (the case labels selecting each slice are elided from this view).
1392 // Enable mask (256 bits)
1394 *(uint32_t *)(apic->int_en_reg) = op_val;
1397 *(uint32_t *)(apic->int_en_reg + 4) = op_val;
1400 *(uint32_t *)(apic->int_en_reg + 8) = op_val;
1403 *(uint32_t *)(apic->int_en_reg + 12) = op_val;
1406 *(uint32_t *)(apic->int_en_reg + 16) = op_val;
1409 *(uint32_t *)(apic->int_en_reg + 20) = op_val;
1412 *(uint32_t *)(apic->int_en_reg + 24) = op_val;
1415 *(uint32_t *)(apic->int_en_reg + 28) = op_val;
1418 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1419 apic->ext_intr_vec_tbl[0].val = op_val;
1421 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1422 apic->ext_intr_vec_tbl[1].val = op_val;
1424 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1425 apic->ext_intr_vec_tbl[2].val = op_val;
1427 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1428 apic->ext_intr_vec_tbl[3].val = op_val;
1434 // do eoi (we already have the lock)
// ICR low write: latch the command, snapshot the full ICR, then drop our
// lock so route_ipi() can take source and destination locks in its own
// order without deadlocking against us.
1438 case INT_CMD_LO_OFFSET: {
1439 // execute command (we already have the lock)
1441 struct int_cmd_reg tmp_icr;
1443 apic->int_cmd.lo = op_val;
// Snapshot taken while still holding the lock, so the ICR cannot change
// under route_ipi().
1445 tmp_icr=apic->int_cmd;
1447 // V3_Print("apic %u: core %u: sending cmd 0x%llx to apic %u\n",
1448 // apic->lapic_id.val, core->cpu_id,
1449 // apic->int_cmd.val, apic->int_cmd.dst);
1454 v3_unlock(apic->lock);
1456 // route_ipi is responsible for locking both source and destiation(s)
// NOTE(review): on failure we jump to apic_write_out_bad, which (per the
// exit path at 1497) unlocks again even though the lock was already
// released at 1454 — this looks like a double unlock; verify v3_unlock
// semantics.
1457 if (route_ipi(apic_dev, apic, &tmp_icr) == -1) {
1458 PrintError("IPI Routing failure\n");
1459 goto apic_write_out_bad;
1461 v3_lock(apic->lock);
1467 case INT_CMD_HI_OFFSET: {
1468 // already have the lock
// Guest wrote ICR-high while a command was apparently still in flight;
// note this logs the OLD high value, before the assignment below.
1470 PrintError("apic %u: core %u: writing command high=0x%x while in_icr=1\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi)
1473 apic->int_cmd.hi = op_val;
1474 //V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
// Writes to unimplemented registers are treated as hard errors (despite
// the "(ignored)" wording in the message).
1480 // Unhandled Registers
1481 case EXT_APIC_CMD_OFFSET:
1484 PrintError("apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
1485 apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
1487 goto apic_write_out_bad;
1490 PrintDebug("apic %u: core %u: Write finished\n", apic->lapic_id.val, core->cpu_id);
// Success and failure exits both release the per-APIC lock.
1492 // apic_write_out_good:
1493 v3_unlock(apic->lock);
1497 v3_unlock(apic->lock);
1503 /* Interrupt Controller Functions */
1505 // internally used, expects caller to lock
// Report whether this core's APIC has a deliverable interrupt pending.
// An IRQ is pending iff the highest requested vector (IRR) exists and is
// strictly higher priority than the highest in-service vector (ISR).
// Caller must already hold apic->lock.  Return statements are elided from
// this view; presumably 1 for pending, 0 otherwise — TODO confirm.
1506 static int apic_intr_pending_nolock(struct guest_info * core, void * private_data) {
1507 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
// APICs are indexed by the core id of the querying guest core.
1508 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1509 int req_irq = get_highest_irr(apic);
1510 int svc_irq = get_highest_isr(apic);
1512 // PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->cpu_id,req_irq,svc_irq);
1514 if ((req_irq >= 0) &&
1515 (req_irq > svc_irq)) {
1522 // externally visible, so must lock itself
// Locking wrapper around apic_intr_pending_nolock(); this is the entry
// point exposed to the interrupt-controller interface (see intr_ops).
1523 static int apic_intr_pending(struct guest_info * core, void * private_data) {
1524 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1525 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1528 v3_lock(apic->lock);
// rc's declaration and the final return are elided from this view.
1530 rc=apic_intr_pending_nolock(core,private_data);
1532 v3_unlock(apic->lock);
1537 // Internal - no lock
// Select the vector number to deliver next for this core's APIC:
// compares highest requested (IRR) against highest in-service (ISR)
// vector.  Caller must hold apic->lock.  The return expressions in each
// branch are elided from this view — presumably req_irq when it wins,
// otherwise a no-interrupt sentinel; TODO confirm.
1539 static int apic_get_intr_number_nolock(struct guest_info * core, void * private_data) {
1540 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1541 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1542 int req_irq = get_highest_irr(apic);
1543 int svc_irq = get_highest_isr(apic);
// No interrupt currently in service: the requested vector (if any) wins.
1544 if (svc_irq == -1) {
// Otherwise only deliver if the request outranks the in-service vector.
1546 } else if (svc_irq < req_irq) {
1554 // Externally visible, so must lock itself
// Locking wrapper around apic_get_intr_number_nolock(); exported via
// intr_ops for external callers.
1555 static int apic_get_intr_number(struct guest_info * core, void * private_data) {
1556 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1557 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1560 v3_lock(apic->lock);
// rc's declaration and the final return are elided from this view.
1562 rc=apic_get_intr_number_nolock(core,private_data);
1564 v3_unlock(apic->lock);
1571 // Here there is no source APIC, so there is no need to lock it
1572 // Furthermore, the expectation is that route_ipi will lock the destiation apic
// Public helper: build a synthetic ICR from a generic IPI descriptor and
// hand it to route_ipi().  There is no source APIC here (src is NULL), so
// no source lock is taken; route_ipi locks the destination(s).
1573 int v3_apic_send_ipi(struct v3_vm_info * vm, struct v3_gen_ipi * ipi, void * dev_data) {
1574 struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1575 (((struct vm_device *)dev_data)->private_data);
1576 struct int_cmd_reg tmp_icr;
// The zeroing statement itself is elided from this view.
1578 // zero out all the fields
// Translate the generic IPI fields into ICR bitfields.
1581 tmp_icr.vec = ipi->vector;
1582 tmp_icr.del_mode = ipi->mode;
1583 tmp_icr.dst_mode = ipi->logical;
1584 tmp_icr.trig_mode = ipi->trigger_mode;
1585 tmp_icr.dst_shorthand = ipi->dst_shorthand;
1586 tmp_icr.dst = ipi->dst;
1588 // route_ipi is responsible for locking the destination apic
1589 return route_ipi(apic_dev, NULL, &tmp_icr);
// Public helper: raise IRQ `irq` on the APIC of core `dst`.  Sets the IRR
// bit under the destination APIC's lock, then (optionally) kicks the
// physical CPU running that core so it notices the interrupt promptly.
1593 int v3_apic_raise_intr(struct v3_vm_info * vm, uint32_t irq, uint32_t dst, void * dev_data) {
1594 struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1595 (((struct vm_device*)dev_data)->private_data);
1596 struct apic_state * apic = &(apic_dev->apics[dst]);
1599 PrintDebug("apic %u core ?: raising interrupt IRQ %u (dst = %u).\n", apic->lapic_id.val, irq, dst);
1601 v3_lock(apic->lock);
// do_xcall's declaration and the error-test branch are partly elided.
1603 do_xcall=activate_apic_irq_nolock(apic, irq);
1606 PrintError("Failed to activate apic irq\n");
1607 v3_unlock(apic->lock);
// NOTE(review): this compares the host CPU id (V3_Get_CPU()) against the
// guest core index `dst` — assumes a 1:1 core-to-physical-CPU mapping;
// verify against the VM's core placement.
1611 if (do_xcall && (V3_Get_CPU() != dst)) {
1612 #ifdef CONFIG_MULTITHREAD_OS
// Cross-call to wake the remote CPU out of guest mode.
1613 v3_interrupt_cpu(vm, dst, 0);
1619 v3_unlock(apic->lock);
1624 // internal - caller must lock
// Transition IRQ `irq` from "requested" (IRR) to "in service" (ISR) for
// this core's APIC.  Caller must hold apic->lock.  The IRR/ISR bitmaps are
// byte arrays: major_offset selects the byte, minor_offset the bit.
1625 static int apic_begin_irq_nolock(struct guest_info * core, void * private_data, int irq) {
1626 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1627 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1628 int major_offset = (irq & ~0x00000007) >> 3;
1629 int minor_offset = irq & 0x00000007;
1630 uint8_t *req_location = apic->int_req_reg + major_offset;
1631 uint8_t *svc_location = apic->int_svc_reg + major_offset;
1632 uint8_t flag = 0x01 << minor_offset;
// Only honor a begin-irq for a vector this APIC actually requested;
// spurious begins (e.g. from a stale external ack) are ignored.
1634 if (*req_location & flag) {
1635 // we will only pay attention to a begin irq if we
1636 // know that we initiated it!
1637 *svc_location |= flag;
1638 *req_location &= ~flag;
1641 //PrintDebug("apic %u: core %u: begin irq for %d ignored since I don't own it\n",
1642 // apic->lapic_id.val, core->cpu_id, irq);
1648 // Since this is called, externally, it should lock the apic
// Locking wrapper around apic_begin_irq_nolock(); exported via intr_ops.
1649 static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
1650 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1651 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1654 v3_lock(apic->lock);
// rc's declaration and the final return are elided from this view.
1656 rc=apic_begin_irq_nolock(core,private_data,irq);
1658 v3_unlock(apic->lock);
1665 /* Timer Functions */
1666 // Caller will lock the apic
// Advance the virtual APIC timer by `cpu_cycles` guest cycles: scale the
// cycle count by the configured divider, decrement the current count, and
// fire APIC_TMR_INT when it reaches zero (reloading in periodic mode).
// Caller (apic_update_time) holds apic->lock.
1667 static void apic_update_time_nolock(struct guest_info * core,
1668 uint64_t cpu_cycles, uint64_t cpu_freq,
1670 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1671 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
// tmr_ticks is 64-bit on 64-bit builds, 32-bit otherwise; the #ifdef arms
// around these two declarations are elided from this view.
1673 // The 32 bit GCC runtime is a pile of shit
1675 uint64_t tmr_ticks = 0;
1677 uint32_t tmr_ticks = 0;
// The divider register's low byte selects a power-of-two divide, realized
// below as a right shift by shift_num.
1680 uint8_t tmr_div = *(uint8_t *)&(apic->tmr_div_cfg.val);
1681 uint_t shift_num = 0;
// Timer is idle when never armed (init count 0) or a one-shot has already
// expired; bail out early instead of spamming interrupts.
1684 // Check whether this is true:
1685 // -> If the Init count is zero then the timer is disabled
1686 // and doesn't just blitz interrupts to the CPU
1687 if ((apic->tmr_init_cnt == 0) ||
1688 ( (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_ONESHOT) &&
1689 (apic->tmr_cur_cnt == 0))) {
1690 //PrintDebug("apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->cpu_id);
// Divider decode (cases for 2/4/8 and the shift assignments are elided
// from this view).
1708 case APIC_TMR_DIV16:
1711 case APIC_TMR_DIV32:
1714 case APIC_TMR_DIV64:
1717 case APIC_TMR_DIV128:
1721 PrintError("apic %u: core %u: Invalid Timer Divider configuration\n",
1722 apic->lapic_id.val, core->cpu_id);
1726 tmr_ticks = cpu_cycles >> shift_num;
1727 // PrintDebug("Timer Ticks: %p\n", (void *)tmr_ticks);
// Not enough ticks to expire: just decrement and we're done (the else
// branch below handles expiry).
1729 if (tmr_ticks < apic->tmr_cur_cnt) {
1730 apic->tmr_cur_cnt -= tmr_ticks;
1732 tmr_ticks -= apic->tmr_cur_cnt;
1733 apic->tmr_cur_cnt = 0;
1736 PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
1737 apic->lapic_id.val, core->cpu_id,
1738 apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
1740 if (apic_intr_pending_nolock(core, priv_data)) {
// NOTE(review): this calls the LOCKING apic_get_intr_number() while the
// caller already holds apic->lock — if v3_lock is not reentrant this
// deadlocks whenever CONFIG_DEBUG_APIC is enabled (the call is compiled
// out otherwise, since PrintDebug expands to nothing).
1741 PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
1742 apic->lapic_id.val, core->cpu_id,
1743 apic_get_intr_number(core, priv_data));
1746 if (activate_internal_irq_nolock(apic, APIC_TMR_INT) == -1) {
1747 PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
1748 apic->lapic_id.val, core->cpu_id);
// Periodic mode: fold the leftover ticks into the reloaded count so long
// host delays don't accumulate drift beyond one period.
1751 if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
1752 tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
1753 apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
// Locking wrapper around apic_update_time_nolock(); registered as the
// per-core timer callback (see timer_ops).
1761 static void apic_update_time(struct guest_info * core,
1762 uint64_t cpu_cycles, uint64_t cpu_freq,
1764 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1765 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1767 v3_lock(apic->lock);
1769 apic_update_time_nolock(core,cpu_cycles,cpu_freq,priv_data);
1771 v3_unlock(apic->lock);
// Interrupt-controller interface: only the locking wrappers are exposed;
// the *_nolock variants stay internal to this file.
1776 static struct intr_ctrl_ops intr_ops = {
1777 .intr_pending = apic_intr_pending,
1778 .get_intr_number = apic_get_intr_number,
1779 .begin_irq = apic_begin_irq,
// Timer interface: locking wrapper only (apic_update_time takes the lock).
1783 static struct v3_timer_ops timer_ops = {
1784 .update_timer = apic_update_time,
// Device teardown: detach each per-core APIC from its interrupt controller
// and timer, then unhook the IA32_APIC_BASE MSR.  Null checks, the freeing
// of apic_dev itself, and the return are elided from this view.
1790 static int apic_free(struct apic_dev_state * apic_dev) {
1792 struct v3_vm_info * vm = NULL;
1794 for (i = 0; i < apic_dev->num_apics; i++) {
1795 struct apic_state * apic = &(apic_dev->apics[i]);
1796 struct guest_info * core = apic->core;
1800 v3_remove_intr_controller(core, apic->controller_handle);
1803 v3_remove_timer(core, apic->timer);
// NOTE(review): vm is initialized to NULL above; the elided loop body
// presumably sets it from a core — confirm it is non-NULL before this call.
1810 v3_unhook_msr(vm, BASE_ADDR_MSR);
// Device framework hooks; the cast adapts apic_free's typed parameter to
// the framework's generic void* free signature.
1817 static struct v3_device_ops dev_ops = {
1818 .free = (int (*)(void *))apic_free,
// Device constructor: allocates one apic_state per guest core (flexible
// trailing array after apic_dev_state), registers the device, then wires
// each core's APIC into the interrupt-controller, timer, and MMIO hooks,
// and finally hooks the IA32_APIC_BASE MSR.
1823 static int apic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1824 char * dev_id = v3_cfg_val(cfg, "ID");
1825 struct apic_dev_state * apic_dev = NULL;
1828 PrintDebug("apic: creating an APIC for each core\n");
// NOTE(review): V3_Malloc result is dereferenced below without a NULL
// check — confirm whether V3_Malloc can fail in this port.
1830 apic_dev = (struct apic_dev_state *)V3_Malloc(sizeof(struct apic_dev_state) +
1831 sizeof(struct apic_state) * vm->num_cores);
1833 apic_dev->num_apics = vm->num_cores;
1835 v3_lock_init(&(apic_dev->ipi_lock));
1837 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, apic_dev);
1840 PrintError("apic: Could not attach device %s\n", dev_id);
// Per-core setup: state init, interrupt controller, timer, and a 4KB MMIO
// hook at the APIC's base address.
1846 for (i = 0; i < vm->num_cores; i++) {
1847 struct apic_state * apic = &(apic_dev->apics[i]);
1848 struct guest_info * core = &(vm->cores[i]);
1852 init_apic_state(apic, i);
1854 apic->controller_handle = v3_register_intr_controller(core, &intr_ops, apic_dev);
1856 apic->timer = v3_add_timer(core, &timer_ops, apic_dev);
// Timer attach failure tears the whole device down.
1858 if (apic->timer == NULL) {
1859 PrintError("APIC: Failed to attach timer to core %d\n", i);
1860 v3_remove_device(dev);
1864 v3_hook_full_mem(vm, core->cpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
1866 PrintDebug("apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
// Debug-only sanity pass over the freshly initialized APIC array.
1869 #ifdef CONFIG_DEBUG_APIC
1870 for (i = 0; i < vm->num_cores; i++) {
1871 struct apic_state * apic = &(apic_dev->apics[i]);
1872 PrintDebug("apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
1873 i, apic, apic->lapic_id.val, apic->base_addr_msr.value,apic->core);
1878 PrintDebug("apic: priv_data is at %p\n", apic_dev);
// NOTE(review): return value of v3_hook_msr is not checked here.
1880 v3_hook_msr(vm, BASE_ADDR_MSR, read_apic_msr, write_apic_msr, apic_dev);
// Register this device with the Palacios device framework under the name
// "LAPIC", using apic_init as its constructor.
1887 device_register("LAPIC", apic_init)