2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Authors: Jack Lange <jarusl@cs.northwestern.edu>
15 * Peter Dinda <pdinda@northwestern.edu> (SMP)
17 * This is free software. You are permitted to use,
18 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <devices/apic.h>
23 #include <devices/apic_regs.h>
24 #include <palacios/vmm.h>
25 #include <palacios/vmm_msr.h>
26 #include <palacios/vmm_sprintf.h>
27 #include <palacios/vm_guest.h>
28 #include <palacios/vmm_types.h>
33 #ifndef CONFIG_DEBUG_APIC
/* Compile debug output away entirely when APIC debugging is disabled. */
35 #define PrintDebug(fmt, args...)
39 #ifdef CONFIG_DEBUG_APIC
/* Human-readable names for the ICR shorthand and delivery-mode fields,
 * used only by debug/trace output below. */
40 static char * shorthand_str[] = {
47 static char * deliverymode_str[] = {
/* NOTE(review): locking is deliberately stubbed out here — v3_lock()/v3_unlock()
 * expand to a no-op self-assignment, so every lock/unlock call in this file is
 * currently inert. All "caller must lock" comments below describe the intended
 * discipline, not what actually happens at runtime. */
59 // Temporary removal of locking
60 #define v3_lock(p) p=p
61 #define v3_unlock(p) p=p
/* Internal (LVT-sourced) interrupt types routed through
 * activate_internal_irq_nolock(). */
64 typedef enum { APIC_TMR_INT, APIC_THERM_INT, APIC_PERF_INT,
65 APIC_LINT0_INT, APIC_LINT1_INT, APIC_ERR_INT } apic_irq_type_t;
/* ICR delivery-mode field encodings. */
67 #define APIC_FIXED_DELIVERY 0x0
68 #define APIC_LOWEST_DELIVERY 0x1
69 #define APIC_SMI_DELIVERY 0x2
70 #define APIC_RES1_DELIVERY 0x3
71 #define APIC_NMI_DELIVERY 0x4
72 #define APIC_INIT_DELIVERY 0x5
73 #define APIC_SIPI_DELIVERY 0x6
74 #define APIC_EXTINT_DELIVERY 0x7
/* ICR destination-shorthand field encodings. */
76 #define APIC_SHORTHAND_NONE 0x0
77 #define APIC_SHORTHAND_SELF 0x1
78 #define APIC_SHORTHAND_ALL 0x2
79 #define APIC_SHORTHAND_ALL_BUT_ME 0x3
/* ICR destination-mode field: physical APIC ID vs. logical (MDA) addressing. */
81 #define APIC_DEST_PHYSICAL 0x0
82 #define APIC_DEST_LOGICAL 0x1
/* APIC base-address MSR (IA32_APIC_BASE) and the architectural default base. */
85 #define BASE_ADDR_MSR 0x0000001B
86 #define DEFAULT_BASE_ADDR 0xfee00000
/* Register offsets within the 4KB APIC MMIO page. */
88 #define APIC_ID_OFFSET 0x020
89 #define APIC_VERSION_OFFSET 0x030
90 #define TPR_OFFSET 0x080
91 #define APR_OFFSET 0x090
92 #define PPR_OFFSET 0x0a0
93 #define EOI_OFFSET 0x0b0
94 #define REMOTE_READ_OFFSET 0x0c0
95 #define LDR_OFFSET 0x0d0
96 #define DFR_OFFSET 0x0e0
97 #define SPURIOUS_INT_VEC_OFFSET 0x0f0
/* In-service register (ISR): 256 bits spread over eight 32-bit registers. */
99 #define ISR_OFFSET0 0x100 // 0x100 - 0x170
100 #define ISR_OFFSET1 0x110 // 0x100 - 0x170
101 #define ISR_OFFSET2 0x120 // 0x100 - 0x170
102 #define ISR_OFFSET3 0x130 // 0x100 - 0x170
103 #define ISR_OFFSET4 0x140 // 0x100 - 0x170
104 #define ISR_OFFSET5 0x150 // 0x100 - 0x170
105 #define ISR_OFFSET6 0x160 // 0x100 - 0x170
106 #define ISR_OFFSET7 0x170 // 0x100 - 0x170
/* Trigger-mode register (TMR): same eight-register layout as the ISR. */
108 #define TRIG_OFFSET0 0x180 // 0x180 - 0x1f0
109 #define TRIG_OFFSET1 0x190 // 0x180 - 0x1f0
110 #define TRIG_OFFSET2 0x1a0 // 0x180 - 0x1f0
111 #define TRIG_OFFSET3 0x1b0 // 0x180 - 0x1f0
112 #define TRIG_OFFSET4 0x1c0 // 0x180 - 0x1f0
113 #define TRIG_OFFSET5 0x1d0 // 0x180 - 0x1f0
114 #define TRIG_OFFSET6 0x1e0 // 0x180 - 0x1f0
115 #define TRIG_OFFSET7 0x1f0 // 0x180 - 0x1f0
/* Interrupt-request register (IRR): same eight-register layout. */
118 #define IRR_OFFSET0 0x200 // 0x200 - 0x270
119 #define IRR_OFFSET1 0x210 // 0x200 - 0x270
120 #define IRR_OFFSET2 0x220 // 0x200 - 0x270
121 #define IRR_OFFSET3 0x230 // 0x200 - 0x270
122 #define IRR_OFFSET4 0x240 // 0x200 - 0x270
123 #define IRR_OFFSET5 0x250 // 0x200 - 0x270
124 #define IRR_OFFSET6 0x260 // 0x200 - 0x270
125 #define IRR_OFFSET7 0x270 // 0x200 - 0x270
/* Error status, interrupt command (ICR low/high), LVT entries, and timer. */
128 #define ESR_OFFSET 0x280
129 #define INT_CMD_LO_OFFSET 0x300
130 #define INT_CMD_HI_OFFSET 0x310
131 #define TMR_LOC_VEC_TBL_OFFSET 0x320
132 #define THERM_LOC_VEC_TBL_OFFSET 0x330
133 #define PERF_CTR_LOC_VEC_TBL_OFFSET 0x340
134 #define LINT0_VEC_TBL_OFFSET 0x350
135 #define LINT1_VEC_TBL_OFFSET 0x360
136 #define ERR_VEC_TBL_OFFSET 0x370
137 #define TMR_INIT_CNT_OFFSET 0x380
138 #define TMR_CUR_CNT_OFFSET 0x390
139 #define TMR_DIV_CFG_OFFSET 0x3e0
/* AMD extended-APIC registers (feature, command, specific-EOI). */
140 #define EXT_APIC_FEATURE_OFFSET 0x400
141 #define EXT_APIC_CMD_OFFSET 0x410
142 #define SEOI_OFFSET 0x420
/* AMD interrupt-enable register (IER): eight 32-bit registers, 256 bits. */
144 #define IER_OFFSET0 0x480 // 0x480 - 0x4f0
145 #define IER_OFFSET1 0x490 // 0x480 - 0x4f0
146 #define IER_OFFSET2 0x4a0 // 0x480 - 0x4f0
147 #define IER_OFFSET3 0x4b0 // 0x480 - 0x4f0
148 #define IER_OFFSET4 0x4c0 // 0x480 - 0x4f0
149 #define IER_OFFSET5 0x4d0 // 0x480 - 0x4f0
150 #define IER_OFFSET6 0x4e0 // 0x480 - 0x4f0
151 #define IER_OFFSET7 0x4f0 // 0x480 - 0x4f0
/* AMD extended-interrupt LVT entries. */
153 #define EXT_INT_LOC_VEC_TBL_OFFSET0 0x500 // 0x500 - 0x530
154 #define EXT_INT_LOC_VEC_TBL_OFFSET1 0x510 // 0x500 - 0x530
155 #define EXT_INT_LOC_VEC_TBL_OFFSET2 0x520 // 0x500 - 0x530
156 #define EXT_INT_LOC_VEC_TBL_OFFSET3 0x530 // 0x500 - 0x530
/* Tail of the APIC base-address MSR bitfield layout (bootstrap-processor flag,
 * global enable bit, and the 40-bit physical base address).
 * NOTE(review): the opening of this definition is not visible in this chunk. */
163 uint8_t bootstrap_cpu : 1;
165 uint8_t apic_enable : 1;
166 uint64_t base_addr : 40;
168 } __attribute__((packed));
169 } __attribute__((packed));
170 } __attribute__((packed));
/* Per-core INIT/SIPI bootstrap state machine: an AP starts in INIT_ST and is
 * moved toward STARTED by deliver_ipi() as INIT and SIPI IPIs arrive. */
174 typedef enum {INIT_ST,
176 STARTED} ipi_state_t;
178 struct apic_dev_state;
/* Per-core virtual APIC state: the base-address MSR plus shadow copies of the
 * memory-mapped registers serviced by apic_read()/apic_write().
 * NOTE(review): the enclosing struct's opening line is not visible here. */
184 struct apic_msr base_addr_msr;
187 /* memory map registers */
189 struct lapic_id_reg lapic_id;
190 struct apic_ver_reg apic_ver;
191 struct ext_apic_ctrl_reg ext_apic_ctrl;
192 struct local_vec_tbl_reg local_vec_tbl;
193 struct tmr_vec_tbl_reg tmr_vec_tbl;
194 struct tmr_div_cfg_reg tmr_div_cfg;
195 struct lint_vec_tbl_reg lint0_vec_tbl;
196 struct lint_vec_tbl_reg lint1_vec_tbl;
197 struct perf_ctr_loc_vec_tbl_reg perf_ctr_loc_vec_tbl;
198 struct therm_loc_vec_tbl_reg therm_loc_vec_tbl;
199 struct err_vec_tbl_reg err_vec_tbl;
200 struct err_status_reg err_status;
201 struct spurious_int_reg spurious_int;
202 struct int_cmd_reg int_cmd;
203 struct log_dst_reg log_dst;
204 struct dst_fmt_reg dst_fmt;
205 struct arb_prio_reg arb_prio;
206 struct task_prio_reg task_prio;
207 struct proc_prio_reg proc_prio;
208 struct ext_apic_feature_reg ext_apic_feature;
209 struct spec_eoi_reg spec_eoi;
/* Timer current/initial counts (TMR_CUR_CNT / TMR_INIT_CNT registers). */
212 uint32_t tmr_cur_cnt;
213 uint32_t tmr_init_cnt;
216 struct local_vec_tbl_reg ext_intr_vec_tbl[4];
218 uint32_t rem_rd_data;
/* INIT/SIPI progress of the core this APIC belongs to. */
221 ipi_state_t ipi_state;
/* 256-bit request (IRR), in-service (ISR), enable (IER), and trigger-mode
 * bitmaps, stored as 32 bytes each; bit N corresponds to vector N. */
223 uint8_t int_req_reg[32];
224 uint8_t int_svc_reg[32];
225 uint8_t int_en_reg[32];
226 uint8_t trig_mode_reg[32];
228 struct guest_info * core;
230 void * controller_handle;
232 struct v3_timer * timer;
/* Device-level container: one apic_state per guest core.
 * NOTE(review): apics[0] is the old GNU zero-length-array idiom for trailing
 * variable-length data; C99 would spell this as a flexible array member. */
245 struct apic_dev_state {
247 // v3_lock_t ipi_lock; // acquired by route_ipi - only one IPI active at a time
249 struct apic_state apics[0];
250 } __attribute__((packed));
/* Forward declarations for the MMIO hook handlers installed on the APIC page. */
256 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data);
257 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data);
/* Reset one virtual APIC to its power-on state: program the base-address MSR,
 * clear the IRR/ISR/TMR bitmaps, enable all vectors in the IER, and load the
 * architectural reset values into every shadow register.
 * 'id' becomes both the APIC ID and the tag used in debug output. */
260 static void init_apic_state(struct apic_state * apic, uint32_t id) {
261 apic->base_addr = DEFAULT_BASE_ADDR;
/* MSR flag bits: 0x900 sets the BSP bit + enable, 0x800 sets enable only.
 * NOTE(review): the conditional choosing between these two assignments is not
 * visible in this chunk — presumably it tests id == 0; confirm. */
264 // boot processor, enabled
265 apic->base_addr_msr.value = 0x0000000000000900LL;
267 // ap processor, enabled
268 apic->base_addr_msr.value = 0x0000000000000800LL;
271 // same base address regardless of ap or main
272 apic->base_addr_msr.value |= ((uint64_t)DEFAULT_BASE_ADDR);
274 PrintDebug("apic %u: (init_apic_state): msr=0x%llx\n",id, apic->base_addr_msr.value);
276 PrintDebug("apic %u: (init_apic_state): Sizeof Interrupt Request Register %d, should be 32\n",
277 id, (uint_t)sizeof(apic->int_req_reg));
/* No interrupts pending or in service; all 256 vectors enabled (IER = all 1s);
 * all vectors default to edge-triggered. */
279 memset(apic->int_req_reg, 0, sizeof(apic->int_req_reg));
280 memset(apic->int_svc_reg, 0, sizeof(apic->int_svc_reg));
281 memset(apic->int_en_reg, 0xff, sizeof(apic->int_en_reg));
282 memset(apic->trig_mode_reg, 0, sizeof(apic->trig_mode_reg));
284 apic->eoi = 0x00000000;
285 apic->rem_rd_data = 0x00000000;
286 apic->tmr_init_cnt = 0x00000000;
287 apic->tmr_cur_cnt = 0x00000000;
289 apic->lapic_id.val = id;
/* The core starts its bootstrap state machine waiting for an INIT IPI. */
291 apic->ipi_state = INIT_ST;
293 // The P6 has 6 LVT entries, so we set the value to (6-1)...
294 apic->apic_ver.val = 0x80050010;
/* Architectural reset values for the remaining registers; DFR resets to all
 * ones (flat model) and the spurious-interrupt vector to 0xff. */
296 apic->task_prio.val = 0x00000000;
297 apic->arb_prio.val = 0x00000000;
298 apic->proc_prio.val = 0x00000000;
299 apic->log_dst.val = 0x00000000;
300 apic->dst_fmt.val = 0xffffffff;
301 apic->spurious_int.val = 0x000000ff;
302 apic->err_status.val = 0x00000000;
303 apic->int_cmd.val = 0x0000000000000000LL;
/* LVT entries reset with the mask bit (bit 16) set. */
304 apic->tmr_vec_tbl.val = 0x00010000;
305 apic->therm_loc_vec_tbl.val = 0x00010000;
306 apic->perf_ctr_loc_vec_tbl.val = 0x00010000;
307 apic->lint0_vec_tbl.val = 0x00010000;
308 apic->lint1_vec_tbl.val = 0x00010000;
309 apic->err_vec_tbl.val = 0x00010000;
310 apic->tmr_div_cfg.val = 0x00000000;
311 //apic->ext_apic_feature.val = 0x00000007;
312 apic->ext_apic_feature.val = 0x00040007;
313 apic->ext_apic_ctrl.val = 0x00000000;
314 apic->spec_eoi.val = 0x00000000;
/* Note: the per-apic lock is initialized here, but v3_lock()/v3_unlock() are
 * currently macro-stubbed to no-ops (see top of file). */
316 v3_lock_init(&(apic->lock));
/* MSR-read hook for IA32_APIC_BASE: return the per-core APIC's current base
 * address to the guest. The apic for the calling core is selected by cpu_id.
 * (Locking is currently a no-op — see the v3_lock stub macros above.) */
325 // MSR handler - locks apic itself
326 static int read_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t * dst, void * priv_data) {
327 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
328 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
330 PrintDebug("apic %u: core %u: MSR read\n", apic->lapic_id.val, core->cpu_id);
/* NOTE(review): only base_addr is returned, not the full MSR value with the
 * BSP/enable flag bits stored in base_addr_msr — verify this is intended. */
332 dst->value = apic->base_addr;
333 v3_unlock(apic->lock);
/* MSR-write hook for IA32_APIC_BASE: move the APIC MMIO window. Deletes the
 * memory hook at the old base address and re-installs apic_read/apic_write
 * over one 4KB page at the new base taken from the written value. */
337 // MSR handler - locks apic itself
338 static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, void * priv_data) {
339 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
340 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
341 struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id, apic->base_addr);
344 PrintDebug("apic %u: core %u: MSR write\n", apic->lapic_id.val, core->cpu_id);
/* The old base must already be hooked; otherwise our state is inconsistent. */
346 if (old_reg == NULL) {
348 PrintError("apic %u: core %u: APIC Base address region does not exit...\n",
349 apic->lapic_id.val, core->cpu_id);
355 v3_delete_mem_region(core->vm_info, old_reg);
/* NOTE(review): the raw written value is taken as the base without masking the
 * low flag bits of IA32_APIC_BASE — confirm callers pass a page-aligned value. */
357 apic->base_addr = src.value;
359 if (v3_hook_full_mem(core->vm_info, core->cpu_id, apic->base_addr,
360 apic->base_addr + PAGE_SIZE_4KB,
361 apic_read, apic_write, apic_dev) == -1) {
362 PrintError("apic %u: core %u: Could not hook new APIC Base address\n",
363 apic->lapic_id.val, core->cpu_id);
364 v3_unlock(apic->lock);
368 v3_unlock(apic->lock);
/* Raise vector 'irq_num' in this APIC's 256-bit IRR.
 * Vectors 0-15 are architecturally reserved and rejected, as is anything
 * above 255. If the vector is already pending it coalesces (no-op); if it is
 * enabled in the IER the IRR bit is set. */
373 // irq_num is the bit offset into a 256 bit buffer...
376 // 0 = OK, no interrupt needed now
377 // 1 = OK, interrupt needed now
378 // the caller is expected to have locked the apic
379 static int activate_apic_irq_nolock(struct apic_state * apic, uint32_t irq_num) {
/* Split the vector into byte index (major) and bit-within-byte (minor). */
380 int major_offset = (irq_num & ~0x00000007) >> 3;
381 int minor_offset = irq_num & 0x00000007;
382 uint8_t * req_location = apic->int_req_reg + major_offset;
383 uint8_t * en_location = apic->int_en_reg + major_offset;
384 uint8_t flag = 0x1 << minor_offset;
387 if (irq_num <= 15 || irq_num>255) {
388 PrintError("apic %u: core %d: Attempting to raise an invalid interrupt: %d\n",
389 apic->lapic_id.val, apic->core->cpu_id, irq_num);
394 PrintDebug("apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
/* Already pending: coalesce with the outstanding request. */
396 if (*req_location & flag) {
397 PrintDebug("Interrupt %d coallescing\n", irq_num);
/* Enabled in the IER: mark it requested in the IRR. */
401 if (*en_location & flag) {
402 *req_location |= flag;
405 PrintError("apic %u: core %d: activate_apic_irq_nolock to deliver irq 0x%x when in_icr=1\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
/* Masked in the IER: the interrupt is dropped. */
411 PrintDebug("apic %u: core %d: Interrupt not enabled... %.2x\n",
412 apic->lapic_id.val, apic->core->cpu_id,*en_location);
/* Return the highest-numbered vector currently set in the in-service register
 * (ISR), scanning the 256-bit bitmap from the top byte down. Higher vector
 * numbers have higher priority. */
419 // Caller is expected to have locked the apic
420 static int get_highest_isr(struct apic_state * apic) {
423 // We iterate backwards to find the highest priority
424 for (i = 31; i >= 0; i--) {
425 uint8_t * svc_major = apic->int_svc_reg + i;
/* Skip empty bytes quickly; otherwise find the highest set bit. */
427 if ((*svc_major) & 0xff) {
428 for (j = 7; j >= 0; j--) {
429 uint8_t flag = 0x1 << j;
430 if ((*svc_major) & flag) {
431 return ((i * 8) + j);
/* Return the highest-numbered vector currently pending in the request register
 * (IRR); identical scan to get_highest_isr() but over int_req_reg. */
441 // Caller is expected to have locked the apic
442 static int get_highest_irr(struct apic_state * apic) {
445 // We iterate backwards to find the highest priority
446 for (i = 31; i >= 0; i--) {
447 uint8_t * req_major = apic->int_req_reg + i;
/* Skip empty bytes quickly; otherwise find the highest set bit. */
449 if ((*req_major) & 0xff) {
450 for (j = 7; j >= 0; j--) {
451 uint8_t flag = 0x1 << j;
452 if ((*req_major) & flag) {
453 return ((i * 8) + j);
/* Handle a guest write to the EOI register: clear the highest-priority vector
 * from the in-service register. On Cray XT builds, vector 238 additionally
 * gets an explicit ack. An EOI with nothing in service is a spurious EOI. */
464 // Caller is expected to have locked the apic
465 static int apic_do_eoi(struct apic_state * apic) {
466 int isr_irq = get_highest_isr(apic);
/* Locate the ISR bit for the highest in-service vector and clear it. */
469 int major_offset = (isr_irq & ~0x00000007) >> 3;
470 int minor_offset = isr_irq & 0x00000007;
471 uint8_t flag = 0x1 << minor_offset;
472 uint8_t * svc_location = apic->int_svc_reg + major_offset;
474 PrintDebug("apic %u: core ?: Received APIC EOI for IRQ %d\n", apic->lapic_id.val,isr_irq);
476 *svc_location &= ~flag;
478 #ifdef CONFIG_CRAY_XT
480 if ((isr_irq == 238) ||
482 PrintDebug("apic %u: core ?: Acking IRQ %d\n", apic->lapic_id.val,isr_irq);
485 if (isr_irq == 238) {
490 //PrintError("apic %u: core ?: Spurious EOI...\n",apic->lapic_id.val);
/* Raise an internally-generated (LVT-sourced) interrupt: look up the vector,
 * delivery mode, and mask bit from the LVT entry matching 'int_type', then
 * inject via activate_apic_irq_nolock() for fixed delivery. Timer and error
 * interrupts are always fixed-delivery; the other sources honor their LVT
 * msg_type field. Masked entries are silently dropped. */
496 // Caller is expected to have locked the apic
497 static int activate_internal_irq_nolock(struct apic_state * apic, apic_irq_type_t int_type) {
498 uint32_t vec_num = 0;
499 uint32_t del_mode = 0;
/* APIC_TMR_INT: timer LVT (delivery mode forced to fixed). */
505 vec_num = apic->tmr_vec_tbl.vec;
506 del_mode = APIC_FIXED_DELIVERY;
507 masked = apic->tmr_vec_tbl.mask;
/* APIC_THERM_INT: thermal LVT. */
510 vec_num = apic->therm_loc_vec_tbl.vec;
511 del_mode = apic->therm_loc_vec_tbl.msg_type;
512 masked = apic->therm_loc_vec_tbl.mask;
/* APIC_PERF_INT: performance-counter LVT. */
515 vec_num = apic->perf_ctr_loc_vec_tbl.vec;
516 del_mode = apic->perf_ctr_loc_vec_tbl.msg_type;
517 masked = apic->perf_ctr_loc_vec_tbl.mask;
/* APIC_LINT0_INT / APIC_LINT1_INT: local interrupt pins. */
520 vec_num = apic->lint0_vec_tbl.vec;
521 del_mode = apic->lint0_vec_tbl.msg_type;
522 masked = apic->lint0_vec_tbl.mask;
525 vec_num = apic->lint1_vec_tbl.vec;
526 del_mode = apic->lint1_vec_tbl.msg_type;
527 masked = apic->lint1_vec_tbl.mask;
/* APIC_ERR_INT: error LVT (delivery mode forced to fixed). */
530 vec_num = apic->err_vec_tbl.vec;
531 del_mode = APIC_FIXED_DELIVERY;
532 masked = apic->err_vec_tbl.mask;
535 PrintError("apic %u: core ?: Invalid APIC interrupt type\n", apic->lapic_id.val);
539 // interrupt is masked, don't send
541 PrintDebug("apic %u: core ?: Inerrupt is masked\n", apic->lapic_id.val);
/* Only fixed delivery is implemented for internal sources. */
545 if (del_mode == APIC_FIXED_DELIVERY) {
546 //PrintDebug("Activating internal APIC IRQ %d\n", vec_num);
547 return activate_apic_irq_nolock(apic, vec_num);
549 PrintError("apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
/* Cluster-model logical addressing: the high nibble of the MDA must equal the
 * destination's cluster ID, and the low nibble must intersect its set bits.
 * Returns whether this apic should accept the IPI. */
555 // Caller is expected to have locked the destination apic
556 static inline int should_deliver_cluster_ipi(struct guest_info * dst_core,
557 struct apic_state * dst_apic, uint8_t mda) {
559 if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) && /* (I am in the cluster and */
560 ((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { /* I am in the set) */
562 PrintDebug("apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
563 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
564 dst_apic->log_dst.dst_log_id);
568 PrintDebug("apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
569 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
570 dst_apic->log_dst.dst_log_id);
/* Flat-model logical addressing: accept when any MDA bit intersects the
 * destination's logical ID bitmap. */
575 // Caller is expected to have locked the destination apic
576 static inline int should_deliver_flat_ipi(struct guest_info * dst_core,
577 struct apic_state * dst_apic, uint8_t mda) {
579 if (dst_apic->log_dst.dst_log_id & mda) { // I am in the set
581 PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
582 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
583 dst_apic->log_dst.dst_log_id);
589 PrintDebug("apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
590 dst_apic->lapic_id.val, dst_core->cpu_id, mda,
591 dst_apic->log_dst.dst_log_id);
/* Decide whether a logically-addressed IPI with message-destination-address
 * 'mda' should be delivered to this apic. Dispatches on the DFR model:
 * 0xf = flat model, 0x0 = cluster model; anything else is an error.
 * Broadcast MDAs are always delivered (handled before the model check). */
597 // Caller is expected to have locked the destination apic
598 static int should_deliver_ipi(struct guest_info * dst_core,
599 struct apic_state * dst_apic, uint8_t mda) {
602 if (dst_apic->dst_fmt.model == 0xf) {
605 /* always deliver broadcast */
609 return should_deliver_flat_ipi(dst_core, dst_apic, mda);
611 } else if (dst_apic->dst_fmt.model == 0x0) {
614 /* always deliver broadcast */
618 return should_deliver_cluster_ipi(dst_core, dst_apic, mda);
621 PrintError("apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
622 dst_apic->lapic_id.val, dst_core->cpu_id, dst_apic->dst_fmt.model);
/* Deliver one IPI to a single, already-chosen destination apic.
 * Fixed/lowest-priority: raise the vector in the destination's IRR and, if the
 *   target is a different apic, kick its host core so it notices promptly.
 * INIT: advance the target's bootstrap state machine from INIT_ST to SIPI.
 * SIPI: point the target's CS at vector<<12 (real-mode startup convention)
 *   and mark the core runnable / STARTED.
 * SMI/NMI/ExtInt/reserved modes are unsupported and reported as errors. */
627 // Caller is expected to have locked the destination apic
628 // Only the src_apic pointer is used
629 static int deliver_ipi(struct apic_state * src_apic,
630 struct apic_state * dst_apic,
631 uint32_t vector, uint8_t del_mode) {
634 struct guest_info * dst_core = dst_apic->core;
639 case APIC_FIXED_DELIVERY:
640 case APIC_LOWEST_DELIVERY:
642 // caller needs to have decided which apic to deliver to!
644 PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->cpu_id);
/* do_xcall==1 means the target now has a deliverable interrupt pending. */
646 do_xcall=activate_apic_irq_nolock(dst_apic, vector);
649 PrintError("Failed to activate apic irq!\n");
653 if (do_xcall && (dst_apic != src_apic)) {
654 // Assume core # is same as logical processor for now
655 // TODO FIX THIS FIX THIS
656 // THERE SHOULD BE: guestapicid->virtualapicid map,
657 // cpu_id->logical processor map
658 // host maintains logical proc->physical proc
659 PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
661 #ifdef CONFIG_MULTITHREAD_OS
662 v3_interrupt_cpu(dst_core->vm_info, dst_core->cpu_id, 0);
670 case APIC_INIT_DELIVERY: {
672 PrintDebug(" INIT delivery to core %u\n", dst_core->cpu_id);
674 // TODO: any APIC reset on dest core (shouldn't be needed, but not sure...)
/* INIT while not in INIT_ST is tolerated: guests commonly send
 * INIT-INIT-SIPI, and the second INIT (or the deassert) lands here. */
677 if (dst_apic->ipi_state != INIT_ST) {
678 PrintError(" Warning: core %u is not in INIT state (mode = %d), ignored (assuming this is the deassert)\n",
679 dst_core->cpu_id, dst_apic->ipi_state);
680 // Only a warning, since INIT INIT SIPI is common
684 // We transition the target core to SIPI state
685 dst_apic->ipi_state = SIPI; // note: locking should not be needed here
687 // That should be it since the target core should be
688 // waiting in host on this transition
689 // either it's on another core or on a different preemptive thread
690 // in both cases, it will quickly notice this transition
691 // in particular, we should not need to force an exit here
693 PrintDebug(" INIT delivery done\n");
697 case APIC_SIPI_DELIVERY: {
/* SIPI is only meaningful after an INIT has put the core in SIPI state. */
700 if (dst_apic->ipi_state != SIPI) {
701 PrintError(" core %u is not in SIPI state (mode = %d), ignored!\n",
702 dst_core->cpu_id, dst_apic->ipi_state);
706 // Write the RIP, CS, and descriptor
707 // assume the rest is already good to go
709 // vector VV -> rip at 0
711 // This means we start executing at linear address VV000
713 // So the selector needs to be VV00
714 // and the base needs to be VV000
717 dst_core->segments.cs.selector = vector << 8;
718 dst_core->segments.cs.limit = 0xffff;
719 dst_core->segments.cs.base = vector << 12;
721 PrintDebug(" SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
722 vector, dst_core->segments.cs.selector, dst_core->cpu_id);
723 // Maybe need to adjust the APIC?
725 // We transition the target core to SIPI state
726 dst_core->core_run_state = CORE_RUNNING; // note: locking should not be needed here
727 dst_apic->ipi_state = STARTED;
729 // As with INIT, we should not need to do anything else
731 PrintDebug(" SIPI delivery done\n");
/* Remaining delivery modes are not implemented. */
735 case APIC_SMI_DELIVERY:
736 case APIC_RES1_DELIVERY: // reserved
737 case APIC_NMI_DELIVERY:
738 case APIC_EXTINT_DELIVERY: // ExtInt
740 PrintError("IPI %d delivery is unsupported\n", del_mode);
/* Map a physical-mode ICR destination to an apic_state. Fast path: if the
 * destination ID happens to index an apic whose lapic_id matches, use it
 * directly; otherwise fall back to a linear search over all apics.
 * NOTE(review): the fast path's guard uses 'icr->dst > 0', so destination 0
 * always takes the linear search — verify that asymmetry is intended. */
748 static struct apic_state * find_physical_apic(struct apic_dev_state *apic_dev, struct int_cmd_reg *icr)
752 if (icr->dst >0 && icr->dst < apic_dev->num_apics) {
753 // see if it simply is the core id
754 if (apic_dev->apics[icr->dst].lapic_id.val == icr->dst) {
755 return &(apic_dev->apics[icr->dst]);
759 for (i=0;i<apic_dev->num_apics;i++) {
760 if (apic_dev->apics[i].lapic_id.val == icr->dst) {
761 return &(apic_dev->apics[i]);
/* Route an IPI described by 'icr' from src_apic (may be NULL for a generic,
 * non-apic sender) to its destination(s). Handles all four shorthand modes:
 *   NONE        - physical (single apic via find_physical_apic) or logical
 *                 (MDA match per apic; lowest-priority mode scans for the
 *                 apic with the smallest TPR and delivers to it alone)
 *   SELF        - deliver back to src_apic
 *   ALL / ALL_BUT_ME - deliver to every apic (optionally excluding src)
 * Remember that v3_lock/v3_unlock are currently no-op macros, so the locking
 * discipline described below is intent, not enforcement. */
769 // route_ipi is responsible for all locking
770 // the assumption is that you enter with no locks
771 // there is a global lock for the icc bus, so only
772 // one route_ipi progresses at any time
773 // destination apics are locked as needed
774 // if multiple apic locks are acquired at any point,
775 // this is done in the order of the array, so no
776 // deadlock should be possible
777 static int route_ipi(struct apic_dev_state * apic_dev,
778 struct apic_state * src_apic,
779 struct int_cmd_reg * icr) {
780 struct apic_state * dest_apic = NULL;
783 //v3_lock(apic_dev->ipi_lock); // this may not be needed
784 // now I know only one IPI is being routed, this one
785 // also, I do not have any apic locks
786 // I need to acquire locks on pairs of src/dest apics
787 // and I will do that using the total order
788 // given by their cores
791 PrintDebug("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
792 deliverymode_str[icr->del_mode],
795 (icr->dst_mode == 0) ? "(physical)" : "(logical)",
796 shorthand_str[icr->dst_shorthand],
802 V3_Print("apic: IPI %s %u from apic %p to %s %s %u (icr=0x%llx)\n",
803 deliverymode_str[icr->del_mode],
806 (icr->dst_mode == 0) ? "(physical)" : "(logical)",
807 shorthand_str[icr->dst_shorthand],
815 switch (icr->dst_shorthand) {
817 case APIC_SHORTHAND_NONE: // no shorthand
/* Physical destination: exactly one apic, found by ID. */
818 if (icr->dst_mode == APIC_DEST_PHYSICAL) {
820 dest_apic=find_physical_apic(apic_dev,icr);
822 if (dest_apic==NULL) {
823 PrintError("apic: Attempted send to unregistered apic id=%u\n", icr->dst);
824 goto route_ipi_out_bad;
829 //V3_Print("apic: physical destination of %u (apic %u at 0x%p)\n", icr->dst,dest_apic->lapic_id.val,dest_apic);
831 v3_lock(dest_apic->lock);
833 if (deliver_ipi(src_apic, dest_apic,
834 icr->vec, icr->del_mode) == -1) {
835 PrintError("apic: Could not deliver IPI\n");
836 v3_unlock(dest_apic->lock);
837 goto route_ipi_out_bad;
840 v3_unlock(dest_apic->lock);
842 //V3_Print("apic: done\n");
/* Logical destination: match the MDA against every apic. */
844 } else if (icr->dst_mode == APIC_DEST_LOGICAL) {
846 if (icr->del_mode!=APIC_LOWEST_DELIVERY ) {
847 // logical, but not lowest priority
848 // we immediately trigger
849 // fixed, smi, reserved, nmi, init, sipi, etc
852 uint8_t mda = icr->dst;
854 for (i = 0; i < apic_dev->num_apics; i++) {
856 dest_apic = &(apic_dev->apics[i]);
858 v3_lock(dest_apic->lock);
860 int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
862 if (del_flag == -1) {
863 PrintError("apic: Error checking delivery mode\n");
864 v3_unlock(dest_apic->lock);
865 goto route_ipi_out_bad;
866 } else if (del_flag == 1) {
867 if (deliver_ipi(src_apic, dest_apic,
868 icr->vec, icr->del_mode) == -1) {
869 PrintError("apic: Error: Could not deliver IPI\n");
870 v3_unlock(dest_apic->lock);
871 goto route_ipi_out_bad;
875 v3_unlock(dest_apic->lock);
877 } else { //APIC_LOWEST_DELIVERY
878 // logical, lowest priority
879 // scan, keeping a lock on the current best, then trigger
882 struct apic_state * cur_best_apic = NULL;
884 uint8_t mda = icr->dst;
889 // Note that even if there are multiple concurrent
890 // copies of this loop executing, they are all
891 // locking in the same order
/* Scan all matching apics, tracking the one with the lowest TPR;
 * the current best stays locked across iterations so no other
 * router can change it before delivery. */
893 for (i = 0; i < apic_dev->num_apics; i++) {
895 dest_apic = &(apic_dev->apics[i]);
897 v3_lock(dest_apic->lock);
900 int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
902 if (del_flag == -1) {
903 PrintError("apic: Error checking delivery mode\n");
904 v3_unlock(dest_apic->lock);
905 if (cur_best_apic && cur_best_apic!=dest_apic) {
906 v3_unlock(cur_best_apic->lock);
908 goto route_ipi_out_bad;
909 } else if (del_flag == 1) {
910 // update priority for lowest priority scan
911 if (!cur_best_apic) {
912 cur_best_apic=dest_apic; // note we leave it locked
913 have_cur_lock=0; // we will unlock as cur_best_apic
914 } else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
915 // we now unlock the current best one and then switch
916 // so in the end we have a lock on the new cur_best_apic
917 v3_unlock(cur_best_apic->lock);
918 cur_best_apic=dest_apic;
919 have_cur_lock=0; // will unlock as cur_best_apic
923 v3_unlock(dest_apic->lock);
927 // now we will deliver to the best one if it exists
929 if (!cur_best_apic) {
930 PrintDebug("apic: lowest priority deliver, but no destinations!\n");
932 if (deliver_ipi(src_apic, cur_best_apic,
933 icr->vec, icr->del_mode) == -1) {
934 PrintError("apic: Error: Could not deliver IPI\n");
935 v3_unlock(cur_best_apic->lock);
936 goto route_ipi_out_bad;
938 v3_unlock(cur_best_apic->lock);
940 //V3_Print("apic: logical, lowest priority delivery to apic %u\n",cur_best_apic->lapic_id.val);
947 case APIC_SHORTHAND_SELF: // self
949 /* I assume I am already locked! */
951 if (src_apic == NULL) { /* this is not an apic, but it's trying to send to itself??? */
952 PrintError("apic: Sending IPI to self from generic IPI sender\n");
956 v3_lock(src_apic->lock);
958 if (icr->dst_mode == APIC_DEST_PHYSICAL) { /* physical delivery */
959 if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
960 PrintError("apic: Could not deliver IPI to self (physical)\n");
961 v3_unlock(src_apic->lock);
962 goto route_ipi_out_bad;
964 } else if (icr->dst_mode == APIC_DEST_LOGICAL) { /* logical delivery */
965 PrintError("apic: use of logical delivery in self (untested)\n");
966 if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
967 PrintError("apic: Could not deliver IPI to self (logical)\n");
968 v3_unlock(src_apic->lock);
969 goto route_ipi_out_bad;
972 v3_unlock(src_apic->lock);
975 case APIC_SHORTHAND_ALL:
976 case APIC_SHORTHAND_ALL_BUT_ME: { /* all and all-but-me */
977 /* assuming that logical versus physical doesn't matter
978 although it is odd that both are used */
982 for (i = 0; i < apic_dev->num_apics; i++) {
983 dest_apic = &(apic_dev->apics[i]);
/* ALL_BUT_ME skips the sender; ALL includes it. */
986 if ((dest_apic != src_apic) || (icr->dst_shorthand == APIC_SHORTHAND_ALL)) {
987 v3_lock(dest_apic->lock);
988 if (deliver_ipi(src_apic, dest_apic, icr->vec, icr->del_mode) == -1) {
989 PrintError("apic: Error: Could not deliver IPI\n");
990 v3_unlock(dest_apic->lock);
991 goto route_ipi_out_bad;
993 v3_unlock(dest_apic->lock);
1000 PrintError("apic: Error routing IPI, invalid Mode (%d)\n", icr->dst_shorthand);
1001 goto route_ipi_out_bad;
1005 // route_ipi_out_good:
1006 //v3_unlock(apic_dev->ipi_lock);
1010 //v3_unlock(apic_dev->ipi_lock);
/* MMIO read handler for the APIC page. Translates guest_addr into a register
 * offset, loads the 32-bit register value into 'val' via the big switch, then
 * copies 1, 2, or 4 bytes into 'dst' depending on 'length'. Reads with the
 * APIC disabled in the base MSR are rejected. */
1015 // External function, expected to acquire lock on apic
1016 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
1017 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1018 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1019 addr_t reg_addr = guest_addr - apic->base_addr;
1020 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
1023 v3_lock(apic->lock);
1025 PrintDebug("apic %u: core %u: at %p: Read apic address space (%p)\n",
1026 apic->lapic_id.val, core->cpu_id, apic, (void *)guest_addr);
1028 if (msr->apic_enable == 0) {
1029 PrintError("apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
1030 apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
1032 goto apic_read_out_bad;
1036 /* Because "May not be supported" doesn't matter to Linux developers... */
1037 /* if (length != 4) { */
1038 /* PrintError("Invalid apic read length (%d)\n", length); */
/* Dispatch on the dword-aligned register offset. */
1042 switch (reg_addr & ~0x3) {
1044 // Reads from an architecturally write-only register are tolerated here
1046 // PrintError("Attempting to read from write only register\n");
1051 case APIC_ID_OFFSET:
1052 val = apic->lapic_id.val;
1054 case APIC_VERSION_OFFSET:
1055 val = apic->apic_ver.val;
1058 val = apic->task_prio.val;
1061 val = apic->arb_prio.val;
1064 val = apic->proc_prio.val;
1066 case REMOTE_READ_OFFSET:
1067 val = apic->rem_rd_data;
1070 val = apic->log_dst.val;
1073 val = apic->dst_fmt.val;
1075 case SPURIOUS_INT_VEC_OFFSET:
1076 val = apic->spurious_int.val;
1079 val = apic->err_status.val;
1081 case TMR_LOC_VEC_TBL_OFFSET:
1082 val = apic->tmr_vec_tbl.val;
1084 case LINT0_VEC_TBL_OFFSET:
1085 val = apic->lint0_vec_tbl.val;
1087 case LINT1_VEC_TBL_OFFSET:
1088 val = apic->lint1_vec_tbl.val;
1090 case ERR_VEC_TBL_OFFSET:
1091 val = apic->err_vec_tbl.val;
1093 case TMR_INIT_CNT_OFFSET:
1094 val = apic->tmr_init_cnt;
1096 case TMR_DIV_CFG_OFFSET:
1097 val = apic->tmr_div_cfg.val;
/* IER: 256-bit enable bitmap read as eight 32-bit chunks. */
1101 val = *(uint32_t *)(apic->int_en_reg);
1104 val = *(uint32_t *)(apic->int_en_reg + 4);
1107 val = *(uint32_t *)(apic->int_en_reg + 8);
1110 val = *(uint32_t *)(apic->int_en_reg + 12);
1113 val = *(uint32_t *)(apic->int_en_reg + 16);
1116 val = *(uint32_t *)(apic->int_en_reg + 20);
1119 val = *(uint32_t *)(apic->int_en_reg + 24);
1122 val = *(uint32_t *)(apic->int_en_reg + 28);
/* ISR: 256-bit in-service bitmap. */
1126 val = *(uint32_t *)(apic->int_svc_reg);
1129 val = *(uint32_t *)(apic->int_svc_reg + 4);
1132 val = *(uint32_t *)(apic->int_svc_reg + 8);
1135 val = *(uint32_t *)(apic->int_svc_reg + 12);
1138 val = *(uint32_t *)(apic->int_svc_reg + 16);
1141 val = *(uint32_t *)(apic->int_svc_reg + 20);
1144 val = *(uint32_t *)(apic->int_svc_reg + 24);
1147 val = *(uint32_t *)(apic->int_svc_reg + 28);
/* TMR: 256-bit trigger-mode bitmap. */
1151 val = *(uint32_t *)(apic->trig_mode_reg);
1154 val = *(uint32_t *)(apic->trig_mode_reg + 4);
1157 val = *(uint32_t *)(apic->trig_mode_reg + 8);
1160 val = *(uint32_t *)(apic->trig_mode_reg + 12);
1163 val = *(uint32_t *)(apic->trig_mode_reg + 16);
1166 val = *(uint32_t *)(apic->trig_mode_reg + 20);
1169 val = *(uint32_t *)(apic->trig_mode_reg + 24);
1172 val = *(uint32_t *)(apic->trig_mode_reg + 28);
/* IRR: 256-bit request bitmap. */
1176 val = *(uint32_t *)(apic->int_req_reg);
1179 val = *(uint32_t *)(apic->int_req_reg + 4);
1182 val = *(uint32_t *)(apic->int_req_reg + 8);
1185 val = *(uint32_t *)(apic->int_req_reg + 12);
1188 val = *(uint32_t *)(apic->int_req_reg + 16);
1191 val = *(uint32_t *)(apic->int_req_reg + 20);
1194 val = *(uint32_t *)(apic->int_req_reg + 24);
1197 val = *(uint32_t *)(apic->int_req_reg + 28);
1199 case TMR_CUR_CNT_OFFSET:
1200 val = apic->tmr_cur_cnt;
1203 // We are not going to implement these....
1204 case THERM_LOC_VEC_TBL_OFFSET:
1205 val = apic->therm_loc_vec_tbl.val;
1207 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1208 val = apic->perf_ctr_loc_vec_tbl.val;
1213 // handled registers
1214 case INT_CMD_LO_OFFSET:
1215 val = apic->int_cmd.lo;
1217 case INT_CMD_HI_OFFSET:
1218 val = apic->int_cmd.hi;
1221 // handle current timer count
1223 // Unhandled Registers
1224 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1225 val = apic->ext_intr_vec_tbl[0].val;
1227 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1228 val = apic->ext_intr_vec_tbl[1].val;
1230 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1231 val = apic->ext_intr_vec_tbl[2].val;
1233 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1234 val = apic->ext_intr_vec_tbl[3].val;
1238 case EXT_APIC_FEATURE_OFFSET:
1239 case EXT_APIC_CMD_OFFSET:
1243 PrintError("apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
1244 apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
1245 goto apic_read_out_bad;
/* 1-byte read: return the addressed byte of the register value. */
1250 uint_t byte_addr = reg_addr & 0x3;
1251 uint8_t * val_ptr = (uint8_t *)dst;
1253 *val_ptr = *(((uint8_t *)&val) + byte_addr);
/* 2-byte read. NOTE(review): this looks buggy — it only allows offset 3
 * (a 2-byte access there crosses the register boundary) and then indexes
 * 'val' in 16-bit units with a byte offset, reading past 'val' when
 * byte_addr == 3. Verify against the original upstream source. */
1255 } else if ((length == 2) &&
1256 ((reg_addr & 0x3) == 0x3)) {
1257 uint_t byte_addr = reg_addr & 0x3;
1258 uint16_t * val_ptr = (uint16_t *)dst;
1259 *val_ptr = *(((uint16_t *)&val) + byte_addr);
/* 4-byte read: the common, architecturally-expected access size. */
1261 } else if (length == 4) {
1262 uint32_t * val_ptr = (uint32_t *)dst;
1266 PrintError("apic %u: core %u: Invalid apic read length (%d)\n",
1267 apic->lapic_id.val, core->cpu_id, length);
1268 goto apic_read_out_bad;
1271 PrintDebug("apic %u: core %u: Read finished (val=%x)\n",
1272 apic->lapic_id.val, core->cpu_id, *(uint32_t *)dst);
1275 // apic_read_out_good:
1276 v3_unlock(apic->lock);
1280 v3_unlock(apic->lock);
/*
 * apic_write - MMIO write handler for the local APIC register page.
 * Decodes (guest_addr - base_addr) into a register offset and updates the
 * per-core APIC state.  Writes with the APIC disabled, non-dword lengths,
 * and read-only registers are rejected/ignored.  ICR-low writes dispatch
 * the IPI via route_ipi() (which does its own destination locking).
 * Returns length on success, -1 on error (per the elided tail paths).
 */
1288 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data) {
1289 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1290 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]); /* this core's APIC */
1291 addr_t reg_addr = guest_addr - apic->base_addr; /* offset into the 4KB register page */
1292 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
1293 uint32_t op_val = *(uint32_t *)src;
1296 v3_lock(apic->lock);
1298 PrintDebug("apic %u: core %u: at %p and priv_data is at %p\n",
1299 apic->lapic_id.val, core->cpu_id, apic, priv_data);
1301 PrintDebug("apic %u: core %u: write to address space (%p) (val=%x)\n",
1302 apic->lapic_id.val, core->cpu_id, (void *)guest_addr, *(uint32_t *)src);
1304 if (msr->apic_enable == 0) {
1305 PrintError("apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
1306 apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
1307 goto apic_write_out_bad;
/* FIX: arguments were (lapic_id, length, cpu_id) -- swapped relative to the
 * "apic %u: core %u: ... (%d)" format string; reordered to match, consistent
 * with the equivalent message in apic_read(). */
1312 PrintError("apic %u: core %u: Invalid apic write length (%d)\n",
1313 apic->lapic_id.val, core->cpu_id, length);
1314 goto apic_write_out_bad;
/* Read-only registers: log the attempt but deliberately do not fail. */
1318 case REMOTE_READ_OFFSET:
1319 case APIC_VERSION_OFFSET:
1346 case EXT_APIC_FEATURE_OFFSET:
1348 PrintError("apic %u: core %u: Attempting to write to read only register %p (error)\n",
1349 apic->lapic_id.val, core->cpu_id, (void *)reg_addr);
1350 // goto apic_write_out_bad;
/* Simple "store the value" registers. */
1355 case APIC_ID_OFFSET:
1356 //V3_Print("apic %u: core %u: my id is being changed to %u\n",
1357 // apic->lapic_id.val, core->cpu_id, op_val);
1359 apic->lapic_id.val = op_val;
1362 apic->task_prio.val = op_val;
1365 PrintDebug("apic %u: core %u: setting log_dst.val to 0x%x\n",
1366 apic->lapic_id.val, core->cpu_id, op_val);
1367 apic->log_dst.val = op_val;
1370 apic->dst_fmt.val = op_val;
1372 case SPURIOUS_INT_VEC_OFFSET:
1373 apic->spurious_int.val = op_val;
1376 apic->err_status.val = op_val;
/* Local vector table entries. */
1378 case TMR_LOC_VEC_TBL_OFFSET:
1379 apic->tmr_vec_tbl.val = op_val;
1381 case THERM_LOC_VEC_TBL_OFFSET:
1382 apic->therm_loc_vec_tbl.val = op_val;
1384 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1385 apic->perf_ctr_loc_vec_tbl.val = op_val;
1387 case LINT0_VEC_TBL_OFFSET:
1388 apic->lint0_vec_tbl.val = op_val;
1390 case LINT1_VEC_TBL_OFFSET:
1391 apic->lint1_vec_tbl.val = op_val;
1393 case ERR_VEC_TBL_OFFSET:
1394 apic->err_vec_tbl.val = op_val;
/* Timer: writing the initial count also reloads the current count. */
1396 case TMR_INIT_CNT_OFFSET:
1397 apic->tmr_init_cnt = op_val;
1398 apic->tmr_cur_cnt = op_val;
1400 case TMR_CUR_CNT_OFFSET:
1401 apic->tmr_cur_cnt = op_val;
1403 case TMR_DIV_CFG_OFFSET:
1404 apic->tmr_div_cfg.val = op_val;
1408 // Enable mask (256 bits)
/* One 32-bit slice of the 256-bit interrupt enable mask per register. */
1410 *(uint32_t *)(apic->int_en_reg) = op_val;
1413 *(uint32_t *)(apic->int_en_reg + 4) = op_val;
1416 *(uint32_t *)(apic->int_en_reg + 8) = op_val;
1419 *(uint32_t *)(apic->int_en_reg + 12) = op_val;
1422 *(uint32_t *)(apic->int_en_reg + 16) = op_val;
1425 *(uint32_t *)(apic->int_en_reg + 20) = op_val;
1428 *(uint32_t *)(apic->int_en_reg + 24) = op_val;
1431 *(uint32_t *)(apic->int_en_reg + 28) = op_val;
1434 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1435 apic->ext_intr_vec_tbl[0].val = op_val;
1437 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1438 apic->ext_intr_vec_tbl[1].val = op_val;
1440 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1441 apic->ext_intr_vec_tbl[2].val = op_val;
1443 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1444 apic->ext_intr_vec_tbl[3].val = op_val;
1450 // do eoi (we already have the lock)
1454 case INT_CMD_LO_OFFSET: {
1455 // execute command (we already have the lock)
1457 struct int_cmd_reg tmp_icr;
1459 apic->int_cmd.lo = op_val;
/* Snapshot the full ICR before dropping the lock, so route_ipi works on
 * a stable copy even if the guest races another write. */
1461 tmp_icr=apic->int_cmd;
1463 // V3_Print("apic %u: core %u: sending cmd 0x%llx to apic %u\n",
1464 // apic->lapic_id.val, core->cpu_id,
1465 // apic->int_cmd.val, apic->int_cmd.dst);
1469 v3_unlock(apic->lock);
1471 // route_ipi is responsible for locking apics, so we go in unlocked)
1472 if (route_ipi(apic_dev, apic, &tmp_icr) == -1) {
1473 PrintError("IPI Routing failure\n");
/* NOTE(review): we already unlocked above, and the apic_write_out_bad path
 * unlocks again -- a latent double-unlock, currently masked because the
 * v3_lock/v3_unlock macros are stubbed out at the top of this file.
 * Revisit before real locking is re-enabled. */
1474 goto apic_write_out_bad;
1477 // v3_lock(apic->lock); // expected for leaving this function
1482 case INT_CMD_HI_OFFSET: {
1483 // already have the lock
/* Warn if the guest overwrites ICR-high while an IPI is mid-flight. */
1485 PrintError("apic %u: core %u: writing command high=0x%x while in_icr=1\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
1488 apic->int_cmd.hi = op_val;
1489 //V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
1495 // Unhandled Registers
1496 case EXT_APIC_CMD_OFFSET:
1499 PrintError("apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
1500 apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
1502 goto apic_write_out_bad;
1505 PrintDebug("apic %u: core %u: Write finished\n", apic->lapic_id.val, core->cpu_id);
1507 // apic_write_out_good:
1508 v3_unlock(apic->lock);
1512 v3_unlock(apic->lock);
1518 /* Interrupt Controller Functions */
1520 // internally used, expects caller to lock
/*
 * apic_intr_pending_nolock - test whether this core's APIC has a deliverable
 * interrupt: the highest requested vector (IRR) exists and strictly outranks
 * the highest vector currently in service (ISR).  Caller must hold apic->lock.
 * NOTE(review): the return statements fall on lines elided from this excerpt;
 * presumably nonzero when deliverable, 0 otherwise -- confirm in the full file.
 */
1521 static int apic_intr_pending_nolock(struct guest_info * core, void * private_data) {
1522 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1523 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]); /* this core's APIC */
1524 int req_irq = get_highest_irr(apic); /* highest requested vector, negative if none */
1525 int svc_irq = get_highest_isr(apic); /* highest in-service vector, negative if none */
1527 // PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->cpu_id,req_irq,svc_irq);
1529 if ((req_irq >= 0) &&
1530 (req_irq > svc_irq)) {
1537 // externally visible, so must lock itself
/*
 * apic_intr_pending - externally visible entry point: takes the per-APIC
 * lock, delegates to apic_intr_pending_nolock(), releases the lock, and
 * (on an elided line) returns its result.
 */
1538 static int apic_intr_pending(struct guest_info * core, void * private_data) {
1539 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1540 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1543 v3_lock(apic->lock);
1545 rc=apic_intr_pending_nolock(core,private_data); /* rc declared on an elided line */
1547 v3_unlock(apic->lock);
1552 // Internal - no lock
/*
 * apic_get_intr_number_nolock - choose the vector to inject next: the highest
 * requested vector (IRR) wins when nothing is in service or when it strictly
 * outranks the highest in-service vector (ISR).  Caller must hold apic->lock.
 * NOTE(review): the actual return statements are on elided lines (presumably
 * req_irq, or -1 when nothing is deliverable) -- confirm in the full file.
 */
1553 static int apic_get_intr_number_nolock(struct guest_info * core, void * private_data) {
1554 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1555 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1556 int req_irq = get_highest_irr(apic); /* highest requested vector */
1557 int svc_irq = get_highest_isr(apic); /* highest in-service vector */
1559 if (svc_irq == -1) { /* nothing in service: any request is deliverable */
1561 } else if (svc_irq < req_irq) { /* request outranks what is in service */
1569 // Externally visible, so must lock itself
/*
 * apic_get_intr_number - externally visible entry point: takes the per-APIC
 * lock, delegates to apic_get_intr_number_nolock(), releases the lock, and
 * (on an elided line) returns its result.
 */
1570 static int apic_get_intr_number(struct guest_info * core, void * private_data) {
1571 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1572 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1575 v3_lock(apic->lock);
1577 rc=apic_get_intr_number_nolock(core,private_data); /* rc declared on an elided line */
1579 v3_unlock(apic->lock);
1586 // Here there is no source APIC, so there is no need to lock it
1587 // Furthermore, the expectation is that route_ipi will lock the destiation apic
/*
 * v3_apic_send_ipi - build an interrupt command register (ICR) image from a
 * generic IPI descriptor and hand it to route_ipi() for delivery.  There is
 * no source APIC here (src passed as NULL); route_ipi locks the destination.
 * Returns route_ipi()'s result (-1 on routing failure).
 */
1588 int v3_apic_send_ipi(struct v3_vm_info * vm, struct v3_gen_ipi * ipi, void * dev_data) {
1589 struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1590 (((struct vm_device *)dev_data)->private_data);
1591 struct int_cmd_reg tmp_icr;
1593 // zero out all the fields
/* (the actual zeroing statement is on a line elided from this excerpt) */
1596 tmp_icr.vec = ipi->vector;
1597 tmp_icr.del_mode = ipi->mode;
1598 tmp_icr.dst_mode = ipi->logical;
1599 tmp_icr.trig_mode = ipi->trigger_mode;
1600 tmp_icr.dst_shorthand = ipi->dst_shorthand;
1601 tmp_icr.dst = ipi->dst;
1603 // route_ipi is responsible for locking the destination apic
1604 return route_ipi(apic_dev, NULL, &tmp_icr);
/*
 * v3_apic_raise_intr - raise IRQ `irq` on the APIC of virtual core `dst`.
 * Locks the destination APIC, marks the IRQ pending, and if the target core
 * is running on a different physical CPU, kicks that CPU so it notices
 * (multithreaded builds only).  Error/return paths are on elided lines.
 */
1608 int v3_apic_raise_intr(struct v3_vm_info * vm, uint32_t irq, uint32_t dst, void * dev_data) {
1609 struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1610 (((struct vm_device*)dev_data)->private_data);
1611 struct apic_state * apic = &(apic_dev->apics[dst]); /* destination core's APIC */
1614 PrintDebug("apic %u core ?: raising interrupt IRQ %u (dst = %u).\n", apic->lapic_id.val, irq, dst);
1616 v3_lock(apic->lock);
1618 do_xcall=activate_apic_irq_nolock(apic, irq); /* do_xcall declared on an elided line */
1621 PrintError("Failed to activate apic irq\n");
1622 v3_unlock(apic->lock);
/* cross-call only if the destination runs on another physical CPU */
1626 if (do_xcall>0 && (V3_Get_CPU() != dst)) {
1627 #ifdef CONFIG_MULTITHREAD_OS
1628 v3_interrupt_cpu(vm, dst, 0);
1634 v3_unlock(apic->lock);
1639 // internal - caller must lock
/*
 * apic_begin_irq_nolock - the guest has accepted vector `irq`: move its bit
 * from the 256-bit request register (IRR) to the in-service register (ISR).
 * The IRR/ISR are byte arrays: major_offset selects the byte (vector / 8),
 * flag selects the bit (vector % 8).  A begin for a vector we never requested
 * is ignored.  Caller must hold apic->lock.
 */
1640 static int apic_begin_irq_nolock(struct guest_info * core, void * private_data, int irq) {
1641 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1642 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1643 int major_offset = (irq & ~0x00000007) >> 3; /* byte index: vector / 8 */
1644 int minor_offset = irq & 0x00000007; /* bit within the byte: vector % 8 */
1645 uint8_t *req_location = apic->int_req_reg + major_offset;
1646 uint8_t *svc_location = apic->int_svc_reg + major_offset;
1647 uint8_t flag = 0x01 << minor_offset;
1649 if (*req_location & flag) {
1650 // we will only pay attention to a begin irq if we
1651 // know that we initiated it!
1652 *svc_location |= flag; /* mark in-service */
1653 *req_location &= ~flag; /* clear the request */
1656 //PrintDebug("apic %u: core %u: begin irq for %d ignored since I don't own it\n",
1657 // apic->lapic_id.val, core->cpu_id, irq);
1663 // Since this is called, externally, it should lock the apic
/*
 * apic_begin_irq - externally visible entry point: takes the per-APIC lock,
 * delegates to apic_begin_irq_nolock(), releases the lock, and (on an elided
 * line) returns its result.
 */
1664 static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
1665 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1666 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1669 v3_lock(apic->lock);
1671 rc=apic_begin_irq_nolock(core,private_data,irq); /* rc declared on an elided line */
1673 v3_unlock(apic->lock);
1680 /* Timer Functions */
1681 // Caller will lock the apic
/*
 * apic_update_time_nolock - advance the emulated APIC timer by cpu_cycles
 * guest cycles.  Converts cycles to timer ticks using the divider config,
 * decrements the current count, and raises APIC_TMR_INT when it reaches
 * zero; in periodic mode the count is reloaded from the initial count.
 * Caller must hold apic->lock.  (Divider switch cases and several braces
 * fall on lines elided from this excerpt.)
 */
1682 static void apic_update_time_nolock(struct guest_info * core,
1683 uint64_t cpu_cycles, uint64_t cpu_freq,
1685 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1686 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1688 // The 32 bit GCC runtime is a pile of shit
1690 uint64_t tmr_ticks = 0;
1692 uint32_t tmr_ticks = 0;
1695 uint8_t tmr_div = *(uint8_t *)&(apic->tmr_div_cfg.val);
1696 uint_t shift_num = 0;
1699 // Check whether this is true:
1700 // -> If the Init count is zero then the timer is disabled
1701 // and doesn't just blitz interrupts to the CPU
1702 if ((apic->tmr_init_cnt == 0) ||
1703 ( (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_ONESHOT) &&
1704 (apic->tmr_cur_cnt == 0))) {
1705 //PrintDebug("apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->cpu_id);
1723 case APIC_TMR_DIV16:
1726 case APIC_TMR_DIV32:
1729 case APIC_TMR_DIV64:
1732 case APIC_TMR_DIV128:
1736 PrintError("apic %u: core %u: Invalid Timer Divider configuration\n",
1737 apic->lapic_id.val, core->cpu_id);
/* divider is a power of two, so divide by shifting */
1741 tmr_ticks = cpu_cycles >> shift_num;
1742 // PrintDebug("Timer Ticks: %p\n", (void *)tmr_ticks);
1744 if (tmr_ticks < apic->tmr_cur_cnt) {
1745 apic->tmr_cur_cnt -= tmr_ticks; /* not expired yet: just count down */
1747 tmr_ticks -= apic->tmr_cur_cnt; /* expired: keep the overshoot */
1748 apic->tmr_cur_cnt = 0;
1751 PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
1752 apic->lapic_id.val, core->cpu_id,
1753 apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
1755 if (apic_intr_pending_nolock(core, priv_data)) {
1756 PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
1757 apic->lapic_id.val, core->cpu_id,
/* FIX: was apic_get_intr_number (the locking wrapper), which would
 * self-deadlock here once real locking is re-enabled -- the caller of
 * this _nolock function already holds apic->lock (see the matching
 * apic_intr_pending_nolock call above). */
1758 apic_get_intr_number_nolock(core, priv_data));
1761 if (activate_internal_irq_nolock(apic, APIC_TMR_INT) == -1) {
1762 PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
1763 apic->lapic_id.val, core->cpu_id);
/* periodic mode: reload the counter, accounting for the overshoot */
1766 if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
1767 tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
1768 apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
/*
 * apic_update_time - timer callback registered in timer_ops: takes the
 * per-APIC lock, delegates to apic_update_time_nolock(), releases the lock.
 */
1776 static void apic_update_time(struct guest_info * core,
1777 uint64_t cpu_cycles, uint64_t cpu_freq,
1779 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1780 struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
1782 v3_lock(apic->lock);
1784 apic_update_time_nolock(core,cpu_cycles,cpu_freq,priv_data);
1786 v3_unlock(apic->lock);
/* Interrupt-controller interface registered with each core; all entries are
 * the self-locking wrappers (never the _nolock variants). */
1791 static struct intr_ctrl_ops intr_ops = {
1792 .intr_pending = apic_intr_pending,
1793 .get_intr_number = apic_get_intr_number,
1794 .begin_irq = apic_begin_irq,
/* Timer interface registered with each core; drives APIC timer emulation. */
1798 static struct v3_timer_ops timer_ops = {
1799 .update_timer = apic_update_time,
/*
 * apic_free - device teardown: deregister each per-core interrupt controller
 * and timer, then unhook the APIC base-address MSR.
 * NOTE(review): `vm` starts NULL here and is used by v3_unhook_msr below;
 * it is presumably assigned from a core (e.g. core->vm_info) on lines elided
 * from this excerpt -- confirm, else the unhook is called with NULL.
 */
1805 static int apic_free(struct apic_dev_state * apic_dev) {
1807 struct v3_vm_info * vm = NULL;
1809 for (i = 0; i < apic_dev->num_apics; i++) { /* i declared on an elided line */
1810 struct apic_state * apic = &(apic_dev->apics[i]);
1811 struct guest_info * core = apic->core;
1815 v3_remove_intr_controller(core, apic->controller_handle);
1818 v3_remove_timer(core, apic->timer);
1825 v3_unhook_msr(vm, BASE_ADDR_MSR);
/* Generic device interface; the cast adapts apic_free's typed parameter to
 * the void* signature expected by the framework. */
1832 static struct v3_device_ops dev_ops = {
1833 .free = (int (*)(void *))apic_free,
/*
 * apic_init - device constructor: allocates one apic_state per guest core
 * (flexible-array style, in a single allocation with the device state),
 * registers the device, then per core initializes the APIC state, registers
 * the interrupt controller and timer, and hooks the APIC MMIO page.
 * Finally hooks the APIC base-address MSR.  Returns 0 on success, -1 on
 * failure (error returns are on lines elided from this excerpt).
 */
1838 static int apic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1839 char * dev_id = v3_cfg_val(cfg, "ID");
1840 struct apic_dev_state * apic_dev = NULL;
1843 PrintDebug("apic: creating an APIC for each core\n");
1845 apic_dev = (struct apic_dev_state *)V3_Malloc(sizeof(struct apic_dev_state) +
1846 sizeof(struct apic_state) * vm->num_cores);
/* FIX: the allocation was dereferenced unchecked on the next line, so an
 * out-of-memory condition would NULL-deref instead of failing cleanly. */
if (apic_dev == NULL) {
    PrintError("apic: Failed to allocate space for APIC device state\n");
    return -1;
}
1848 apic_dev->num_apics = vm->num_cores;
1850 //v3_lock_init(&(apic_dev->ipi_lock));
1852 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, apic_dev);
1855 PrintError("apic: Could not attach device %s\n", dev_id);
1861 for (i = 0; i < vm->num_cores; i++) { /* i declared on an elided line */
1862 struct apic_state * apic = &(apic_dev->apics[i]);
1863 struct guest_info * core = &(vm->cores[i]);
1867 init_apic_state(apic, i);
1869 apic->controller_handle = v3_register_intr_controller(core, &intr_ops, apic_dev);
1871 apic->timer = v3_add_timer(core, &timer_ops, apic_dev);
1873 if (apic->timer == NULL) {
1874 PrintError("APIC: Failed to attach timer to core %d\n", i);
1875 v3_remove_device(dev);
/* hook the 4KB APIC register page for this core's MMIO accesses */
1879 v3_hook_full_mem(vm, core->cpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
1881 PrintDebug("apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
1884 #ifdef CONFIG_DEBUG_APIC
1885 for (i = 0; i < vm->num_cores; i++) {
1886 struct apic_state * apic = &(apic_dev->apics[i]);
1887 PrintDebug("apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
1888 i, apic, apic->lapic_id.val, apic->base_addr_msr.value,apic->core);
1893 PrintDebug("apic: priv_data is at %p\n", apic_dev);
1895 v3_hook_msr(vm, BASE_ADDR_MSR, read_apic_msr, write_apic_msr, apic_dev);
/* Register this device with the Palacios device framework under the name
 * "LAPIC", with apic_init as its constructor. */
1902 device_register("LAPIC", apic_init)