2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Authors: Jack Lange <jarusl@cs.northwestern.edu>
15 * Peter Dinda <pdinda@northwestern.edu> (SMP)
17 * This is free software. You are permitted to use,
18 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <devices/apic.h>
23 #include <devices/apic_regs.h>
24 #include <palacios/vmm.h>
25 #include <palacios/vmm_msr.h>
26 #include <palacios/vmm_sprintf.h>
27 #include <palacios/vm_guest.h>
28 #include <palacios/vmm_types.h>
31 #include <palacios/vmm_queue.h>
32 #include <palacios/vmm_lock.h>
34 /* The locking in this file is nasty.
35 * There are 3 different locking approaches that are taken, depending on the APIC operation
36 * 1. Queue locks. Actual irq insertions are done via queueing irq ops at the dest apic.
37 * The destination apic's core is responsible for draining the queue, and actually
38 * setting the vector table.
39 * 2. State lock. This is a standard lock taken when internal apic state is read/written.
40 * When an irq's destination is determined this lock is taken to examine the apic's
42 * 3. VM barrier lock. This is taken when actual VM core state is changed (via SIPI).
47 #ifndef V3_CONFIG_DEBUG_APIC
/* Compile debug output to nothing when APIC debugging is not configured in. */
49 #define PrintDebug(fmt, args...)
/* Human-readable names for ICR destination shorthands; indexed by the
 * APIC_SHORTHAND_* values below -- array contents not visible here, confirm ordering. */
52 static char * shorthand_str[] = {
/* Human-readable names for IPI delivery modes (IPI_FIXED, IPI_LOWEST_PRIO, ...). */
59 static char * deliverymode_str[] = {
/* The local (internal) interrupt sources this APIC model can raise for itself,
 * each corresponding to one LVT entry. */
72 typedef enum { APIC_TMR_INT, APIC_THERM_INT, APIC_PERF_INT,
73 APIC_LINT0_INT, APIC_LINT1_INT, APIC_ERR_INT } apic_irq_type_t;
/* ICR destination-shorthand field encodings. */
76 #define APIC_SHORTHAND_NONE 0x0
77 #define APIC_SHORTHAND_SELF 0x1
78 #define APIC_SHORTHAND_ALL 0x2
79 #define APIC_SHORTHAND_ALL_BUT_ME 0x3
/* ICR destination mode: physical APIC id vs. logical (MDA) addressing. */
81 #define APIC_DEST_PHYSICAL 0x0
82 #define APIC_DEST_LOGICAL 0x1
/* IA32_APIC_BASE MSR number and the architectural default base address. */
85 #define BASE_ADDR_MSR 0x0000001B
86 #define DEFAULT_BASE_ADDR 0xfee00000
/* Offsets of the memory-mapped APIC registers, relative to the APIC base address. */
88 #define APIC_ID_OFFSET 0x020
89 #define APIC_VERSION_OFFSET 0x030
90 #define TPR_OFFSET 0x080
91 #define APR_OFFSET 0x090
92 #define PPR_OFFSET 0x0a0
93 #define EOI_OFFSET 0x0b0
94 #define REMOTE_READ_OFFSET 0x0c0
95 #define LDR_OFFSET 0x0d0
96 #define DFR_OFFSET 0x0e0
97 #define SPURIOUS_INT_VEC_OFFSET 0x0f0
/* In-Service Register: 256 bits spread over eight 32-bit registers. */
99 #define ISR_OFFSET0 0x100 // 0x100 - 0x170
100 #define ISR_OFFSET1 0x110 // 0x100 - 0x170
101 #define ISR_OFFSET2 0x120 // 0x100 - 0x170
102 #define ISR_OFFSET3 0x130 // 0x100 - 0x170
103 #define ISR_OFFSET4 0x140 // 0x100 - 0x170
104 #define ISR_OFFSET5 0x150 // 0x100 - 0x170
105 #define ISR_OFFSET6 0x160 // 0x100 - 0x170
106 #define ISR_OFFSET7 0x170 // 0x100 - 0x170
/* Trigger Mode Register: one bit per vector, same 8x32 layout as the ISR. */
108 #define TRIG_OFFSET0 0x180 // 0x180 - 0x1f0
109 #define TRIG_OFFSET1 0x190 // 0x180 - 0x1f0
110 #define TRIG_OFFSET2 0x1a0 // 0x180 - 0x1f0
111 #define TRIG_OFFSET3 0x1b0 // 0x180 - 0x1f0
112 #define TRIG_OFFSET4 0x1c0 // 0x180 - 0x1f0
113 #define TRIG_OFFSET5 0x1d0 // 0x180 - 0x1f0
114 #define TRIG_OFFSET6 0x1e0 // 0x180 - 0x1f0
115 #define TRIG_OFFSET7 0x1f0 // 0x180 - 0x1f0
/* Interrupt Request Register: pending-interrupt bits, 8x32 layout. */
118 #define IRR_OFFSET0 0x200 // 0x200 - 0x270
119 #define IRR_OFFSET1 0x210 // 0x200 - 0x270
120 #define IRR_OFFSET2 0x220 // 0x200 - 0x270
121 #define IRR_OFFSET3 0x230 // 0x200 - 0x270
122 #define IRR_OFFSET4 0x240 // 0x200 - 0x270
123 #define IRR_OFFSET5 0x250 // 0x200 - 0x270
124 #define IRR_OFFSET6 0x260 // 0x200 - 0x270
125 #define IRR_OFFSET7 0x270 // 0x200 - 0x270
/* Error status, interrupt command (ICR low/high), LVT entries, and timer registers. */
128 #define ESR_OFFSET 0x280
129 #define INT_CMD_LO_OFFSET 0x300
130 #define INT_CMD_HI_OFFSET 0x310
131 #define TMR_LOC_VEC_TBL_OFFSET 0x320
132 #define THERM_LOC_VEC_TBL_OFFSET 0x330
133 #define PERF_CTR_LOC_VEC_TBL_OFFSET 0x340
134 #define LINT0_VEC_TBL_OFFSET 0x350
135 #define LINT1_VEC_TBL_OFFSET 0x360
136 #define ERR_VEC_TBL_OFFSET 0x370
137 #define TMR_INIT_CNT_OFFSET 0x380
138 #define TMR_CUR_CNT_OFFSET 0x390
139 #define TMR_DIV_CFG_OFFSET 0x3e0
/* AMD extended-APIC feature/command registers (see ext_apic_feature below). */
140 #define EXT_APIC_FEATURE_OFFSET 0x400
141 #define EXT_APIC_CMD_OFFSET 0x410
142 #define SEOI_OFFSET 0x420
/* Interrupt Enable Register (extended APIC): per-vector enable bits, 8x32 layout. */
144 #define IER_OFFSET0 0x480 // 0x480 - 0x4f0
145 #define IER_OFFSET1 0x490 // 0x480 - 0x4f0
146 #define IER_OFFSET2 0x4a0 // 0x480 - 0x4f0
147 #define IER_OFFSET3 0x4b0 // 0x480 - 0x4f0
148 #define IER_OFFSET4 0x4c0 // 0x480 - 0x4f0
149 #define IER_OFFSET5 0x4d0 // 0x480 - 0x4f0
150 #define IER_OFFSET6 0x4e0 // 0x480 - 0x4f0
151 #define IER_OFFSET7 0x4f0 // 0x480 - 0x4f0
/* Extended interrupt local vector table entries (4 of them, see ext_intr_vec_tbl[4]). */
153 #define EXT_INT_LOC_VEC_TBL_OFFSET0 0x500 // 0x500 - 0x530
154 #define EXT_INT_LOC_VEC_TBL_OFFSET1 0x510 // 0x500 - 0x530
155 #define EXT_INT_LOC_VEC_TBL_OFFSET2 0x520 // 0x500 - 0x530
156 #define EXT_INT_LOC_VEC_TBL_OFFSET3 0x530 // 0x500 - 0x530
/* Bitfield tail of the IA32_APIC_BASE MSR layout -- TODO(review): confirm full
 * layout against the struct head, which is not visible in this excerpt. */
163 uint8_t bootstrap_cpu : 1;
/* Global APIC enable bit of the MSR. */
165 uint8_t apic_enable : 1;
/* Page-frame portion of the APIC base address -- confirm bit position/width. */
166 uint64_t base_addr : 40;
168 } __attribute__((packed));
169 } __attribute__((packed));
170 } __attribute__((packed));
/* One interrupt queued for delivery to a destination APIC; entries are added
 * by any core under the queue lock and drained by the owning core
 * (see add_apic_irq_entry / drain_irq_entries). */
176 struct irq_queue_entry {
/* Optional callback invoked when the guest EOIs this vector. */
178 int (*ack)(struct guest_info * core, uint32_t irq, void * private_data);
181 struct list_head list_node;
/* INIT/SIPI startup state machine for bringing up an AP core. */
187 typedef enum {INIT_ST,
189 STARTED} ipi_state_t;
191 struct apic_dev_state;
/* Per-virtual-core local APIC state. Register structs are declared in
 * devices/apic_regs.h; this struct holds the backing values for the
 * memory-mapped register file plus the irq delivery queue. */
197 struct apic_msr base_addr_msr;
200 /* memory map registers */
202 struct lapic_id_reg lapic_id;
203 struct apic_ver_reg apic_ver;
204 struct ext_apic_ctrl_reg ext_apic_ctrl;
205 struct local_vec_tbl_reg local_vec_tbl;
206 struct tmr_vec_tbl_reg tmr_vec_tbl;
207 struct tmr_div_cfg_reg tmr_div_cfg;
208 struct lint_vec_tbl_reg lint0_vec_tbl;
209 struct lint_vec_tbl_reg lint1_vec_tbl;
210 struct perf_ctr_loc_vec_tbl_reg perf_ctr_loc_vec_tbl;
211 struct therm_loc_vec_tbl_reg therm_loc_vec_tbl;
212 struct err_vec_tbl_reg err_vec_tbl;
213 struct err_status_reg err_status;
214 struct spurious_int_reg spurious_int;
215 struct int_cmd_reg int_cmd;
216 struct log_dst_reg log_dst;
217 struct dst_fmt_reg dst_fmt;
218 //struct arb_prio_reg arb_prio; // computed on the fly
219 //struct task_prio_reg task_prio; // stored in core.ctrl_regs.apic_tpr
220 //struct proc_prio_reg proc_prio; // computed on the fly
221 struct ext_apic_feature_reg ext_apic_feature;
222 struct spec_eoi_reg spec_eoi;
/* Timer counters plus a count of timer ticks missed while masked/busy. */
225 uint32_t tmr_cur_cnt;
226 uint32_t tmr_init_cnt;
227 uint32_t missed_ints;
229 struct local_vec_tbl_reg ext_intr_vec_tbl[4];
231 uint32_t rem_rd_data;
/* INIT/SIPI progress of this core (see deliver_ipi). */
234 ipi_state_t ipi_state;
/* 256-bit IRR / ISR / IER / TMR bitmaps, one bit per vector. */
236 uint8_t int_req_reg[32];
237 uint8_t int_svc_reg[32];
238 uint8_t int_en_reg[32];
239 uint8_t trig_mode_reg[32];
242 int (*ack)(struct guest_info * core, uint32_t irq, void * private_data);
/* Back-pointer to the vcore this APIC belongs to. */
246 struct guest_info * core;
248 void * controller_handle;
250 struct v3_timer * timer;
/* Cross-core irq delivery queue (locking approach #1 in the header comment). */
256 uint64_t num_entries;
257 struct list_head entries;
/* Device-level state: shared state lock plus one apic_state per vcore,
 * allocated as a trailing variable-length array. */
268 struct apic_dev_state {
271 v3_lock_t state_lock;
273 struct apic_state apics[0];
274 } __attribute__((packed));
/* Forward declarations: MMIO handlers for the hooked APIC page, and the TPR
 * setter used by init before its definition appears. */
280 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data);
281 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data);
283 static void set_apic_tpr(struct apic_state *apic, uint32_t val);
/* Return nonzero iff this APIC belongs to the bootstrap processor:
 * tests the BSP flag (bit 8) of the cached IA32_APIC_BASE MSR value. */
285 static int is_apic_bsp(struct apic_state * apic) {
286 return ((apic->base_addr_msr.value & 0x0000000000000100LL) != 0);
/* Initialize one per-core APIC to architectural reset defaults.
 * 'id' becomes the APIC id; core 0 (presumably -- the selecting condition is
 * not visible here) gets the BSP variant of the base-address MSR. */
293 static void init_apic_state(struct apic_state * apic, uint32_t id) {
294 apic->base_addr = DEFAULT_BASE_ADDR;
297 // boot processor, enabled
/* 0x900 = APIC enable (bit 11) | BSP flag (bit 8). */
298 apic->base_addr_msr.value = 0x0000000000000900LL;
300 // ap processor, enabled
/* 0x800 = APIC enable (bit 11) only. */
301 apic->base_addr_msr.value = 0x0000000000000800LL;
304 // same base address regardless of ap or main
305 apic->base_addr_msr.value |= ((uint64_t)DEFAULT_BASE_ADDR);
307 PrintDebug(VM_NONE, VCORE_NONE, "apic %u: (init_apic_state): msr=0x%llx\n",id, apic->base_addr_msr.value);
309 PrintDebug(VM_NONE, VCORE_NONE, "apic %u: (init_apic_state): Sizeof Interrupt Request Register %d, should be 32\n",
310 id, (uint_t)sizeof(apic->int_req_reg));
/* Clear IRR/ISR/TMR; enable every vector in the IER. */
312 memset(apic->int_req_reg, 0, sizeof(apic->int_req_reg));
313 memset(apic->int_svc_reg, 0, sizeof(apic->int_svc_reg));
314 memset(apic->int_en_reg, 0xff, sizeof(apic->int_en_reg));
315 memset(apic->trig_mode_reg, 0, sizeof(apic->trig_mode_reg));
317 apic->eoi = 0x00000000;
318 apic->rem_rd_data = 0x00000000;
319 apic->tmr_init_cnt = 0x00000000;
320 apic->tmr_cur_cnt = 0x00000000;
321 apic->missed_ints = 0;
323 // note that it's the *lower* 24 bits that are
324 // reserved, not the upper 24.
325 apic->lapic_id.val = 0;
326 apic->lapic_id.apic_id = id;
/* AP cores start in the INIT state of the INIT/SIPI machine. */
328 apic->ipi_state = INIT_ST;
330 // The P6 has 6 LVT entries, so we set the value to (6-1)...
331 apic->apic_ver.val = 0x80050010;
333 set_apic_tpr(apic,0x00000000);
334 // note that arbitration priority and processor priority are derived values
335 // and are computed on the fly
/* Register reset values: flat-model DFR (all ones), spurious vector 0xff,
 * every LVT entry masked (bit 16 set = 0x00010000). */
337 apic->log_dst.val = 0x00000000;
338 apic->dst_fmt.val = 0xffffffff;
339 apic->spurious_int.val = 0x000000ff;
340 apic->err_status.val = 0x00000000;
341 apic->int_cmd.val = 0x0000000000000000LL;
342 apic->tmr_vec_tbl.val = 0x00010000;
343 apic->therm_loc_vec_tbl.val = 0x00010000;
344 apic->perf_ctr_loc_vec_tbl.val = 0x00010000;
345 apic->lint0_vec_tbl.val = 0x00010000;
346 apic->lint1_vec_tbl.val = 0x00010000;
347 apic->err_vec_tbl.val = 0x00010000;
348 apic->tmr_div_cfg.val = 0x00000000;
349 //apic->ext_apic_feature.val = 0x00000007;
350 apic->ext_apic_feature.val = 0x00040007;
351 apic->ext_apic_ctrl.val = 0x00000000;
352 apic->spec_eoi.val = 0x00000000;
/* Set up the cross-core irq delivery queue. */
355 INIT_LIST_HEAD(&(apic->irq_queue.entries));
356 v3_lock_init(&(apic->irq_queue.lock));
357 apic->irq_queue.num_entries = 0;
/* MSR read handler for IA32_APIC_BASE: return this core's cached MSR value. */
365 static int read_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t * dst, void * priv_data) {
366 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
367 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
369 PrintDebug(core->vm_info, core, "apic %u: core %u: MSR read getting %llx\n", apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
371 dst->value = apic->base_addr_msr.value;
/* MSR write handler for IA32_APIC_BASE: relocates the APIC MMIO page.
 * Deletes the memory hook at the old base address and re-hooks a 4KB page at
 * the new (page-aligned) base so apic_read/apic_write keep intercepting. */
377 static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, void * priv_data) {
378 struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
379 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
380 struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, apic->base_addr);
383 PrintDebug(core->vm_info, core, "apic %u: core %u: MSR write of %llx\n", apic->lapic_id.val, core->vcpu_id, src.value);
385 if (old_reg == NULL) {
387 PrintError(core->vm_info, core, "apic %u: core %u: APIC Base address region does not exit...\n",
388 apic->lapic_id.val, core->vcpu_id);
/* Remove the hook on the old page before installing the new one. */
394 v3_delete_mem_region(core->vm_info, old_reg);
396 apic->base_addr_msr.value = src.value;
/* The base address is the page-aligned part of the written value. */
398 apic->base_addr = src.value & ~0xfffULL;
400 if (v3_hook_full_mem(core->vm_info, core->vcpu_id, apic->base_addr,
401 apic->base_addr + PAGE_SIZE_4KB,
402 apic_read, apic_write, apic_dev) == -1) {
403 PrintError(core->vm_info, core, "apic %u: core %u: Could not hook new APIC Base address\n",
404 apic->lapic_id.val, core->vcpu_id);
417 // irq_num is the bit offset into a 256 bit buffer...
/* Set the IRR bit for 'irq_num' on this APIC (must run on the owning core --
 * callers queue through add_apic_irq_entry). Records the ack callback to be
 * invoked at EOI time. Delivery is skipped if the vector is disabled in the
 * IER; an already-pending vector is coalesced. */
418 static int activate_apic_irq(struct apic_state * apic, uint32_t irq_num,
419 int (*ack)(struct guest_info * core, uint32_t irq, void * private_data),
420 void * private_data) {
/* byte index (irq/8) and bit-within-byte (irq%8) into the 256-bit bitmaps */
421 int major_offset = (irq_num & ~0x00000007) >> 3;
422 int minor_offset = irq_num & 0x00000007;
423 uint8_t * req_location = apic->int_req_reg + major_offset;
424 uint8_t * en_location = apic->int_en_reg + major_offset;
425 uint8_t flag = 0x1 << minor_offset;
428 PrintDebug(VM_NONE, VCORE_NONE, "apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->vcpu_id, irq_num);
430 if (*req_location & flag) {
431 PrintDebug(VM_NONE, VCORE_NONE, "Interrupt %d coallescing\n", irq_num);
435 if (*en_location & flag) {
436 *req_location |= flag;
437 apic->irq_ack_cbs[irq_num].ack = ack;
438 apic->irq_ack_cbs[irq_num].private_data = private_data;
442 PrintDebug(VM_NONE, VCORE_NONE, "apic %u: core %d: Interrupt not enabled... %.2x\n",
443 apic->lapic_id.val, apic->core->vcpu_id, *en_location);
/* Queue an irq for later activation on the destination APIC's own core.
 * Allocates a queue entry and appends it to apic->irq_queue under the queue
 * lock; the owning core drains it in drain_irq_entries(). Rejects vectors
 * outside the valid range (check partially visible below). */
451 static int add_apic_irq_entry(struct apic_state * apic, uint32_t irq_num,
452 int (*ack)(struct guest_info * core, uint32_t irq, void * private_data),
453 void * private_data) {
454 unsigned int flags = 0;
455 struct irq_queue_entry * entry = NULL;
458 PrintError(VM_NONE, VCORE_NONE, "core %d: Attempting to raise an invalid interrupt: %d\n",
459 apic->core->vcpu_id, irq_num);
463 entry = V3_Malloc(sizeof(struct irq_queue_entry));
466 PrintError(VM_NONE, VCORE_NONE, "Could not allocate irq queue entry\n");
470 entry->vector = irq_num;
472 entry->private_data = private_data;
/* Append under the queue lock (irqs saved: may be called from irq context). */
474 flags = v3_lock_irqsave(apic->irq_queue.lock);
476 list_add_tail(&(entry->list_node), &(apic->irq_queue.entries));
477 apic->irq_queue.num_entries++;
479 v3_unlock_irqrestore(apic->irq_queue.lock, flags);
/* Drain this APIC's irq queue (runs on the owning core). Entries are popped
 * one at a time under the queue lock, but activate_apic_irq() is called only
 * after the lock is dropped so the vector-table update happens lock-free on
 * the local core. */
485 static void drain_irq_entries(struct apic_state * apic) {
488 unsigned int flags = 0;
489 struct irq_queue_entry * entry = NULL;
491 flags = v3_lock_irqsave(apic->irq_queue.lock);
493 if (!list_empty(&(apic->irq_queue.entries))) {
494 struct list_head * q_entry = apic->irq_queue.entries.next;
495 entry = list_entry(q_entry, struct irq_queue_entry, list_node);
497 apic->irq_queue.num_entries--;
501 v3_unlock_irqrestore(apic->irq_queue.lock, flags);
/* Outside the lock: set the IRR bit for the dequeued vector. */
507 activate_apic_irq(apic, entry->vector, entry->ack, entry->private_data);
/* Return the highest-numbered vector currently in service (set in the ISR
 * bitmap), or a sentinel when none is (fall-through return not visible here).
 * Higher vector number == higher priority, hence the backwards scan. */
516 static int get_highest_isr(struct apic_state * apic) {
519 // We iterate backwards to find the highest priority in-service request
520 for (i = 31; i >= 0; i--) {
521 uint8_t * svc_major = apic->int_svc_reg + i;
523 if ((*svc_major) & 0xff) {
524 for (j = 7; j >= 0; j--) {
525 uint8_t flag = 0x1 << j;
526 if ((*svc_major) & flag) {
527 return ((i * 8) + j);
/* Return the highest-numbered vector that is both requested (IRR) and
 * enabled (IER), or a sentinel when none is. Same backwards priority scan
 * as get_highest_isr(). */
538 static int get_highest_irr(struct apic_state * apic) {
541 // We iterate backwards to find the highest priority enabled requested interrupt
542 for (i = 31; i >= 0; i--) {
543 uint8_t * req_major = apic->int_req_reg + i;
544 uint8_t * en_major = apic->int_en_reg + i;
546 if ((*req_major) & 0xff) {
547 for (j = 7; j >= 0; j--) {
548 uint8_t flag = 0x1 << j;
549 if ((*req_major & *en_major) & flag) {
550 return ((i * 8) + j);
/* ISRV: highest in-service vector as a register value (the mapping of the
 * "none in service" case is not visible here -- presumably 0). */
560 static uint32_t get_isrv(struct apic_state *apic)
562 int isr = get_highest_isr(apic);
565 return (uint32_t) isr;
/* IRRV: highest pending+enabled vector as a register value (same caveat). */
571 static uint32_t get_irrv(struct apic_state *apic)
573 int irr = get_highest_irr(apic);
576 return (uint32_t) irr;
/* The TPR is not stored in this struct: it lives in the vcore's control
 * registers so hardware TPR virtualization can see it (locking approach #2
 * does not cover it). */
583 static uint32_t get_apic_tpr(struct apic_state *apic)
585 return (uint32_t) (apic->core->ctrl_regs.apic_tpr); // see comment in vmm_ctrl_regs.c for how this works
/* Write the TPR via the same control-register path. */
589 static void set_apic_tpr(struct apic_state *apic, uint32_t val)
591 PrintDebug(VM_NONE, VCORE_NONE, "Set apic_tpr to 0x%x from apic reg path\n",val);
592 apic->core->ctrl_regs.apic_tpr = (uint64_t) val; // see comment in vmm_ctrl_regs.c for how this works
/* Compute the Processor Priority Register on the fly:
 * PPR = TPR when TPR class >= ISRV class, else the ISRV class with a zero
 * subclass (per the architectural PPR definition). */
595 static uint32_t get_apic_ppr(struct apic_state *apic)
597 uint32_t tpr = get_apic_tpr(apic);
598 uint32_t isrv = get_isrv(apic);
599 uint32_t tprlevel, isrlevel;
/* priority class = bits 7:4 of each value */
602 tprlevel = (tpr >> 4) & 0xf;
603 isrlevel = (isrv >> 4) & 0xf;
605 if (tprlevel>=isrlevel) {
606 ppr = tpr; // get class and subclass
608 ppr = (isrlevel << 4); // get class only
/* Compute the Arbitration Priority Register on the fly:
 * APR = max (by priority class) of TPR, ISRV class, and IRRV class, where the
 * TPR wins ties and contributes its subclass too. Used by lowest-priority
 * IPI arbitration in route_ipi(). */
616 static uint32_t get_apic_apr(struct apic_state *apic)
618 uint32_t tpr = get_apic_tpr(apic);
619 uint32_t isrv = get_isrv(apic);
620 uint32_t irrv = get_irrv(apic);
621 uint32_t tprlevel, isrlevel, irrlevel;
/* priority class = bits 7:4 of each value */
623 tprlevel = (tpr >> 4) & 0xf;
624 isrlevel = (isrv >> 4) & 0xf;
625 irrlevel = (irrv >> 4) & 0xf;
627 if (tprlevel >= isrlevel) {
628 if (tprlevel >= irrlevel) {
629 return tpr; // get both class and subclass
631 return irrlevel << 4; // get class only
634 if (isrlevel >= irrlevel) {
635 return isrlevel << 4; // get class only
637 return irrlevel << 4; // get class only
/* Handle a guest EOI: clear the highest-priority in-service vector's ISR bit
 * and fire its registered ack callback (used e.g. for device-level acks).
 * The Cray XT block below special-cases vector 238 -- platform specific. */
644 static int apic_do_eoi(struct guest_info * core, struct apic_state * apic) {
645 int isr_irq = get_highest_isr(apic);
/* byte index / bit-within-byte of the vector in the ISR bitmap */
648 int major_offset = (isr_irq & ~0x00000007) >> 3;
649 int minor_offset = isr_irq & 0x00000007;
650 uint8_t flag = 0x1 << minor_offset;
651 uint8_t * svc_location = apic->int_svc_reg + major_offset;
653 PrintDebug(core->vm_info, core, "apic %u: core ?: Received APIC EOI for IRQ %d\n", apic->lapic_id.val,isr_irq);
655 *svc_location &= ~flag;
657 if (apic->irq_ack_cbs[isr_irq].ack) {
658 apic->irq_ack_cbs[isr_irq].ack(core, isr_irq, apic->irq_ack_cbs[isr_irq].private_data);
661 #ifdef V3_CONFIG_CRAY_XT
663 if ((isr_irq == 238) ||
665 PrintDebug(core->vm_info, core, "apic %u: core ?: Acking IRQ %d\n", apic->lapic_id.val,isr_irq);
668 if (isr_irq == 238) {
673 //PrintError(core->vm_info, core, "apic %u: core ?: Spurious EOI...\n",apic->lapic_id.val);
/* Raise one of the APIC's own local interrupt sources (timer, thermal, perf,
 * LINT0/1, error) by reading the corresponding LVT entry for its vector,
 * delivery mode, and mask bit, then queueing it locally. Only IPI_FIXED
 * delivery is implemented; masked entries are dropped. */
680 static int activate_internal_irq(struct apic_state * apic, apic_irq_type_t int_type) {
681 uint32_t vec_num = 0;
682 uint32_t del_mode = 0;
/* Timer LVT: delivery mode is architecturally fixed. */
688 vec_num = apic->tmr_vec_tbl.vec;
689 del_mode = IPI_FIXED;
690 masked = apic->tmr_vec_tbl.mask;
693 vec_num = apic->therm_loc_vec_tbl.vec;
694 del_mode = apic->therm_loc_vec_tbl.msg_type;
695 masked = apic->therm_loc_vec_tbl.mask;
698 vec_num = apic->perf_ctr_loc_vec_tbl.vec;
699 del_mode = apic->perf_ctr_loc_vec_tbl.msg_type;
700 masked = apic->perf_ctr_loc_vec_tbl.mask;
703 vec_num = apic->lint0_vec_tbl.vec;
704 del_mode = apic->lint0_vec_tbl.msg_type;
705 masked = apic->lint0_vec_tbl.mask;
708 vec_num = apic->lint1_vec_tbl.vec;
709 del_mode = apic->lint1_vec_tbl.msg_type;
710 masked = apic->lint1_vec_tbl.mask;
/* Error LVT: delivery mode is architecturally fixed. */
713 vec_num = apic->err_vec_tbl.vec;
714 del_mode = IPI_FIXED;
715 masked = apic->err_vec_tbl.mask;
718 PrintError(VM_NONE, VCORE_NONE, "apic %u: core ?: Invalid APIC interrupt type\n", apic->lapic_id.val);
722 // interrupt is masked, don't send
724 PrintDebug(VM_NONE, VCORE_NONE, "apic %u: core ?: Inerrupt is masked\n", apic->lapic_id.val);
728 if (del_mode == IPI_FIXED) {
729 //PrintDebug(VM_NONE, VCORE_NONE, "Activating internal APIC IRQ %d\n", vec_num);
/* No ack callback for internal sources. */
730 return add_apic_irq_entry(apic, vec_num, NULL, NULL);
732 PrintError(VM_NONE, VCORE_NONE, "apic %u: core ?: Unhandled Delivery Mode\n", apic->lapic_id.val);
/* Cluster-model logical match: deliver when the MDA's cluster id (high
 * nibble) equals this APIC's LDR cluster AND the MDA's member bits (low
 * nibble) intersect the LDR's member bits. Returns 1 to deliver, 0 to skip
 * (return statements not visible in this excerpt). */
739 static inline int should_deliver_cluster_ipi(struct apic_dev_state * apic_dev,
740 struct guest_info * dst_core,
741 struct apic_state * dst_apic, uint8_t mda) {
746 if ( ((mda & 0xf0) == (dst_apic->log_dst.dst_log_id & 0xf0)) && /* (I am in the cluster and */
747 ((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { /* I am in the set) */
755 PrintDebug(VM_NONE, VCORE_NONE, "apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
756 dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
757 dst_apic->log_dst.dst_log_id);
759 PrintDebug(VM_NONE, VCORE_NONE, "apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
760 dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
761 dst_apic->log_dst.dst_log_id);
/* Flat-model logical match: deliver when the MDA bitmask intersects this
 * APIC's logical destination id. */
768 static inline int should_deliver_flat_ipi(struct apic_dev_state * apic_dev,
769 struct guest_info * dst_core,
770 struct apic_state * dst_apic, uint8_t mda) {
775 if ((dst_apic->log_dst.dst_log_id & mda) != 0) { // I am in the set
783 PrintDebug(VM_NONE, VCORE_NONE, "apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
784 dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
785 dst_apic->log_dst.dst_log_id);
787 PrintDebug(VM_NONE, VCORE_NONE, "apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
788 dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
789 dst_apic->log_dst.dst_log_id);
/* Decide whether a logical-mode IPI with message destination address 'mda'
 * should be delivered to dst_apic. Reads the APIC's destination format
 * register under the device state lock and dispatches to the flat (model
 * 0xf) or cluster (model 0x0) matcher; broadcast MDAs always deliver
 * (the broadcast checks themselves are not visible in this excerpt). */
798 static int should_deliver_ipi(struct apic_dev_state * apic_dev,
799 struct guest_info * dst_core,
800 struct apic_state * dst_apic, uint8_t mda) {
804 flags = v3_lock_irqsave(apic_dev->state_lock);
806 if (dst_apic->dst_fmt.model == 0xf) {
809 /* always deliver broadcast */
812 ret = should_deliver_flat_ipi(apic_dev, dst_core, dst_apic, mda);
814 } else if (dst_apic->dst_fmt.model == 0x0) {
817 /* always deliver broadcast */
820 ret = should_deliver_cluster_ipi(apic_dev, dst_core, dst_apic, mda);
827 v3_unlock_irqrestore(apic_dev->state_lock, flags);
/* Any other DFR model value is invalid for logical delivery. */
831 PrintError(VM_NONE, VCORE_NONE, "apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
832 dst_apic->lapic_id.val, dst_core->vcpu_id, dst_apic->dst_fmt.model);
841 // Only the src_apic pointer is used
/* Deliver an already-routed IPI to a single destination APIC.
 * FIXED/LOWEST_PRIO queue the vector on the destination and kick its physical
 * CPU if remote; INIT and SIPI drive the AP startup state machine (locking
 * approach #3: the VM barrier protects the core_run_state transition). */
842 static int deliver_ipi(struct apic_state * src_apic,
843 struct apic_state * dst_apic,
844 struct v3_gen_ipi * ipi) {
847 struct guest_info * dst_core = dst_apic->core;
853 case IPI_LOWEST_PRIO: {
855 // caller needs to have decided which apic to deliver to!
857 PrintDebug(VM_NONE, VCORE_NONE, "delivering IRQ %d to core %u\n", ipi->vector, dst_core->vcpu_id);
859 add_apic_irq_entry(dst_apic, ipi->vector, ipi->ack, ipi->private_data);
861 if (dst_apic != src_apic) {
862 PrintDebug(VM_NONE, VCORE_NONE, " non-local core with new interrupt, forcing it to exit now\n");
/* Force a VM exit on the destination's physical CPU so it drains its queue. */
863 v3_interrupt_cpu(dst_core->vm_info, dst_core->pcpu_id, 0);
870 PrintDebug(VM_NONE, VCORE_NONE, " INIT delivery to core %u\n", dst_core->vcpu_id);
872 if (is_apic_bsp(dst_apic)) {
873 PrintError(VM_NONE, VCORE_NONE, "Attempted to INIT BSP CPU. Ignoring since I have no idea what the hell to do...\n");
/* Re-INIT of a core already past INIT: stop it under the VM barrier. */
878 if (dst_apic->ipi_state != INIT_ST) {
879 v3_raise_barrier(dst_core->vm_info, src_apic->core);
880 dst_core->core_run_state = CORE_STOPPED;
881 dst_apic->ipi_state = INIT_ST;
882 v3_lower_barrier(dst_core->vm_info);
886 // We transition the target core to SIPI state
887 dst_apic->ipi_state = SIPI; // note: locking should not be needed here
889 // That should be it since the target core should be
890 // waiting in host on this transition
891 // either it's on another core or on a different preemptive thread
892 // in both cases, it will quickly notice this transition
893 // in particular, we should not need to force an exit here
895 PrintDebug(VM_NONE, VCORE_NONE, " INIT delivery done\n");
/* SIPI is only honored when the target is waiting in the SIPI state. */
902 if (dst_apic->ipi_state != SIPI) {
903 PrintError(VM_NONE, VCORE_NONE, " core %u is not in SIPI state (mode = %d), ignored!\n",
904 dst_core->vcpu_id, dst_apic->ipi_state);
/* The SIPI vector encodes the real-mode start segment (vector << 8 = CS). */
908 v3_reset_vm_core(dst_core, ipi->vector);
910 PrintDebug(VM_NONE, VCORE_NONE, " SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
911 ipi->vector, dst_core->segments.cs.selector, dst_core->vcpu_id);
912 // Maybe need to adjust the APIC?
914 // We transition the target core to SIPI state
915 dst_core->core_run_state = CORE_RUNNING; // note: locking should not be needed here
916 dst_apic->ipi_state = STARTED;
918 // As with INIT, we should not need to do anything else
920 PrintDebug(VM_NONE, VCORE_NONE, " SIPI delivery done\n");
925 case IPI_EXTINT: // EXTINT
926 /* Two possible things to do here:
927 * 1. Ignore the IPI and assume the 8259a (PIC) will handle it
928 * 2. Add 32 to the vector and inject it...
929 * We probably just want to do 1 here, and assume the raise_irq() will hit the 8259a.
934 case IPI_RES1: // reserved
937 PrintError(VM_NONE, VCORE_NONE, "IPI %d delivery is unsupported\n", ipi->mode);
/* Look up the APIC with physical id 'dst_idx' under the state lock.
 * Fast path: when the id equals the array index (the common identity
 * mapping), return that slot directly; otherwise fall back to a linear scan.
 * NOTE(review): the fast path tests (dst_idx > 0), so id 0 always takes the
 * scan path -- looks intentional (id 0 still found by the loop), but confirm. */
945 static struct apic_state * find_physical_apic(struct apic_dev_state * apic_dev, uint32_t dst_idx) {
946 struct apic_state * dst_apic = NULL;
950 flags = v3_lock_irqsave(apic_dev->state_lock);
952 if ( (dst_idx > 0) && (dst_idx < apic_dev->num_apics) ) {
953 // see if it simply is the core id
954 if (apic_dev->apics[dst_idx].lapic_id.apic_id == dst_idx) {
955 dst_apic = &(apic_dev->apics[dst_idx]);
/* Fallback: scan every APIC for a matching id. */
959 for (i = 0; i < apic_dev->num_apics; i++) {
960 if (apic_dev->apics[i].lapic_id.apic_id == dst_idx) {
961 dst_apic = &(apic_dev->apics[i]);
965 v3_unlock_irqrestore(apic_dev->state_lock, flags);
/* Route an IPI to its destination APIC(s) according to the ICR shorthand and
 * destination mode:
 *   - no shorthand + physical: single APIC looked up by id;
 *   - no shorthand + logical: every APIC whose LDR matches the MDA, or -- for
 *     lowest-priority mode -- only the matching APIC with the lowest APR;
 *   - self: deliver back to src_apic;
 *   - all / all-but-me: broadcast over every APIC (optionally skipping src).
 * src_apic may be NULL for IPIs originating outside an APIC (e.g. IOAPIC),
 * which is why SELF rejects that case. */
972 static int route_ipi(struct apic_dev_state * apic_dev,
973 struct apic_state * src_apic,
974 struct v3_gen_ipi * ipi) {
975 struct apic_state * dest_apic = NULL;
978 PrintDebug(VM_NONE, VCORE_NONE, "apic: IPI %s %u from apic %p to %s %s %u\n",
979 deliverymode_str[ipi->mode],
982 (ipi->logical == 0) ? "(physical)" : "(logical)",
983 shorthand_str[ipi->dst_shorthand],
987 switch (ipi->dst_shorthand) {
989 case APIC_SHORTHAND_NONE: // no shorthand
990 if (ipi->logical == APIC_DEST_PHYSICAL) {
/* Physical mode: exactly one destination, found by APIC id. */
992 dest_apic = find_physical_apic(apic_dev, ipi->dst);
994 if (dest_apic == NULL) {
995 PrintError(VM_NONE, VCORE_NONE, "apic: Attempted send to unregistered apic id=%u\n", ipi->dst);
999 if (deliver_ipi(src_apic, dest_apic, ipi) == -1) {
1000 PrintError(VM_NONE, VCORE_NONE, "apic: Could not deliver IPI\n");
1005 PrintDebug(VM_NONE, VCORE_NONE, "apic: done\n");
1007 } else if (ipi->logical == APIC_DEST_LOGICAL) {
1009 if (ipi->mode != IPI_LOWEST_PRIO) {
1011 uint8_t mda = ipi->dst;
1013 // logical, but not lowest priority
1014 // we immediately trigger
1015 // fixed, smi, reserved, nmi, init, sipi, etc
/* Deliver to every APIC whose logical destination matches the MDA. */
1018 for (i = 0; i < apic_dev->num_apics; i++) {
1021 dest_apic = &(apic_dev->apics[i]);
1023 del_flag = should_deliver_ipi(apic_dev, dest_apic->core, dest_apic, mda);
1025 if (del_flag == -1) {
1027 PrintError(VM_NONE, VCORE_NONE, "apic: Error checking delivery mode\n");
1029 } else if (del_flag == 1) {
1031 if (deliver_ipi(src_apic, dest_apic, ipi) == -1) {
1032 PrintError(VM_NONE, VCORE_NONE, "apic: Error: Could not deliver IPI\n");
1037 } else { // APIC_LOWEST_DELIVERY
1038 struct apic_state * cur_best_apic = NULL;
1039 uint32_t cur_best_apr;
1040 uint8_t mda = ipi->dst;
1043 // logical, lowest priority
/* Arbitrate: among matching APICs, pick the one with the lowest APR class. */
1045 for (i = 0; i < apic_dev->num_apics; i++) {
1048 dest_apic = &(apic_dev->apics[i]);
1050 del_flag = should_deliver_ipi(apic_dev, dest_apic->core, dest_apic, mda);
1052 if (del_flag == -1) {
1053 PrintError(VM_NONE, VCORE_NONE, "apic: Error checking delivery mode\n");
1056 } else if (del_flag == 1) {
1057 // update priority for lowest priority scan
/* APR reads are protected by the device state lock. */
1060 flags = v3_lock_irqsave(apic_dev->state_lock);
1062 if (cur_best_apic == 0) {
1063 cur_best_apic = dest_apic;
1064 cur_best_apr = get_apic_apr(dest_apic) & 0xf0;
1066 uint32_t dest_apr = get_apic_apr(dest_apic) & 0xf0;
1067 if (dest_apr < cur_best_apr) {
1068 cur_best_apic = dest_apic;
1069 cur_best_apr = dest_apr;
1073 v3_unlock_irqrestore(apic_dev->state_lock, flags);
1078 // now we will deliver to the best one if it exists
1079 if (!cur_best_apic) {
1080 PrintDebug(VM_NONE, VCORE_NONE, "apic: lowest priority deliver, but no destinations!\n");
1082 if (deliver_ipi(src_apic, cur_best_apic, ipi) == -1) {
1083 PrintError(VM_NONE, VCORE_NONE, "apic: Error: Could not deliver IPI\n");
1086 //V3_Print(VM_NONE, VCORE_NONE, "apic: logical, lowest priority delivery to apic %u\n",cur_best_apic->lapic_id.val);
1093 case APIC_SHORTHAND_SELF: // self
1095 if (src_apic == NULL) { /* this is not an apic, but it's trying to send to itself??? */
1096 PrintError(VM_NONE, VCORE_NONE, "apic: Sending IPI to self from generic IPI sender\n");
1102 if (ipi->logical == APIC_DEST_PHYSICAL) { /* physical delivery */
1103 if (deliver_ipi(src_apic, src_apic, ipi) == -1) {
1104 PrintError(VM_NONE, VCORE_NONE, "apic: Could not deliver IPI to self (physical)\n");
1107 } else if (ipi->logical == APIC_DEST_LOGICAL) { /* logical delivery */
1108 PrintError(VM_NONE, VCORE_NONE, "apic: use of logical delivery in self (untested)\n");
1110 if (deliver_ipi(src_apic, src_apic, ipi) == -1) {
1111 PrintError(VM_NONE, VCORE_NONE, "apic: Could not deliver IPI to self (logical)\n");
1118 case APIC_SHORTHAND_ALL:
1119 case APIC_SHORTHAND_ALL_BUT_ME: { /* all and all-but-me */
1120 /* assuming that logical verus physical doesn't matter
1121 although it is odd that both are used */
1124 for (i = 0; i < apic_dev->num_apics; i++) {
1125 dest_apic = &(apic_dev->apics[i]);
1127 if ((dest_apic != src_apic) || (ipi->dst_shorthand == APIC_SHORTHAND_ALL)) {
1128 if (deliver_ipi(src_apic, dest_apic, ipi) == -1) {
1129 PrintError(VM_NONE, VCORE_NONE, "apic: Error: Could not deliver IPI\n");
1138 PrintError(VM_NONE, VCORE_NONE, "apic: Error routing IPI, invalid Mode (%d)\n", ipi->dst_shorthand);
1146 // External function, expected to acquire lock on apic
/* MMIO read handler for the hooked APIC page. Resolves the register from the
 * page offset, materializes its 32-bit value into 'val', then copies the
 * requested 1/2/4 bytes into 'dst' (Linux issues sub-dword reads, hence the
 * width handling at the bottom). Derived registers (TPR/APR/PPR) are computed
 * on the fly; reads of a disabled APIC's page are rejected. */
1147 static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
1148 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1149 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1150 addr_t reg_addr = guest_addr - apic->base_addr;
1151 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
1155 PrintDebug(core->vm_info, core, "apic %u: core %u: at %p: Read apic address space (%p)\n",
1156 apic->lapic_id.val, core->vcpu_id, apic, (void *)guest_addr);
1158 if (msr->apic_enable == 0) {
1159 PrintError(core->vm_info, core, "apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
1160 apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
1165 /* Because "May not be supported" doesn't matter to Linux developers... */
1166 /* if (length != 4) { */
1167 /* PrintError(core->vm_info, core, "Invalid apic read length (%d)\n", length); */
/* Registers are dword-aligned; mask off the byte offset for the dispatch. */
1171 switch (reg_addr & ~0x3) {
1173 // Well, only an idiot would read from a architectural write only register
1175 // PrintError(core->vm_info, core, "Attempting to read from write only register\n");
1180 case APIC_ID_OFFSET:
1181 val = apic->lapic_id.val;
1183 case APIC_VERSION_OFFSET:
1184 val = apic->apic_ver.val;
/* TPR/APR/PPR are derived, not stored (see get_apic_* above). */
1187 val = get_apic_tpr(apic);
1190 val = get_apic_apr(apic);
1193 val = get_apic_ppr(apic);
1195 case REMOTE_READ_OFFSET:
1196 val = apic->rem_rd_data;
1199 val = apic->log_dst.val;
1202 val = apic->dst_fmt.val;
1204 case SPURIOUS_INT_VEC_OFFSET:
1205 val = apic->spurious_int.val;
1208 val = apic->err_status.val;
1210 case TMR_LOC_VEC_TBL_OFFSET:
1211 val = apic->tmr_vec_tbl.val;
1213 case LINT0_VEC_TBL_OFFSET:
1214 val = apic->lint0_vec_tbl.val;
1216 case LINT1_VEC_TBL_OFFSET:
1217 val = apic->lint1_vec_tbl.val;
1219 case ERR_VEC_TBL_OFFSET:
1220 val = apic->err_vec_tbl.val;
1222 case TMR_INIT_CNT_OFFSET:
1223 val = apic->tmr_init_cnt;
1225 case TMR_DIV_CFG_OFFSET:
1226 val = apic->tmr_div_cfg.val;
/* IER: eight 32-bit windows into the 256-bit enable bitmap. */
1230 val = *(uint32_t *)(apic->int_en_reg);
1233 val = *(uint32_t *)(apic->int_en_reg + 4);
1236 val = *(uint32_t *)(apic->int_en_reg + 8);
1239 val = *(uint32_t *)(apic->int_en_reg + 12);
1242 val = *(uint32_t *)(apic->int_en_reg + 16);
1245 val = *(uint32_t *)(apic->int_en_reg + 20);
1248 val = *(uint32_t *)(apic->int_en_reg + 24);
1251 val = *(uint32_t *)(apic->int_en_reg + 28);
/* ISR: eight 32-bit windows into the in-service bitmap. */
1255 val = *(uint32_t *)(apic->int_svc_reg);
1258 val = *(uint32_t *)(apic->int_svc_reg + 4);
1261 val = *(uint32_t *)(apic->int_svc_reg + 8);
1264 val = *(uint32_t *)(apic->int_svc_reg + 12);
1267 val = *(uint32_t *)(apic->int_svc_reg + 16);
1270 val = *(uint32_t *)(apic->int_svc_reg + 20);
1273 val = *(uint32_t *)(apic->int_svc_reg + 24);
1276 val = *(uint32_t *)(apic->int_svc_reg + 28);
/* TMR: eight 32-bit windows into the trigger-mode bitmap. */
1280 val = *(uint32_t *)(apic->trig_mode_reg);
1283 val = *(uint32_t *)(apic->trig_mode_reg + 4);
1286 val = *(uint32_t *)(apic->trig_mode_reg + 8);
1289 val = *(uint32_t *)(apic->trig_mode_reg + 12);
1292 val = *(uint32_t *)(apic->trig_mode_reg + 16);
1295 val = *(uint32_t *)(apic->trig_mode_reg + 20);
1298 val = *(uint32_t *)(apic->trig_mode_reg + 24);
1301 val = *(uint32_t *)(apic->trig_mode_reg + 28);
/* IRR: eight 32-bit windows into the pending-request bitmap. */
1305 val = *(uint32_t *)(apic->int_req_reg);
1308 val = *(uint32_t *)(apic->int_req_reg + 4);
1311 val = *(uint32_t *)(apic->int_req_reg + 8);
1314 val = *(uint32_t *)(apic->int_req_reg + 12);
1317 val = *(uint32_t *)(apic->int_req_reg + 16);
1320 val = *(uint32_t *)(apic->int_req_reg + 20);
1323 val = *(uint32_t *)(apic->int_req_reg + 24);
1326 val = *(uint32_t *)(apic->int_req_reg + 28);
1328 case TMR_CUR_CNT_OFFSET:
1329 val = apic->tmr_cur_cnt;
1332 // We are not going to implement these....
1333 case THERM_LOC_VEC_TBL_OFFSET:
1334 val = apic->therm_loc_vec_tbl.val;
1336 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1337 val = apic->perf_ctr_loc_vec_tbl.val;
1342 // handled registers
1343 case INT_CMD_LO_OFFSET:
1344 val = apic->int_cmd.lo;
1346 case INT_CMD_HI_OFFSET:
1347 val = apic->int_cmd.hi;
1350 // handle current timer count
1352 // Unhandled Registers
1353 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1354 val = apic->ext_intr_vec_tbl[0].val;
1356 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1357 val = apic->ext_intr_vec_tbl[1].val;
1359 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1360 val = apic->ext_intr_vec_tbl[2].val;
1362 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1363 val = apic->ext_intr_vec_tbl[3].val;
1367 case EXT_APIC_FEATURE_OFFSET:
1368 case EXT_APIC_CMD_OFFSET:
1372 PrintError(core->vm_info, core, "apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
1373 apic->lapic_id.val, core->vcpu_id, (uint32_t)reg_addr);
/* Copy the requested width out of 'val', honoring the sub-dword offset. */
1379 uint_t byte_addr = reg_addr & 0x3;
1380 uint8_t * val_ptr = (uint8_t *)dst;
1382 *val_ptr = *(((uint8_t *)&val) + byte_addr);
1384 } else if ((length == 2) &&
1385 ((reg_addr & 0x3) != 0x3)) {
1386 uint_t byte_addr = reg_addr & 0x3;
1387 uint16_t * val_ptr = (uint16_t *)dst;
/* NOTE(review): indexes a uint16_t* by byte_addr (a byte offset) -- this
 * scales by 2; confirm against upstream whether a byte-based offset was
 * intended here. */
1388 *val_ptr = *(((uint16_t *)&val) + byte_addr);
1390 } else if (length == 4) {
1391 uint32_t * val_ptr = (uint32_t *)dst;
1395 PrintError(core->vm_info, core, "apic %u: core %u: Invalid apic read length (%d)\n",
1396 apic->lapic_id.val, core->vcpu_id, length);
1400 PrintDebug(core->vm_info, core, "apic %u: core %u: Read finished (val=%x)\n",
1401 apic->lapic_id.val, core->vcpu_id, *(uint32_t *)dst);
1410 static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data) {
/* MMIO write handler for the local APIC register page.
 * Decodes the register offset from the guest physical address and updates the
 * per-core apic state; a write to ICR-low builds an IPI and routes it to the
 * destination APIC. Returns through the (elided) epilogue below. */
1411 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1412 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1413 addr_t reg_addr = guest_addr - apic->base_addr;
1414 struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
1415 uint32_t op_val = *(uint32_t *)src;
1418 PrintDebug(core->vm_info, core, "apic %u: core %u: at %p and priv_data is at %p\n",
1419 apic->lapic_id.val, core->vcpu_id, apic, priv_data);
1421 PrintDebug(core->vm_info, core, "apic %u: core %u: write to address space (%p) (val=%x)\n",
1422 apic->lapic_id.val, core->vcpu_id, (void *)guest_addr, *(uint32_t *)src);
/* Writes with the APIC globally disabled (via the APIC base MSR) are rejected. */
1424 if (msr->apic_enable == 0) {
1425 PrintError(core->vm_info, core, "apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
1426 apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
1432 PrintError(core->vm_info, core, "apic %u: core %u: Invalid apic write length (%d)\n",
/* BUGFIX: arguments were passed as (id, length, vcpu_id), which swapped the
 * core number and the length in the log message; order now matches the
 * "apic %u: core %u: ... (%d)" format string. */
1433 apic->lapic_id.val, core->vcpu_id, length);
/* Read-only registers: writes are an error. */
1438 case REMOTE_READ_OFFSET:
1439 case APIC_VERSION_OFFSET:
1466 case EXT_APIC_FEATURE_OFFSET:
1468 PrintError(core->vm_info, core, "apic %u: core %u: Attempting to write to read only register %p (error)\n",
1469 apic->lapic_id.val, core->vcpu_id, (void *)reg_addr);
1474 case APIC_ID_OFFSET:
1475 //V3_Print(core->vm_info, core, "apic %u: core %u: my id is being changed to %u\n",
1476 // apic->lapic_id.val, core->vcpu_id, op_val);
1478 apic->lapic_id.val = op_val;
1481 set_apic_tpr(apic,op_val);
1484 PrintDebug(core->vm_info, core, "apic %u: core %u: setting log_dst.val to 0x%x\n",
1485 apic->lapic_id.val, core->vcpu_id, op_val);
/* log_dst / dst_fmt are examined by other cores during IPI routing, so they
 * are updated under the device-wide state lock (see locking notes at top). */
1486 flags = v3_lock_irqsave(apic_dev->state_lock);
1487 apic->log_dst.val = op_val;
1488 v3_unlock_irqrestore(apic_dev->state_lock, flags);
1491 flags = v3_lock_irqsave(apic_dev->state_lock);
1492 apic->dst_fmt.val = op_val;
1493 v3_unlock_irqrestore(apic_dev->state_lock, flags);
1495 case SPURIOUS_INT_VEC_OFFSET:
1496 apic->spurious_int.val = op_val;
1499 apic->err_status.val = op_val;
1501 case TMR_LOC_VEC_TBL_OFFSET:
1502 apic->tmr_vec_tbl.val = op_val;
1504 case THERM_LOC_VEC_TBL_OFFSET:
1505 apic->therm_loc_vec_tbl.val = op_val;
1507 case PERF_CTR_LOC_VEC_TBL_OFFSET:
1508 apic->perf_ctr_loc_vec_tbl.val = op_val;
1510 case LINT0_VEC_TBL_OFFSET:
1511 apic->lint0_vec_tbl.val = op_val;
1513 case LINT1_VEC_TBL_OFFSET:
1514 apic->lint1_vec_tbl.val = op_val;
1516 case ERR_VEC_TBL_OFFSET:
1517 apic->err_vec_tbl.val = op_val;
1519 case TMR_INIT_CNT_OFFSET:
/* Writing the initial count also reloads the current count, restarting the timer. */
1520 apic->tmr_init_cnt = op_val;
1521 apic->tmr_cur_cnt = op_val;
1523 case TMR_CUR_CNT_OFFSET:
1524 apic->tmr_cur_cnt = op_val;
1526 case TMR_DIV_CFG_OFFSET:
1527 PrintDebug(core->vm_info, core, "apic %u: core %u: setting tmr_div_cfg to 0x%x\n",
1528 apic->lapic_id.val, core->vcpu_id, op_val);
1529 apic->tmr_div_cfg.val = op_val;
1533 // Enable mask (256 bits)
1535 *(uint32_t *)(apic->int_en_reg) = op_val;
1538 *(uint32_t *)(apic->int_en_reg + 4) = op_val;
1541 *(uint32_t *)(apic->int_en_reg + 8) = op_val;
1544 *(uint32_t *)(apic->int_en_reg + 12) = op_val;
1547 *(uint32_t *)(apic->int_en_reg + 16) = op_val;
1550 *(uint32_t *)(apic->int_en_reg + 20) = op_val;
1553 *(uint32_t *)(apic->int_en_reg + 24) = op_val;
1556 *(uint32_t *)(apic->int_en_reg + 28) = op_val;
1559 case EXT_INT_LOC_VEC_TBL_OFFSET0:
1560 apic->ext_intr_vec_tbl[0].val = op_val;
1562 case EXT_INT_LOC_VEC_TBL_OFFSET1:
1563 apic->ext_intr_vec_tbl[1].val = op_val;
1565 case EXT_INT_LOC_VEC_TBL_OFFSET2:
1566 apic->ext_intr_vec_tbl[2].val = op_val;
1568 case EXT_INT_LOC_VEC_TBL_OFFSET3:
1569 apic->ext_intr_vec_tbl[3].val = op_val;
1576 apic_do_eoi(core, apic);
/* ICR low: latch the command and translate it into a generic IPI descriptor. */
1579 case INT_CMD_LO_OFFSET: {
1582 struct v3_gen_ipi tmp_ipi;
1584 apic->int_cmd.lo = op_val;
1586 tmp_ipi.vector = apic->int_cmd.vec;
1587 tmp_ipi.mode = apic->int_cmd.del_mode;
1588 tmp_ipi.logical = apic->int_cmd.dst_mode;
1589 tmp_ipi.trigger_mode = apic->int_cmd.trig_mode;
1590 tmp_ipi.dst_shorthand = apic->int_cmd.dst_shorthand;
1591 tmp_ipi.dst = apic->int_cmd.dst;
1594 tmp_ipi.private_data = NULL;
1597 // V3_Print(core->vm_info, core, "apic %u: core %u: sending cmd 0x%llx to apic %u\n",
1598 // apic->lapic_id.val, core->vcpu_id,
1599 // apic->int_cmd.val, apic->int_cmd.dst);
1601 if (route_ipi(apic_dev, apic, &tmp_ipi) == -1) {
1602 PrintError(core->vm_info, core, "IPI Routing failure\n");
/* ICR high: only latched; the IPI fires on the subsequent ICR-low write. */
1608 case INT_CMD_HI_OFFSET: {
1609 apic->int_cmd.hi = op_val;
1610 //V3_Print(core->vm_info, core, "apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->vcpu_id,apic->int_cmd.hi);
1613 // Unhandled Registers
1614 case EXT_APIC_CMD_OFFSET:
1617 PrintError(core->vm_info, core, "apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
1618 apic->lapic_id.val, core->vcpu_id, (uint32_t)reg_addr);
1623 PrintDebug(core->vm_info, core, "apic %u: core %u: Write finished\n", apic->lapic_id.val, core->vcpu_id);
1631 /* Interrupt Controller Functions */
1634 static int apic_intr_pending(struct guest_info * core, void * private_data) {
/* Interrupt-controller callback: returns whether this core's APIC has an
 * interrupt that is ready to be injected right now. Drains the cross-core
 * IRQ queue first, then compares the highest requested vector (IRR) against
 * the highest in-service vector (ISR) and the processor priority (PPR). */
1635 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1636 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1640 // Activate all queued IRQ entries
1641 drain_irq_entries(apic);
1643 // Check for newly activated entries
1644 req_irq = get_highest_irr(apic);
1645 svc_irq = get_highest_isr(apic);
1647 // PrintDebug(core->vm_info, core, "apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->vcpu_id,req_irq,svc_irq);
1650 if ((req_irq >= 0) &&
1651 (req_irq > svc_irq)) {
1653 // We have a new requested vector that is higher priority than
1654 // the vector that is in-service
1656 uint32_t ppr = get_apic_ppr(apic);
/* Priority is the upper nibble of the vector (vector class). */
1658 if ((req_irq & 0xf0) > (ppr & 0xf0)) {
1659 // it's also higher priority than the current
1660 // processor priority. Therefore this
1661 // interrupt can go in now.
1664 // processor priority is currently too high
1665 // for this interrupt to go in now.
1666 // note that if tpr=0xf?, then ppr=0xf?
1667 // and thus all vectors will be masked
1668 // as required (tpr=0xf? => all masked)
1672 // the vector that is in service is higher
1673 // priority than any new requested vector
1680 static int apic_get_intr_number(struct guest_info * core, void * private_data) {
/* Interrupt-controller callback: returns the vector number of the interrupt
 * that should be injected next, using the same IRR/ISR/PPR comparison as
 * apic_intr_pending (see the comments there). */
1681 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1682 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1683 int req_irq = get_highest_irr(apic);
1684 int svc_irq = get_highest_isr(apic);
1687 // for the logic here, see the comments for apic_intr_pending
1688 if ((req_irq >=0) &&
1689 (req_irq > svc_irq)) {
1691 uint32_t ppr = get_apic_ppr(apic);
1693 if ((req_irq & 0xf0) > (ppr & 0xf0)) {
1696 // hmm, this should not have happened, but, anyway,
1697 // no interrupt is currently ready to go in
1707 int v3_apic_send_ipi(struct v3_vm_info * vm, struct v3_gen_ipi * ipi, void * dev_data) {
/* Public entry point for other devices/subsystems to raise an IPI.
 * Routes with a NULL source APIC, i.e. the IPI does not originate from a core. */
1708 struct apic_dev_state * apic_dev = (struct apic_dev_state *)
1709 (((struct vm_device *)dev_data)->private_data);
1711 return route_ipi(apic_dev, NULL, ipi);
1716 static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
/* Interrupt-controller callback invoked when the guest actually takes vector
 * `irq`: moves the vector's bit from the request register (IRR) to the
 * in-service register (ISR). Each 256-bit register is addressed as bytes:
 * major_offset selects the byte, minor_offset the bit within it. */
1717 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
1718 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1719 int major_offset = (irq & ~0x00000007) >> 3;
1720 int minor_offset = irq & 0x00000007;
1721 uint8_t *req_location = apic->int_req_reg + major_offset;
1722 uint8_t *svc_location = apic->int_svc_reg + major_offset;
1723 uint8_t flag = 0x01 << minor_offset;
1725 if (*req_location & flag) {
1726 // we will only pay attention to a begin irq if we
1727 // know that we initiated it!
1728 *svc_location |= flag;
1729 *req_location &= ~flag;
1732 //PrintDebug(core->vm_info, core, "apic %u: core %u: begin irq for %d ignored since I don't own it\n",
1733 // apic->lapic_id.val, core->vcpu_id, irq);
1740 /* Timer Functions */
1742 static void apic_inject_timer_intr(struct guest_info *core,
/* Raises the local APIC timer interrupt on this core by activating the
 * internal APIC_TMR_INT source. Logs (debug builds) if a pending IRQ is
 * already outstanding when the timer fires. */
1744 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1745 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1747 PrintDebug(core->vm_info, core, "apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d)\n",
1748 apic->lapic_id.val, core->vcpu_id,
1749 apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt);
1751 if (apic_intr_pending(core, priv_data)) {
1752 PrintDebug(core->vm_info, core, "apic %u: core %u: Overriding pending IRQ %d\n",
1753 apic->lapic_id.val, core->vcpu_id,
1754 apic_get_intr_number(core, priv_data));
1757 if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
1758 PrintError(core->vm_info, core, "apic %u: core %u: Could not raise Timer interrupt\n",
1759 apic->lapic_id.val, core->vcpu_id);
1768 static void apic_update_time(struct guest_info * core,
/* Timer callback: advances the APIC timer by the elapsed guest cycles.
 * Converts cpu_cycles to timer ticks via the divider (a right shift),
 * decrements the current count, and injects (or queues, if the optional
 * missed-IRQ feature is compiled in) timer interrupts when it expires. */
1769 uint64_t cpu_cycles, uint64_t cpu_freq,
1771 struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
1772 struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
1774 // 32-bit builds use a 32-bit tick counter here (64-bit arithmetic
1774 // support in the 32-bit GCC runtime is limited)
1776 uint64_t tmr_ticks = 0;
1778 uint32_t tmr_ticks = 0;
1781 uint8_t tmr_div = *(uint8_t *)&(apic->tmr_div_cfg.val);
1782 uint_t shift_num = 0;
1785 // Check whether this is true:
1786 // -> If the Init count is zero then the timer is disabled
1787 // and doesn't just blitz interrupts to the CPU
1788 if ((apic->tmr_init_cnt == 0) ||
1789 ( (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_ONESHOT) &&
1790 (apic->tmr_cur_cnt == 0))) {
1791 //PrintDebug(core->vm_info, core, "apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->vcpu_id);
1809 case APIC_TMR_DIV16:
1812 case APIC_TMR_DIV32:
1815 case APIC_TMR_DIV64:
1818 case APIC_TMR_DIV128:
1822 PrintError(core->vm_info, core, "apic %u: core %u: Invalid Timer Divider configuration\n",
1823 apic->lapic_id.val, core->vcpu_id);
/* Divider is a power of two, so the conversion is a shift. */
1827 tmr_ticks = cpu_cycles >> shift_num;
1828 // PrintDebug(core->vm_info, core, "Timer Ticks: %p\n", (void *)tmr_ticks);
1830 if (tmr_ticks < apic->tmr_cur_cnt) {
1831 apic->tmr_cur_cnt -= tmr_ticks;
1832 #ifdef V3_CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS
/* Deliver one previously-missed timer interrupt per update, but only
 * when nothing else is currently pending. */
1833 if (apic->missed_ints && !apic_intr_pending(core, priv_data)) {
1834 PrintDebug(core->vm_info, core, "apic %u: core %u: Injecting queued APIC timer interrupt.\n",
1835 apic->lapic_id.val, core->vcpu_id);
1836 apic_inject_timer_intr(core, priv_data);
1837 apic->missed_ints--;
1839 #endif /* CONFIG_APIC_ENQUEUE_MISSED_TMR_IRQS */
/* Timer expired during this interval: fire now and, in periodic mode,
 * account for any whole periods that elapsed as missed interrupts. */
1841 tmr_ticks -= apic->tmr_cur_cnt;
1842 apic->tmr_cur_cnt = 0;
1844 apic_inject_timer_intr(core, priv_data);
1846 if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
1847 int queued_ints = tmr_ticks / apic->tmr_init_cnt;
1848 tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
1849 apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
1850 apic->missed_ints += queued_ints;
/* Interrupt-controller interface registered with each core (see apic_init). */
1857 static struct intr_ctrl_ops intr_ops = {
1858 .intr_pending = apic_intr_pending,
1859 .get_intr_number = apic_get_intr_number,
1860 .begin_irq = apic_begin_irq,
/* Per-core timer interface; update_timer drives the APIC timer countdown. */
1864 static struct v3_timer_ops timer_ops = {
1865 .update_timer = apic_update_time,
1871 static int apic_free(struct apic_dev_state * apic_dev) {
/* Device teardown: per APIC, unregister the interrupt controller and timer
 * and destroy the IRQ-queue lock; then unhook the base-address MSR and
 * destroy the device-wide state lock. */
1873 struct v3_vm_info * vm = NULL;
1875 for (i = 0; i < apic_dev->num_apics; i++) {
1876 struct apic_state * apic = &(apic_dev->apics[i]);
1877 struct guest_info * core = apic->core;
1881 v3_remove_intr_controller(core, apic->controller_handle);
1884 v3_remove_timer(core, apic->timer);
1887 v3_lock_deinit(&(apic->irq_queue.lock));
1893 v3_unhook_msr(vm, BASE_ADDR_MSR);
1895 v3_lock_deinit(&(apic_dev->state_lock));
1901 #ifdef V3_CONFIG_CHECKPOINT
/* Builds a per-APIC checkpoint key "<name><index>" into the local `key`
 * buffer; relies on `key`, KEY_MAX, and loop index `i` being in scope at the
 * call site. NOTE(review): the macro body ends in a semicolon, so call sites
 * like MAKE_KEY("X"); expand to a harmless empty extra statement. */
1904 #define MAKE_KEY(x) snprintf(key,KEY_MAX,"%s%d",x,i);
1906 static int apic_save(struct v3_chkpt_ctx * ctx, void * private_data) {
/* Checkpoint-save callback: drains each APIC's IRQ queue so queued entries
 * are reflected in the register state, then serializes every register of
 * every APIC under per-index keys. On any failure jumps to savefailout. */
1907 struct apic_dev_state * apic_state = (struct apic_dev_state *)private_data;
1912 V3_CHKPT_SAVE(ctx, "NUM_APICS", apic_state->num_apics,savefailout);
1914 for (i = 0; i < apic_state->num_apics; i++) {
1915 drain_irq_entries(&(apic_state->apics[i]));
1917 MAKE_KEY("BASE_ADDR");
1918 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].base_addr,savefailout);
1919 MAKE_KEY("BASE_ADDR_MSR");
1920 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].base_addr_msr,savefailout);
1921 MAKE_KEY("LAPIC_ID");
1922 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].lapic_id,savefailout);
1923 MAKE_KEY("APIC_VER");
1924 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].apic_ver,savefailout);
1925 MAKE_KEY("EXT_APIC_CTRL");
1926 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].ext_apic_ctrl,savefailout);
1927 MAKE_KEY("LOCAL_VEC_TBL");
1928 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].local_vec_tbl,savefailout);
1929 MAKE_KEY("TMR_VEC_TBL");
1930 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].tmr_vec_tbl,savefailout);
1931 MAKE_KEY("TMR_DIV_CFG");
1932 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].tmr_div_cfg,savefailout);
1933 MAKE_KEY("LINT0_VEC_TBL");
1934 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].lint0_vec_tbl,savefailout);
1935 MAKE_KEY("LINT1_VEC_TBL");
1936 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].lint1_vec_tbl,savefailout);
1937 MAKE_KEY("PERF_CTR_LOC_VEC_TBL");
1938 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].perf_ctr_loc_vec_tbl,savefailout);
1939 MAKE_KEY("THERM_LOC_VEC_TBL");
1940 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].therm_loc_vec_tbl,savefailout);
1941 MAKE_KEY("ERR_VEC_TBL");
1942 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].err_vec_tbl,savefailout);
1943 MAKE_KEY("ERR_STATUS");
1944 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].err_status,savefailout);
1945 MAKE_KEY("SPURIOUS_INT");
1946 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].spurious_int,savefailout);
1947 MAKE_KEY("INT_CMD");
1948 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].int_cmd,savefailout);
1949 MAKE_KEY("LOG_DST");
1950 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].log_dst,savefailout);
1951 MAKE_KEY("DST_FMT");
1952 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].dst_fmt,savefailout);
1954 // APR and PPR are stored only for compatability
1955 // TPR is in APIC_TPR, APR and PPR are derived
1957 temp = get_apic_apr(&(apic_state->apics[i]));
1958 MAKE_KEY("ARB_PRIO");
1959 V3_CHKPT_SAVE(ctx, key, temp,savefailout);
1960 temp = get_apic_tpr(&(apic_state->apics[i]));
1961 MAKE_KEY("TASK_PRIO");
1962 V3_CHKPT_SAVE(ctx,key,temp,savefailout);
1963 temp = get_apic_ppr(&(apic_state->apics[i]));
1964 MAKE_KEY("PROC_PRIO");
1965 V3_CHKPT_SAVE(ctx, key,temp,savefailout);
1967 MAKE_KEY("EXT_APIC_FEATURE");
1968 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].ext_apic_feature,savefailout);
1969 MAKE_KEY("SPEC_EOI");
1970 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].spec_eoi,savefailout);
1971 MAKE_KEY("TMR_CUR_CNT");
1972 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].tmr_cur_cnt,savefailout);
1974 MAKE_KEY("TMR_INIT_CNT");
1975 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].tmr_init_cnt,savefailout);
1976 MAKE_KEY("EXT_INTR_VEC_TBL");
1977 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].ext_intr_vec_tbl,savefailout);
1979 MAKE_KEY("REM_RD_DATA");
1980 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].rem_rd_data,savefailout);
1981 MAKE_KEY("IPI_STATE");
1982 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].ipi_state,savefailout);
1983 MAKE_KEY("INT_REQ_REG");
1984 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].int_req_reg,savefailout);
1985 MAKE_KEY("INT_SVC_REG");
1986 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].int_svc_reg,savefailout);
1987 MAKE_KEY("INT_EN_REG");
1988 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].int_en_reg,savefailout);
1989 MAKE_KEY("TRIG_MODE_REG");
1990 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].trig_mode_reg,savefailout);
1992 V3_CHKPT_SAVE(ctx, key, apic_state->apics[i].eoi,savefailout);
1999 PrintError(VM_NONE, VCORE_NONE, "Failed to save apic\n");
2003 static int apic_load(struct v3_chkpt_ctx * ctx, void * private_data) {
/* Checkpoint-load callback: mirror of apic_save. Restores every register of
 * every APIC; ARB_PRIO and PROC_PRIO are read but discarded (APR/PPR are
 * derived), while TASK_PRIO is applied via set_apic_tpr. On any failure
 * jumps to loadfailout. */
2004 struct apic_dev_state *apic_state = (struct apic_dev_state *)private_data;
2009 V3_CHKPT_LOAD(ctx,"NUM_APICS", apic_state->num_apics, loadfailout);
2011 for (i = 0; i < apic_state->num_apics; i++) {
2012 drain_irq_entries(&(apic_state->apics[i]));
2014 MAKE_KEY("BASE_ADDR");
2015 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].base_addr,loadfailout);
2016 MAKE_KEY("BASE_ADDR_MSR");
2017 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].base_addr_msr,loadfailout);
2018 MAKE_KEY("LAPIC_ID");
2019 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].lapic_id,loadfailout);
2020 MAKE_KEY("APIC_VER");
2021 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].apic_ver,loadfailout);
2022 MAKE_KEY("EXT_APIC_CTRL");
2023 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].ext_apic_ctrl,loadfailout);
2024 MAKE_KEY("LOCAL_VEC_TBL");
2025 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].local_vec_tbl,loadfailout);
2026 MAKE_KEY("TMR_VEC_TBL");
2027 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].tmr_vec_tbl,loadfailout);
2028 MAKE_KEY("TMR_DIV_CFG");
2029 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].tmr_div_cfg,loadfailout);
2030 MAKE_KEY("LINT0_VEC_TBL");
2031 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].lint0_vec_tbl,loadfailout);
2032 MAKE_KEY("LINT1_VEC_TBL");
2033 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].lint1_vec_tbl,loadfailout);
2034 MAKE_KEY("PERF_CTR_LOC_VEC_TBL");
2035 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].perf_ctr_loc_vec_tbl,loadfailout);
2036 MAKE_KEY("THERM_LOC_VEC_TBL");
2037 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].therm_loc_vec_tbl,loadfailout);
2038 MAKE_KEY("ERR_VEC_TBL");
2039 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].err_vec_tbl,loadfailout);
2040 MAKE_KEY("ERR_STATUS");
2041 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].err_status,loadfailout);
2042 MAKE_KEY("SPURIOUS_INT");
2043 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].spurious_int,loadfailout);
2044 MAKE_KEY("INT_CMD");
2045 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].int_cmd,loadfailout);
2046 MAKE_KEY("LOG_DST");
2047 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].log_dst,loadfailout);
2048 MAKE_KEY("DST_FMT");
2049 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].dst_fmt,loadfailout);
2051 // APR and PPR are stored only for compatability
2052 // TPR is in APIC_TPR, APR and PPR are derived
2054 MAKE_KEY("ARB_PRIO");
2055 V3_CHKPT_LOAD(ctx, key, temp,loadfailout);
/* temp (APR) is intentionally discarded after the load above. */
2058 MAKE_KEY("TASK_PRIO");
2059 V3_CHKPT_LOAD(ctx,key,temp,loadfailout);
2060 set_apic_tpr(&(apic_state->apics[i]),temp);
2062 MAKE_KEY("PROC_PRIO");
2063 V3_CHKPT_LOAD(ctx, key,temp,loadfailout);
2067 MAKE_KEY("EXT_APIC_FEATURE");
2068 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].ext_apic_feature,loadfailout);
2069 MAKE_KEY("SPEC_EOI");
2070 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].spec_eoi,loadfailout);
2071 MAKE_KEY("TMR_CUR_CNT");
2072 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].tmr_cur_cnt,loadfailout);
2074 MAKE_KEY("TMR_INIT_CNT");
2075 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].tmr_init_cnt,loadfailout);
2076 MAKE_KEY("EXT_INTR_VEC_TBL");
2077 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].ext_intr_vec_tbl,loadfailout);
2079 MAKE_KEY("REM_RD_DATA");
2080 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].rem_rd_data,loadfailout);
2081 MAKE_KEY("IPI_STATE");
2082 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].ipi_state,loadfailout);
2083 MAKE_KEY("INT_REQ_REG");
2084 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].int_req_reg,loadfailout);
2085 MAKE_KEY("INT_SVC_REG");
2086 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].int_svc_reg,loadfailout);
2087 MAKE_KEY("INT_EN_REG");
2088 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].int_en_reg,loadfailout);
2089 MAKE_KEY("TRIG_MODE_REG");
2090 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].trig_mode_reg,loadfailout);
2092 V3_CHKPT_LOAD(ctx, key, apic_state->apics[i].eoi,loadfailout);
2099 PrintError(VM_NONE,VCORE_NONE, "Failed to load apic\n");
/* Device lifecycle operations registered with v3_add_device in apic_init. */
2106 static struct v3_device_ops dev_ops = {
2107 .free = (int (*)(void *))apic_free,
2108 #ifdef V3_CONFIG_CHECKPOINT
2116 static int apic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
/* Device constructor: allocates one apic_dev_state carrying a trailing array
 * of per-core apic_state entries, registers the device, then per core:
 * initializes APIC state, registers the interrupt controller and timer, and
 * hooks the 4KB APIC MMIO page to apic_read/apic_write. Finally hooks the
 * APIC base-address MSR. */
2117 char * dev_id = v3_cfg_val(cfg, "ID");
2118 struct apic_dev_state * apic_dev = NULL;
2121 PrintDebug(vm, VCORE_NONE, "apic: creating an APIC for each core\n");
2123 apic_dev = (struct apic_dev_state *)V3_Malloc(sizeof(struct apic_dev_state) +
2124 sizeof(struct apic_state) * vm->num_cores);
2128 PrintError(vm, VCORE_NONE, "Failed to allocate space for APIC\n");
2133 sizeof(struct apic_dev_state) +
2134 sizeof(struct apic_state) * vm->num_cores);
2136 apic_dev->num_apics = vm->num_cores;
2137 v3_lock_init(&(apic_dev->state_lock));
2139 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, apic_dev);
2142 PrintError(vm, VCORE_NONE, "apic: Could not attach device %s\n", dev_id);
2148 for (i = 0; i < vm->num_cores; i++) {
2149 struct apic_state * apic = &(apic_dev->apics[i]);
2150 struct guest_info * core = &(vm->cores[i]);
2154 init_apic_state(apic, i);
2156 apic->controller_handle = v3_register_intr_controller(core, &intr_ops, apic_dev);
2158 apic->timer = v3_add_timer(core, &timer_ops, apic_dev);
2160 if (apic->timer == NULL) {
2161 PrintError(vm, VCORE_NONE,"APIC: Failed to attach timer to core %d\n", i);
2162 v3_remove_device(dev);
2166 v3_hook_full_mem(vm, core->vcpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
2168 PrintDebug(vm, VCORE_NONE, "apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
2171 #ifdef V3_CONFIG_DEBUG_APIC
2172 for (i = 0; i < vm->num_cores; i++) {
2173 struct apic_state * apic = &(apic_dev->apics[i]);
2174 PrintDebug(vm, VCORE_NONE, "apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
2175 i, apic, apic->lapic_id.val, apic->base_addr_msr.value,apic->core);
2180 PrintDebug(vm, VCORE_NONE, "apic: priv_data is at %p\n", apic_dev);
2182 v3_hook_msr(vm, BASE_ADDR_MSR, read_apic_msr, write_apic_msr, apic_dev);
/* Registers this device under the "LAPIC" name with the VMM device framework. */
2189 device_register("LAPIC", apic_init)