2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2009, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2009, Lei Xia <lxia@northwestern.edu>
12 * Copyright (c) 2009, Chang Seok Bae <jhuell@gmail.com>
13 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
14 * All rights reserved.
16 * Author: Jack Lange <jarusl@cs.northwestern.edu>
17 * Lei Xia <lxia@northwestern.edu>
18 * Chang Seok Bae <jhuell@gmail.com>
20 * This is free software. You are permitted to use,
21 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
26 #include <palacios/vmm.h>
27 #include <palacios/vmm_types.h>
28 #include <palacios/vmm_io.h>
29 #include <palacios/vmm_intr.h>
30 #include <palacios/vmm_rbtree.h>
31 #include <palacios/vmm_dev_mgr.h>
33 #include <devices/pci.h>
34 #include <devices/pci_types.h>
36 #include <palacios/vm_guest.h>
37 #include <palacios/vm_guest_mem.h>
40 #include <devices/apic.h>
// Debug output compiles to a no-op unless PCI debugging is configured.
43 #ifndef V3_CONFIG_DEBUG_PCI
45 #define PrintDebug(fmt, args...)
// x86 PCI configuration mechanism #1: address register at 0xcf8,
// 4-byte data window at 0xcfc.
49 #define CONFIG_ADDR_PORT 0x0cf8
50 #define CONFIG_DATA_PORT 0x0cfc
// Base IO port from which virtual PCI devices are handed port ranges.
52 #define PCI_DEV_IO_PORT_BASE 0xc000
// Only bus 0 is emulated.
54 #define PCI_BUS_COUNT 1
56 // This must always be a multiple of 8
57 #define MAX_BUS_DEVICES 32
// Standard PCI capability IDs (PCI spec appendix H): MSI and MSI-X.
59 #define PCI_CAP_ID_MSI 0x05
60 #define PCI_CAP_ID_MSIX 0x11
// Tail of the config-address register layout (leading fields are outside
// this view); hi_reg_num presumably extends reg_num for extended config
// space access — TODO confirm against the full struct definition.
72 uint_t hi_reg_num : 4;
75 } __attribute__((packed));
76 } __attribute__((packed));
77 } __attribute__((packed));
// Per-bus state: device tree, device-number allocation bitmap, and the
// IRQ raise/lower callbacks installed by an interrupt bridge (see
// v3_pci_set_irq_bridge below).
86 // Red Black tree containing all attached devices
87 struct rb_root devices;
89 // Bitmap of the allocated device numbers
90 uint8_t dev_map[MAX_BUS_DEVICES / 8];
93 int (*raise_pci_irq)(struct pci_device * pci_dev, void * dev_data, struct v3_irq * vec);
94 int (*lower_pci_irq)(struct pci_device * pci_dev, void * dev_data, struct v3_irq * vec);
// Top-level state of the virtual PCI bus device.
100 struct pci_internal {
101 // Configuration address register
102 struct pci_addr_reg addr_reg;
104 // Base IO Port which PCI devices will register with...
105 uint16_t dev_io_base;
108 struct pci_bus bus_list[PCI_BUS_COUNT];
// A hook over a byte range of one device's config space; the read/write
// callbacks override default config handling for [start, start+length).
113 struct cfg_range_hook {
117 int (*write)(struct pci_device * pci_dev, uint32_t offset,
118 void * src, uint_t length, void * private_data);
120 int (*read)(struct pci_device * pci_dev, uint32_t offset,
121 void * dst, uint_t length, void * private_data);
// Links this hook into pci_device->cfg_hooks.
125 struct list_head list_node;
// Links a struct pci_cap into pci_device->capabilities.
135 struct list_head cap_node;
139 // These mark read only fields in the pci config header.
140 // If a bit is 1, then the field is writable in the header
// Per-byte writability mask for the 64-byte type-00 config header, indexed
// by config-space offset. Guest writes are ANDed with these masks (see
// data_port_write / cmd_write below).
142 * BIST is disabled by default (All writes to it will be dropped
143 * Cardbus CIS is disabled (All writes are dropped)
144 * Writes to capability pointer are disabled
146 static uint8_t pci_hdr_write_mask_00[64] = { 0x00, 0x00, 0x00, 0x00, /* Device ID, Vendor ID */
147 0xbf, 0xff, 0x00, 0xf9, /* Command, status */
148 0x00, 0x00, 0x00, 0x00, /* Revision ID, Class code */
149 0x00, 0xff, 0x00, 0x00, /* CacheLine Size, Latency Timer, Header Type, BIST */
150 0xff, 0xff, 0xff, 0xff, /* BAR 0 */
151 0xff, 0xff, 0xff, 0xff, /* BAR 1 */
152 0xff, 0xff, 0xff, 0xff, /* BAR 2 */
153 0xff, 0xff, 0xff, 0xff, /* BAR 3 */
154 0xff, 0xff, 0xff, 0xff, /* BAR 4 */
155 0xff, 0xff, 0xff, 0xff, /* BAR 5 */
156 0x00, 0x00, 0x00, 0x00, /* CardBus CIS Ptr */
157 0xff, 0xff, 0xff, 0xff, /* SubSystem Vendor ID, SubSystem ID */
158 0xff, 0xff, 0xff, 0xff, /* ExpRom BAR */
159 0x00, 0x00, 0x00, 0x00, /* CAP ptr (0xfc to enable), RSVD */
160 0x00, 0x00, 0x00, 0x00, /* Reserved */
161 0xff, 0x00, 0x00, 0x00 /* INTR Line, INTR Pin, MIN_GNT, MAX_LAT */
167 #ifdef V3_CONFIG_DEBUG_PCI
// Debug helper: walk bus 0's device tree in order and print each device's
// identity (number, name, IRQ line, vendor/device IDs). Debug builds only.
169 static void pci_dump_state(struct pci_internal * pci_state) {
170 struct rb_node * node = v3_rb_first(&(pci_state->bus_list[0].devices));
171 struct pci_device * tmp_dev = NULL;
173 PrintDebug("===PCI: Dumping state Begin ==========\n");
176 tmp_dev = rb_entry(node, struct pci_device, dev_tree_node);
178 PrintDebug("PCI Device Number: %d (%s):\n", tmp_dev->dev_num, tmp_dev->name);
179 PrintDebug("irq = %d\n", tmp_dev->config_header.intr_line);
180 PrintDebug("Vend ID: 0x%x\n", tmp_dev->config_header.vendor_id);
181 PrintDebug("Device ID: 0x%x\n", tmp_dev->config_header.device_id);
// Advance in-order traversal; loop terminates when v3_rb_next returns NULL.
183 } while ((node = v3_rb_next(node)));
185 PrintDebug("====PCI: Dumping state End==========\n");
193 // Scan the dev_map bitmap for the first '0' bit
// Returns the first unallocated device number on the bus as
// (byte index * 8) + bit index. (The not-found path is outside this view.)
194 static int get_free_dev_num(struct pci_bus * bus) {
197 for (i = 0; i < sizeof(bus->dev_map); i++) {
198 PrintDebug("i=%d\n", i);
// Skip bytes whose 8 device slots are all taken.
199 if (bus->dev_map[i] != 0xff) {
201 for (j = 0; j < 8; j++) {
202 PrintDebug("\tj=%d\n", j);
203 if (!(bus->dev_map[i] & (0x1 << j))) {
204 return ((i * 8) + j);
// Mark dev_num as taken in the bus bitmap (byte = dev_num/8, bit = dev_num%8).
// No bounds check: callers must pass dev_num < MAX_BUS_DEVICES.
213 static void allocate_dev_num(struct pci_bus * bus, int dev_num) {
214 int major = (dev_num / 8);
215 int minor = dev_num % 8;
217 bus->dev_map[major] |= (0x1 << minor);
// Internal rb-tree insertion keyed by devfn. Standard descent: walk left
// on smaller keys, right on larger; on a duplicate key the existing device
// is presumably returned (that branch is outside this view — confirm).
223 struct pci_device * __add_device_to_bus(struct pci_bus * bus, struct pci_device * dev) {
225 struct rb_node ** p = &(bus->devices.rb_node);
226 struct rb_node * parent = NULL;
227 struct pci_device * tmp_dev = NULL;
231 tmp_dev = rb_entry(parent, struct pci_device, dev_tree_node);
233 if (dev->devfn < tmp_dev->devfn) {
235 } else if (dev->devfn > tmp_dev->devfn) {
// Link the new node where the descent ended; caller rebalances.
242 rb_link_node(&(dev->dev_tree_node), parent, p);
// Public wrapper: insert the device, rebalance the tree, and mark its
// device number allocated. A non-NULL return from __add_device_to_bus
// indicates the devfn slot was already occupied.
249 struct pci_device * add_device_to_bus(struct pci_bus * bus, struct pci_device * dev) {
251 struct pci_device * ret = NULL;
253 if ((ret = __add_device_to_bus(bus, dev))) {
257 v3_rb_insert_color(&(dev->dev_tree_node), &(bus->devices));
259 allocate_dev_num(bus, dev->dev_num);
// Look up a device by (dev_num, fn_num) on a bus via rb-tree search.
// devfn packs the 5-bit device number and 3-bit function number, matching
// the insertion key; NULL is returned on a miss (outside this view).
265 static struct pci_device * get_device(struct pci_bus * bus, uint8_t dev_num, uint8_t fn_num) {
266 struct rb_node * n = bus->devices.rb_node;
267 struct pci_device * dev = NULL;
268 uint8_t devfn = ((dev_num & 0x1f) << 3) | (fn_num & 0x7);
271 dev = rb_entry(n, struct pci_device, dev_tree_node);
273 if (devfn < dev->devfn) {
275 } else if (devfn > dev->devfn) {
288 // There won't be many hooks at all, so unordered lists are acceptible for now
// Linear scan for any hook whose inclusive [start, end] range overlaps the
// query range [start, start+length-1].
289 static struct cfg_range_hook * find_cfg_range_hook(struct pci_device * pci, uint32_t start, uint32_t length) {
290 uint32_t end = start + length - 1; // end is inclusive
291 struct cfg_range_hook * hook = NULL;
293 list_for_each_entry(hook, &(pci->cfg_hooks), list_node) {
294 uint32_t hook_end = hook->start + hook->length - 1;
// Ranges overlap iff neither lies entirely before the other.
295 if (!((hook->start > end) || (hook_end < start))) {
// Register read/write callbacks over a byte range of this device's config
// space. Fails if any part of the range is already hooked. Either callback
// may be NULL (default config handling then applies for that direction).
// Returns -1 on overlap or allocation failure.
304 int v3_pci_hook_config_range(struct pci_device * pci,
305 uint32_t start, uint32_t length,
306 int (*write)(struct pci_device * pci_dev, uint32_t offset,
307 void * src, uint_t length, void * private_data),
308 int (*read)(struct pci_device * pci_dev, uint32_t offset,
309 void * dst, uint_t length, void * private_data),
310 void * private_data) {
311 struct cfg_range_hook * hook = NULL;
// Reject overlapping registrations up front.
314 if (find_cfg_range_hook(pci, start, length)) {
315 PrintError("Tried to hook an already hooked config region\n");
319 hook = V3_Malloc(sizeof(struct cfg_range_hook));
322 PrintError("Could not allocate range hook\n");
326 memset(hook, 0, sizeof(struct cfg_range_hook));
329 hook->length = length;
330 hook->private_data = private_data;
334 list_add(&(hook->list_node), &(pci->cfg_hooks));
// Per-byte guest-writability masks for each capability structure layout,
// indexed by offset within the capability (1 bits = guest may modify).
// Used by cap_write to filter guest writes.
343 // Note byte ordering: LSB -> MSB
344 static uint8_t msi_32_rw_bitmask[10] = { 0x00, 0x00, /* ID, next ptr */
345 0x71, 0x00, /* MSG CTRL */
346 0xfc, 0xff, 0xff, 0xff, /* MSG ADDR */
347 0xff, 0xff}; /* MSG DATA */
349 static uint8_t msi_64_rw_bitmask[14] = { 0x00, 0x00, /* ID, next ptr */
350 0x71, 0x00, /* MSG CTRL */
351 0xfc, 0xff, 0xff, 0xff, /* MSG LO ADDR */
352 0xff, 0xff, 0xff, 0xff, /* MSG HI ADDR */
353 0xff, 0xff}; /* MSG DATA */
355 static uint8_t msi_64pervect_rw_bitmask[24] = { 0x00, 0x00, /* ID, next ptr */
356 0x71, 0x00, /* MSG CTRL */
357 0xfc, 0xff, 0xff, 0xff, /* MSG LO CTRL */
358 0xff, 0xff, 0xff, 0xff, /* MSG HI ADDR */
359 0xff, 0xff, /* MSG DATA */
360 0x00, 0x00, /* RSVD */
361 0xff, 0xff, 0xff, 0xff,
362 0x00, 0x00, 0x00, 0x00};
364 static uint8_t msix_rw_bitmask[12] = { 0x00, 0x00, /* ID, next ptr */
366 0xff, 0xff, 0xff, 0xff,
367 0x08, 0xff, 0xff, 0xff};
370 /* I am completely guessing what the format is here.
371 I only have version 1 of the PCIe spec and cannot download version 2 or 3
372 without paying the PCI-SIG $3000 a year for membership.
373 So this is just cobbled together from the version 1 spec and KVM.
377 static uint8_t pciev1_rw_bitmask[20] = { 0x00, 0x00, /* ID, next ptr */
378 0x00, 0x00, /* PCIE CAP register */
379 0x00, 0x00, 0x00, 0x00, /* DEV CAP */
380 0xff, 0xff, /* DEV CTRL */
381 0x0f, 0x00, /* DEV STATUS */
382 0x00, 0x00, 0x00, 0x00, /* LINK CAP */
383 0xfb, 0x01, /* LINK CTRL */
384 0x00, 0x00 /* LINK STATUS */
388 static uint8_t pciev2_rw_bitmask[60] = { 0x00, 0x00, /* ID, next ptr */
389 0x00, 0x00, /* PCIE CAP register */
390 0x00, 0x00, 0x00, 0x00, /* DEV CAP */
391 0xff, 0xff, /* DEV CTRL */
392 0x0f, 0x00, /* DEV STATUS */
393 0x00, 0x00, 0x00, 0x00, /* LINK CAP */
394 0xfb, 0x01, /* LINK CTRL */
395 0x00, 0x00, /* LINK STATUS */
396 0x00, 0x00, 0x00, 0x00, /* SLOT CAP ?? */
397 0x00, 0x00, /* SLOT CTRL ?? */
398 0x00, 0x00, /* SLOT STATUS */
399 0x00, 0x00, /* ROOT CTRL */
400 0x00, 0x00, /* ROOT CAP */
401 0x00, 0x00, 0x00, 0x00, /* ROOT STATUS */
402 0x00, 0x00, 0x00, 0x00, /* WHO THE FUCK KNOWS */
403 0x00, 0x00, 0x00, 0x00,
404 0x00, 0x00, 0x00, 0x00,
405 0x00, 0x00, 0x00, 0x00,
406 0x00, 0x00, 0x00, 0x00
409 static uint8_t pm_rw_bitmask[] = { 0x00, 0x00, /* ID, next ptr */
410 0x00, 0x00, /* PWR MGMT CAPS */
411 0x03, 0x9f, /* PWR MGMT CTRL */
412 0x00, 0x00 /* PMCSR_BSE, Data */
// Config-range hook write handler installed over an enabled capability.
// Filters each written byte through the layout's RW bitmask, then fires
// cmd_update callbacks when MSI/MSI-X enable bits change state.
417 int cap_write(struct pci_device * pci, uint32_t offset, void * src, uint_t length, void * private_data) {
418 struct pci_cap * cap = private_data;
419 uint32_t cap_offset = cap->offset;
420 pci_cap_type_t cap_type = cap->id;
// Offset of the write within the capability structure. cap_ptr skips the
// 2-byte ID/next header to point at the capability body.
422 uint32_t write_offset = offset - cap_offset;
423 void * cap_ptr = &(pci->config_space[cap_offset + 2]);
// Snapshot enable state before the write so transitions can be detected.
426 int msi_was_enabled = 0;
427 int msix_was_enabled = 0;
430 V3_Print("CAP write trapped (val=%x, cfg_offset=%d, write_offset=%d)\n", *(uint32_t *)src, offset, write_offset);
432 if (cap_type == PCI_CAP_MSI) {
433 struct msi_msg_ctrl * msg_ctrl = cap_ptr;
435 if (msg_ctrl->msi_enable == 1) {
438 } else if (cap_type == PCI_CAP_MSIX) {
439 struct msix_cap * msix_cap = cap_ptr;
441 if (msix_cap->msg_ctrl.msix_enable == 1) {
442 msix_was_enabled = 1;
// Apply the write one byte at a time, masked by the capability layout.
// NOTE(review): mask is indexed by write_offset while the destination uses
// offset + i; the per-byte advance of write_offset is not visible in this
// view — confirm it is incremented in the loop.
446 for (i = 0; i < length; i++) {
449 if (cap_type == PCI_CAP_MSI) {
450 struct msi_msg_ctrl * msg_ctrl = cap_ptr;
452 V3_Print("MSI Cap Ctrl=%x\n", *(uint16_t *)pci->msi_cap);
453 V3_Print("MSI ADDR=%x\n", *(uint32_t *)(cap_ptr + 2));
454 V3_Print("MSI HI ADDR=%x\n", *(uint32_t *)(cap_ptr + 6));
455 V3_Print("MSI Data=%x\n", *(uint16_t *)(cap_ptr + 10));
// Select the mask table matching the MSI variant actually present.
457 if (msg_ctrl->cap_64bit) {
458 if (msg_ctrl->per_vect_mask) {
459 mask = msi_64pervect_rw_bitmask[write_offset];
461 mask = msi_64_rw_bitmask[write_offset];
464 mask = msi_32_rw_bitmask[write_offset];
466 } else if (cap_type == PCI_CAP_MSIX) {
467 mask = msix_rw_bitmask[write_offset];
468 } else if (cap_type == PCI_CAP_PCIE) {
469 struct pcie_cap_reg * pcie_cap = cap_ptr;
471 if (pcie_cap->version == 1) {
472 mask = pciev1_rw_bitmask[write_offset];
473 } else if (pcie_cap->version == 2) {
474 mask = pciev2_rw_bitmask[write_offset];
478 } else if (cap_type == PCI_CAP_PM) {
479 mask = pm_rw_bitmask[write_offset];
// Merge: keep read-only bits, take writable bits from the guest value.
482 pci->config_space[offset + i] &= ~mask;
483 pci->config_space[offset + i] |= ((*(uint8_t *)(src + i)) & mask);
// Notify the device of interrupt-mode transitions, if it registered a
// cmd_update callback.
489 if (pci->cmd_update) {
491 /* Detect changes to interrupt types for cmd updates */
492 if (cap_type == PCI_CAP_MSI) {
493 struct msi_msg_ctrl * msg_ctrl = cap_ptr;
495 V3_Print("msi_was_enabled=%d, msi_is_enabled=%d\n", msi_was_enabled, msg_ctrl->msi_enable);
497 if ((msg_ctrl->msi_enable == 1) && (msi_was_enabled == 0)) {
498 pci->irq_type = IRQ_MSI;
499 pci->cmd_update(pci, PCI_CMD_MSI_ENABLE, msg_ctrl->mult_msg_enable, pci->priv_data);
500 } else if ((msg_ctrl->msi_enable == 0) && (msi_was_enabled == 1)) {
501 pci->irq_type = IRQ_NONE;
502 pci->cmd_update(pci, PCI_CMD_MSI_DISABLE, 0, pci->priv_data);
504 } else if (cap_type == PCI_CAP_MSIX) {
505 struct msix_cap * msix_cap = cap_ptr;
507 if ((msix_cap->msg_ctrl.msix_enable == 1) && (msix_was_enabled == 0)) {
508 pci->irq_type = IRQ_MSIX;
509 pci->cmd_update(pci, PCI_CMD_MSIX_ENABLE, msix_cap->msg_ctrl.table_size, pci->priv_data);
510 } else if ((msix_cap->msg_ctrl.msix_enable == 0) && (msix_was_enabled == 1)) {
511 pci->irq_type = IRQ_NONE;
512 pci->cmd_update(pci, PCI_CMD_MSIX_DISABLE, msix_cap->msg_ctrl.table_size, pci->priv_data);
// Sanitize a capability's config-space body to safe initial values before
// it is exposed to the guest (called from scan_pci_caps for each cap found).
521 static int init_pci_cap(struct pci_device * pci, pci_cap_type_t cap_type, uint_t cap_offset) {
// Skip the 2-byte ID/next header to reach the capability body.
522 void * cap_ptr = &(pci->config_space[cap_offset + 2]);
524 if (cap_type == PCI_CAP_MSI) {
525 struct msi32_msg_addr * msi = cap_ptr;
527 // We only expose a basic 32 bit MSI interface
528 msi->msg_ctrl.msi_enable = 0;
529 msi->msg_ctrl.mult_msg_enable = 0;
530 msi->msg_ctrl.cap_64bit = 0;
531 msi->msg_ctrl.per_vect_mask = 0;
536 } else if (cap_type == PCI_CAP_MSIX) {
540 } else if (cap_type == PCI_CAP_PCIE) {
541 struct pcie_cap_v2 * pcie = cap_ptr;
543 // The v1 and v2 formats are identical for the first X bytes
544 // So we use the v2 struct, and only modify extended fields if v2 is detected
546 pcie->dev_cap.fn_level_reset = 0;
548 pcie->dev_ctrl.val &= 0x70e0; // only preserve max_payload_size and max_read_req_size untouched
549 pcie->dev_ctrl.relaxed_order_enable = 1;
550 pcie->dev_ctrl.no_snoop_enable = 1;
552 pcie->dev_status.val = 0;
554 pcie->link_cap.val &= 0x0003ffff;
556 pcie->link_status.val &= 0x03ff;
// v2-only fields: clear the slot/root status registers.
558 if (pcie->pcie_cap.version >= 2) {
561 pcie->slot_status = 0;
565 pcie->root_status = 0;
567 } else if (cap_type == PCI_CAP_PM) {
576 // enumerate all capabilities and disable them.
// Walks the capability linked list starting at cap_ptr (config offset 0x34),
// records each capability in pci->capabilities, sanitizes it via
// init_pci_cap, then hides the whole list from the guest until a device
// explicitly re-enables a capability (v3_pci_enable_capability).
577 static int scan_pci_caps(struct pci_device * pci) {
578 uint_t cap_offset = pci->config_header.cap_ptr;
580 V3_Print("Scanning for Capabilities (cap_offset=%d)\n", cap_offset);
// A next-pointer of 0 terminates the capability list (PCI spec).
582 while (cap_offset != 0) {
583 uint8_t id = pci->config_space[cap_offset];
584 uint8_t next = pci->config_space[cap_offset + 1];
586 V3_Print("Found Capability 0x%x at offset %d (0x%x)\n",
587 id, cap_offset, cap_offset);
589 struct pci_cap * cap = V3_Malloc(sizeof(struct pci_cap));
592 PrintError("Error allocating PCI CAP info\n");
595 memset(cap, 0, sizeof(struct pci_cap));
598 cap->offset = cap_offset;
600 list_add(&(cap->cap_node), &(pci->capabilities));
602 // set correct init values
603 init_pci_cap(pci, id, cap_offset);
605 // advance to the next capability in the chain
606 // set to the next pointer
// Hide the capability list from the guest until explicitly enabled.
610 // Disable Capabilities
611 pci->config_header.cap_ptr = 0;
613 // Hook Cap pointer to return cached config space value
614 if (v3_pci_hook_config_range(pci, 0x34, 1,
615 NULL, NULL, NULL) == -1) {
616 PrintError("Could not hook cap pointer\n");
623 // Disable all PCIE extended capabilities for now
624 pci->config_space[0x100] = 0;
625 pci->config_space[0x101] = 0;
626 pci->config_space[0x102] = 0;
627 pci->config_space[0x103] = 0;
// Re-expose a previously scanned (and hidden) capability to the guest:
// find it in pci->capabilities, install a cap_write hook over its range,
// and link it back at the head of the config-space capability list.
635 int v3_pci_enable_capability(struct pci_device * pci, pci_cap_type_t cap_type) {
637 struct pci_cap * tmp_cap = NULL;
638 struct pci_cap * cap = NULL;
639 void * cap_ptr = NULL;
642 list_for_each_entry(tmp_cap, &(pci->capabilities), cap_node) {
643 if (tmp_cap->id == cap_type) {
// Fail if the capability was never found or is already enabled.
649 if ((cap == NULL) || (cap->enabled)) {
654 V3_Print("Found Capability %x at %x (%d)\n", cap_type, cap->offset, cap->offset);
656 // found the capability
658 // mark it as enabled
661 cap_ptr = &(pci->config_space[cap->offset + 2]);
// Compute the capability body size per type/variant (size set in the
// branches below; assignments are outside this view).
663 if (cap_type == PCI_CAP_MSI) {
664 pci->msi_cap = cap_ptr;
666 if (pci->msi_cap->cap_64bit) {
667 if (pci->msi_cap->per_vect_mask) {
668 // 64 bit MSI w/ per vector masking
678 } else if (cap_type == PCI_CAP_MSIX) {
679 pci->msix_cap = cap_ptr;
681 // disable passthrough for MSIX BAR
683 pci->bar[pci->msix_cap->bir].type = PCI_BAR_MEM32;
686 } else if (cap_type == PCI_CAP_PCIE) {
687 struct pcie_cap_reg * pcie_cap = (struct pcie_cap_reg *)&(pci->config_space[cap->offset + 2]);
689 if (pcie_cap->version == 1) {
691 } else if (pcie_cap->version == 2) {
696 } else if (cap_type == PCI_CAP_PM) {
701 V3_Print("Hooking capability range (offset=%d, size=%d)\n", cap->offset, size);
// Trap guest writes over the header + body so cap_write can filter them.
703 if (v3_pci_hook_config_range(pci, cap->offset, size + 2,
704 cap_write, NULL, cap) == -1) {
705 PrintError("Could not hook config range (start=%d, size=%d)\n",
706 cap->offset + 2, size);
712 // link it to the active capabilities list
713 pci->config_space[cap->offset + 1] = pci->config_header.cap_ptr;
714 pci->config_header.cap_ptr = cap->offset; // add to the head of the list
// IO read handler for the 0xcf8 config-address register; supports 1/2/4
// byte accesses at port offsets 0-3 within the 32-bit register.
722 static int addr_port_read(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
723 struct pci_internal * pci_state = priv_data;
724 int reg_offset = port & 0x3;
725 uint8_t * reg_addr = ((uint8_t *)&(pci_state->addr_reg.val)) + reg_offset;
727 PrintDebug("Reading PCI Address Port (%x): %x len=%d\n", port, pci_state->addr_reg.val, length);
// Reject accesses that would run past the 4-byte register.
729 if (reg_offset + length > 4) {
// NOTE(review): message says "write" but this is the read handler.
730 PrintError("Invalid Address port write\n");
734 memcpy(dst, reg_addr, length);
// IO write handler for the 0xcf8 config-address register; mirrors
// addr_port_read with the copy direction reversed.
740 static int addr_port_write(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
741 struct pci_internal * pci_state = priv_data;
742 int reg_offset = port & 0x3;
743 uint8_t * reg_addr = ((uint8_t *)&(pci_state->addr_reg.val)) + reg_offset;
// Reject accesses that would run past the 4-byte register.
745 if (reg_offset + length > 4) {
746 PrintError("Invalid Address port write\n");
750 // Set address register
751 memcpy(reg_addr, src, length);
753 PrintDebug("Writing PCI Address Port(%x): AddrReg=%x (op_val = %x, len=%d) \n", port, pci_state->addr_reg.val, *(uint32_t *)src, length);
// IO read handler for the 0xcfc config-data window. Resolves the target
// device from the cached address register, lets range hooks and the
// device's generic config_read refresh the cached config space, then
// copies the requested bytes out to the guest.
759 static int data_port_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * priv_data) {
760 struct pci_internal * pci_state = priv_data;
761 struct pci_device * pci_dev = NULL;
// Config-space byte offset: register number scaled to bytes plus the
// sub-dword offset from the port.
// NOTE(review): hi_reg_num is shifted by 16; verify against the
// pci_addr_reg layout (a 4-bit extended-register field would normally
// land at bits 8-11 of the byte offset).
762 uint_t reg_num = (pci_state->addr_reg.hi_reg_num << 16) +(pci_state->addr_reg.reg_num << 2) + (port & 0x3);
764 int bytes_left = length;
// Only bus 0 exists; reads elsewhere return all-ones (master abort value).
766 if (pci_state->addr_reg.bus_num != 0) {
767 memset(dst, 0xff, length);
772 pci_dev = get_device(&(pci_state->bus_list[0]), pci_state->addr_reg.dev_num, pci_state->addr_reg.fn_num);
// Non-present device: also reads as all-ones.
774 if (pci_dev == NULL) {
775 memset(dst, 0xff, length);
779 PrintDebug("Reading PCI Data register. bus = %d, dev = %d, fn = %d, reg = %d (%x), cfg_reg = %x\n",
780 pci_state->addr_reg.bus_num,
781 pci_state->addr_reg.dev_num,
782 pci_state->addr_reg.fn_num,
784 pci_state->addr_reg.val);
// Walk the request byte range: hooked sub-ranges go through their read
// callback (which updates the cached config space in place), the rest
// through the device's generic config_read one byte at a time.
787 while (bytes_left > 0) {
788 struct cfg_range_hook * cfg_hook = find_cfg_range_hook(pci_dev, reg_num + i, 1);
789 void * cfg_dst = &(pci_dev->config_space[reg_num + i]);
792 uint_t range_len = cfg_hook->length - ((reg_num + i) - cfg_hook->start);
793 range_len = (range_len > bytes_left) ? bytes_left : range_len;
795 if (cfg_hook->read) {
796 cfg_hook->read(pci_dev, reg_num + i, cfg_dst, range_len, cfg_hook->private_data);
799 bytes_left -= range_len;
802 if (pci_dev->config_read) {
803 if (pci_dev->config_read(pci_dev, reg_num + i, cfg_dst, 1, pci_dev->priv_data) != 0) {
804 PrintError("Error in config_read from PCI device (%s)\n", pci_dev->name);
// Finally satisfy the guest read from the (now refreshed) cached space.
813 memcpy(dst, &(pci_dev->config_space[reg_num]), length);
815 PrintDebug("\tVal=%x, len=%d\n", *(uint32_t *)dst, length);
// Config-range hook write handler for a BAR register. Caches the new
// value, then either forwards to a passthrough callback or re-hooks the
// virtualized IO/memory region at the new guest base address.
822 static int bar_update(struct pci_device * pci_dev, uint32_t offset,
823 void * src, uint_t length, void * private_data) {
824 struct v3_pci_bar * bar = (struct v3_pci_bar *)private_data;
// Align to the BAR's dword and derive its index (BAR0 lives at 0x10).
825 int bar_offset = offset & ~0x03;
826 int bar_num = (bar_offset - 0x10) / 4;
827 uint32_t new_val = *(uint32_t *)src;
829 PrintDebug("Updating BAR Register (Dev=%s) (bar=%d) (old_val=0x%x) (new_val=0x%x)\n",
830 pci_dev->name, bar_num, bar->val, new_val);
832 // Cache the changes locally
833 memcpy(&(pci_dev->config_space[offset]), src, length);
835 if (bar->type == PCI_BAR_PASSTHROUGH) {
836 if (bar->bar_write(bar_num, (void *)(pci_dev->config_space + bar_offset), bar->private_data) == -1) {
837 PrintError("Error in Passthrough bar write operation\n");
844 // Else we are a virtualized BAR
// Drop size-probe bits: only bits allowed by the BAR mask survive.
846 *(uint32_t *)(pci_dev->config_space + offset) &= bar->mask;
852 PrintDebug("\tRehooking %d IO ports from base 0x%x to 0x%x for %d ports\n",
853 bar->num_ports, PCI_IO_BASE(bar->val), PCI_IO_BASE(new_val),
856 // only do this if pci device is enabled....
// NOTE(review): IO-space enable is bit 0 of the COMMAND register in the
// PCI spec; this checks bit 0 of STATUS — confirm this is intentional.
857 if (!(pci_dev->config_header.status & 0x1)) {
858 PrintError("PCI Device IO space not enabled\n");
// Move each port hook from the old base to the new one.
861 for (i = 0; i < bar->num_ports; i++) {
863 PrintDebug("Rehooking PCI IO port (old port=%u) (new port=%u)\n",
864 PCI_IO_BASE(bar->val) + i, PCI_IO_BASE(new_val) + i);
866 v3_unhook_io_port(pci_dev->vm, PCI_IO_BASE(bar->val) + i);
868 if (v3_hook_io_port(pci_dev->vm, PCI_IO_BASE(new_val) + i,
869 bar->io_read, bar->io_write,
870 bar->private_data) == -1) {
872 PrintError("Could not hook PCI IO port (old port=%u) (new port=%u)\n",
873 PCI_IO_BASE(bar->val) + i, PCI_IO_BASE(new_val) + i);
874 v3_print_io_map(pci_dev->vm);
883 case PCI_BAR_MEM32: {
// Unhook at the previous guest-physical base, then re-hook at the new one.
884 v3_unhook_mem(pci_dev->vm, V3_MEM_CORE_ANY, (addr_t)(bar->val));
887 v3_hook_full_mem(pci_dev->vm, V3_MEM_CORE_ANY, PCI_MEM32_BASE(new_val),
888 PCI_MEM32_BASE(new_val) + (bar->num_pages * PAGE_SIZE_4KB),
889 bar->mem_read, bar->mem_write, pci_dev->priv_data);
891 PrintError("Write hooks not supported for PCI\n");
900 PrintDebug("Reprogramming an unsupported BAR register (Dev=%s) (bar=%d) (val=%x)\n",
901 pci_dev->name, bar_num, new_val);
905 PrintError("Invalid Bar Reg updated (bar=%d)\n", bar_num);
// IO write handler for the 0xcfc config-data window. Resolves the target
// device, routes hooked sub-ranges to their write callbacks, and filters
// unhooked bytes through the header writability mask before caching them
// and notifying the device's generic config_write.
913 static int data_port_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * priv_data) {
914 struct pci_internal * pci_state = priv_data;
915 struct pci_device * pci_dev = NULL;
// Same byte-offset computation as data_port_read (see note there about
// the hi_reg_num shift).
916 uint_t reg_num = (pci_state->addr_reg.hi_reg_num << 16) +(pci_state->addr_reg.reg_num << 2) + (port & 0x3);
// Only bus 0 is emulated; writes elsewhere are dropped.
920 if (pci_state->addr_reg.bus_num != 0) {
924 PrintDebug("Writing PCI Data register. bus = %d, dev = %d, fn = %d, reg = %d (0x%x) addr_reg = 0x%x (val=0x%x, len=%d)\n",
925 pci_state->addr_reg.bus_num,
926 pci_state->addr_reg.dev_num,
927 pci_state->addr_reg.fn_num,
929 pci_state->addr_reg.val,
930 *(uint32_t *)src, length);
933 pci_dev = get_device(&(pci_state->bus_list[0]), pci_state->addr_reg.dev_num, pci_state->addr_reg.fn_num);
935 if (pci_dev == NULL) {
936 PrintError("Writing configuration space for non-present device (dev_num=%d)\n",
937 pci_state->addr_reg.dev_num);
941 /* update the config space
942 If a hook has been registered for a given region, call the hook with the max write length
// Hooked bytes: hand the largest contiguous hooked span to the callback.
945 struct cfg_range_hook * cfg_hook = find_cfg_range_hook(pci_dev, reg_num + i, 1);
948 uint_t range_len = cfg_hook->length - ((reg_num + i) - cfg_hook->start);
949 range_len = (range_len > length) ? length : range_len;
951 if (cfg_hook->write) {
952 cfg_hook->write(pci_dev, reg_num + i, (void *)(src + i), range_len, cfg_hook->private_data);
958 // send the writes to the cached config space, and to the generic callback if present
// Only bits set in the header mask are guest-writable; the rest keep
// their cached values.
962 mask = pci_hdr_write_mask_00[reg_num + i];
966 uint8_t new_val = *(uint8_t *)(src + i);
967 uint8_t old_val = pci_dev->config_space[reg_num + i];
969 pci_dev->config_space[reg_num + i] = ((new_val & mask) | (old_val & ~mask));
971 if (pci_dev->config_write) {
972 pci_dev->config_write(pci_dev, reg_num + i, &(pci_dev->config_space[reg_num + i]), 1, pci_dev->priv_data);
// Config-range hook write handler for the expansion-ROM BAR: forward the
// dword-aligned cached value to the device's exp_rom_update callback, or
// complain if the device registered none.
986 static int exp_rom_write(struct pci_device * pci_dev, uint32_t offset,
987 void * src, uint_t length, void * private_data) {
988 int bar_offset = offset & ~0x03;
990 if (pci_dev->exp_rom_update) {
991 pci_dev->exp_rom_update(pci_dev, (void *)(pci_dev->config_space + bar_offset), pci_dev->priv_data);
996 PrintError("Expansion ROM update not handled. Will appear to not Exist\n");
// Config-range hook write handler for the command register. Applies the
// masked write, then compares old/new command bits and fires the device's
// cmd_update callback on INTx-disable and bus-master (DMA) transitions.
1002 static int cmd_write(struct pci_device * pci_dev, uint32_t offset,
1003 void * src, uint_t length, void * private_data) {
1007 struct pci_cmd_reg old_cmd;
1008 struct pci_cmd_reg new_cmd;
// Snapshot the command register before applying the write.
1009 old_cmd.val = pci_dev->config_header.command;
1011 for (i = 0; i < length; i++) {
1012 uint8_t mask = pci_hdr_write_mask_00[offset + i];
1013 uint8_t new_val = *(uint8_t *)(src + i);
1014 uint8_t old_val = pci_dev->config_space[offset + i];
// Keep read-only bits, take writable bits from the guest value.
1016 pci_dev->config_space[offset + i] = ((new_val & mask) | (old_val & ~mask));
1019 new_cmd.val = pci_dev->config_header.command;
1022 if (pci_dev->cmd_update) {
// INTx disable bit toggled: switch the device's irq_type accordingly.
1023 if ((new_cmd.intx_disable == 1) && (old_cmd.intx_disable == 0)) {
1024 pci_dev->irq_type = IRQ_NONE;
1025 pci_dev->cmd_update(pci_dev, PCI_CMD_INTX_DISABLE, 0, pci_dev->priv_data);
1026 } else if ((new_cmd.intx_disable == 0) && (old_cmd.intx_disable == 1)) {
1027 pci_dev->irq_type = IRQ_INTX;
1028 pci_dev->cmd_update(pci_dev, PCI_CMD_INTX_ENABLE, 0, pci_dev->priv_data);
// Bus-master enable toggled.
1032 if ((new_cmd.dma_enable == 1) && (old_cmd.dma_enable == 0)) {
1033 pci_dev->cmd_update(pci_dev, PCI_CMD_DMA_ENABLE, 0, pci_dev->priv_data);
1034 } else if ((new_cmd.dma_enable == 0) && (old_cmd.dma_enable == 1)) {
1035 pci_dev->cmd_update(pci_dev, PCI_CMD_DMA_DISABLE, 0, pci_dev->priv_data);
// Initialize every emulated bus: set its number, empty its device tree,
// and clear the device-number allocation bitmap.
1043 static void init_pci_busses(struct pci_internal * pci_state) {
1046 for (i = 0; i < PCI_BUS_COUNT; i++) {
1047 pci_state->bus_list[i].bus_num = i;
1048 pci_state->bus_list[i].devices.rb_node = NULL;
1049 memset(pci_state->bus_list[i].dev_map, 0, sizeof(pci_state->bus_list[i].dev_map));
// Device-manager free callback: tear down every device on every bus —
// unlink it from the rb-tree and release its config hooks and capability
// records.
1054 static int pci_free(struct pci_internal * pci_state) {
1059 for (i = 0; i < PCI_BUS_COUNT; i++) {
1060 struct pci_bus * bus = &(pci_state->bus_list[i]);
1061 struct rb_node * node = v3_rb_first(&(bus->devices));
1062 struct pci_device * dev = NULL;
1065 dev = rb_entry(node, struct pci_device, dev_tree_node);
// Advance before erasing so the traversal survives the removal.
1066 node = v3_rb_next(node);
1068 v3_rb_erase(&(dev->dev_tree_node), &(bus->devices));
1070 // Free config range hooks
1072 struct cfg_range_hook * hook = NULL;
1073 struct cfg_range_hook * tmp = NULL;
1074 list_for_each_entry_safe(hook, tmp, &(dev->cfg_hooks), list_node) {
1075 list_del(&(hook->list_node));
// Free capability records.
1082 struct pci_cap * cap = NULL;
1083 struct pci_cap * tmp = NULL;
// NOTE(review): this iterates dev->cfg_hooks using the cap_node member;
// capabilities are kept on dev->capabilities (see scan_pci_caps) — this
// looks like it should be &(dev->capabilities). Confirm and fix.
1084 list_for_each_entry_safe(cap, tmp, &(dev->cfg_hooks), cap_node) {
1085 list_del(&(cap->cap_node));
1099 #ifdef V3_CONFIG_CHECKPOINT
1101 #include <palacios/vmm_sprintf.h>
// Checkpoint save: persist the address register and IO base, then for each
// device on each bus a per-device context holding its 256-byte config
// space and the six cached BAR values.
1103 static int pci_save(struct v3_chkpt_ctx * ctx, void * private_data) {
1104 struct pci_internal * pci = (struct pci_internal *)private_data;
1108 v3_chkpt_save_32(ctx, "ADDR_REG", &(pci->addr_reg.val));
1109 v3_chkpt_save_16(ctx, "IO_BASE", &(pci->dev_io_base));
1111 for (i = 0; i < PCI_BUS_COUNT; i++) {
1112 struct pci_bus * bus = &(pci->bus_list[i]);
1113 struct rb_node * node = v3_rb_first(&(bus->devices));
1114 struct pci_device * dev = NULL;
1115 struct v3_chkpt_ctx * bus_ctx = NULL;
// One checkpoint sub-context per bus, named "pci-<bus>".
1117 snprintf(buf, 128, "pci-%d", i);
1119 bus_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
1122 struct v3_chkpt_ctx * dev_ctx = NULL;
1124 dev = rb_entry(node, struct pci_device, dev_tree_node);
// One sub-context per device, named "pci-<bus>.<dev>-<fn>".
1126 snprintf(buf, 128, "pci-%d.%d-%d", i, dev->dev_num, dev->fn_num);
1127 dev_ctx = v3_chkpt_open_ctx(bus_ctx->chkpt, bus_ctx, buf);
1129 v3_chkpt_save(dev_ctx, "CONFIG_SPACE", 256, dev->config_space);
1131 for (bar_idx = 0; bar_idx < 6; bar_idx++) {
1132 snprintf(buf, 128, "BAR-%d", bar_idx);
1133 v3_chkpt_save_32(dev_ctx, buf, &(dev->bar[bar_idx].val));
1136 node = v3_rb_next(node);
// Checkpoint load: mirror image of pci_save — restore the address
// register, IO base, and each device's config space and BAR values from
// the same context/key naming scheme.
1145 static int pci_load(struct v3_chkpt_ctx * ctx, void * private_data) {
1146 struct pci_internal * pci = (struct pci_internal *)private_data;
1150 v3_chkpt_load_32(ctx, "ADDR_REG", &(pci->addr_reg.val));
1151 v3_chkpt_load_16(ctx, "IO_BASE", &(pci->dev_io_base));
1153 for (i = 0; i < PCI_BUS_COUNT; i++) {
1154 struct pci_bus * bus = &(pci->bus_list[i]);
1155 struct rb_node * node = v3_rb_first(&(bus->devices));
1156 struct pci_device * dev = NULL;
1157 struct v3_chkpt_ctx * bus_ctx = NULL;
1159 snprintf(buf, 128, "pci-%d", i);
1161 bus_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
1164 struct v3_chkpt_ctx * dev_ctx = NULL;
1166 dev = rb_entry(node, struct pci_device, dev_tree_node);
1168 snprintf(buf, 128, "pci-%d.%d-%d", i, dev->dev_num, dev->fn_num);
1169 dev_ctx = v3_chkpt_open_ctx(bus_ctx->chkpt, bus_ctx, buf);
1171 v3_chkpt_load(dev_ctx, "CONFIG_SPACE", 256, dev->config_space);
1173 for (bar_idx = 0; bar_idx < 6; bar_idx++) {
1174 snprintf(buf, 128, "BAR-%d", bar_idx);
1175 v3_chkpt_load_32(dev_ctx, buf, &(dev->bar[bar_idx].val));
1178 node = v3_rb_next(node);
// Device-manager operations table for the PCI bus device; checkpoint
// save/load handlers are added only when checkpointing is configured.
1192 static struct v3_device_ops dev_ops = {
1193 .free = (int (*)(void *))pci_free,
1194 #ifdef V3_CONFIG_CHECKPOINT
// Device factory entry point: allocate bus state, register with the device
// manager under the configured ID, initialize buses, and hook the 4-byte
// 0xcf8/0xcfc port ranges for config mechanism #1.
1203 static int pci_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1204 struct pci_internal * pci_state = V3_Malloc(sizeof(struct pci_internal));
1207 PrintError("Cannot allocate in init\n");
1212 char * dev_id = v3_cfg_val(cfg, "ID");
1215 PrintDebug("PCI internal at %p\n",(void *)pci_state);
1217 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, pci_state);
1220 PrintError("Could not attach device %s\n", dev_id);
1226 pci_state->addr_reg.val = 0;
1227 pci_state->dev_io_base = PCI_DEV_IO_PORT_BASE;
1229 init_pci_busses(pci_state);
1231 PrintDebug("Sizeof config header=%d\n", (int)sizeof(struct pci_config_header));
// Hook each byte of the address and data port dwords.
1233 for (i = 0; i < 4; i++) {
1234 ret |= v3_dev_hook_io(dev, CONFIG_ADDR_PORT + i, &addr_port_read, &addr_port_write);
1235 ret |= v3_dev_hook_io(dev, CONFIG_DATA_PORT + i, &data_port_read, &data_port_write);
// Any hook failure aborts and removes the partially-initialized device.
1239 PrintError("Error hooking PCI IO ports\n");
1240 v3_remove_device(dev);
1248 device_register("PCI", pci_init)
// Initialize all six BARs for a newly registered device: compute the size
// mask for each BAR type, install default IO/memory hooks at the BAR's
// default base, seed the cached config-space value, and hook the BAR's
// config dword so guest reprogramming goes through bar_update.
1251 static inline int init_bars(struct v3_vm_info * vm, struct pci_device * pci_dev) {
1254 for (i = 0; i < 6; i++) {
1255 int bar_offset = 0x10 + (4 * i);
1256 struct v3_pci_bar * bar = &(pci_dev->bar[i]);
1258 if (bar->type == PCI_BAR_IO) {
// IO BAR size mask (num_ports must be a power of two); bit 0 flags IO space.
1260 bar->mask = (~((bar->num_ports) - 1)) | 0x01;
// 0xffff means "no default base": skip hooking until the guest programs it.
1262 if (bar->default_base_port != 0xffff) {
1263 bar->val = bar->default_base_port & bar->mask;
1268 bar->val |= 0x00000001;
1270 for (j = 0; j < bar->num_ports; j++) {
1272 if (bar->default_base_port != 0xffff) {
1273 if (v3_hook_io_port(vm, bar->default_base_port + j,
1274 bar->io_read, bar->io_write,
1275 bar->private_data) == -1) {
1276 PrintError("Could not hook default io port %x\n", bar->default_base_port + j);
1282 *(uint32_t *)(pci_dev->config_space + bar_offset) = bar->val;
1284 } else if (bar->type == PCI_BAR_MEM32) {
// Memory BAR size mask in 4KB pages; low nibble keeps the flag bits.
1285 bar->mask = ~((bar->num_pages << 12) - 1);
1286 bar->mask |= 0xf; // preserve the configuration flags
1288 if (bar->default_base_addr != 0xffffffff) {
1289 bar->val = bar->default_base_addr & bar->mask;
1295 if (bar->mem_read) {
1297 v3_hook_full_mem(vm, V3_MEM_CORE_ANY, bar->default_base_addr,
1298 bar->default_base_addr + (bar->num_pages * PAGE_SIZE_4KB),
1299 bar->mem_read, bar->mem_write, pci_dev->priv_data);
1300 } else if (bar->mem_write) {
1302 PrintError("Write hooks not supported for PCI devices\n");
1305 v3_hook_write_mem(pci_dev->vm_dev->vm, bar->default_base_addr,
1306 bar->default_base_addr + (bar->num_pages * PAGE_SIZE_4KB),
1307 bar->mem_write, pci_dev->vm_dev);
1310 // set the prefetchable flag...
1311 bar->val |= 0x00000008;
1315 *(uint32_t *)(pci_dev->config_space + bar_offset) = bar->val;
1317 } else if (bar->type == PCI_BAR_MEM24) {
1318 PrintError("16 Bit memory ranges not supported (reg: %d)\n", i);
1320 } else if (bar->type == PCI_BAR_NONE) {
1321 bar->val = 0x00000000;
1322 bar->mask = 0x00000000; // This ensures that all updates will be dropped
1323 *(uint32_t *)(pci_dev->config_space + bar_offset) = bar->val;
1324 } else if (bar->type == PCI_BAR_PASSTHROUGH) {
1326 // Call the bar init function to get the local cached value
1327 bar->bar_init(i, &(bar->val), bar->private_data);
1330 PrintError("Invalid BAR type for bar #%d\n", i);
// Trap all guest writes to this BAR's config dword.
1334 v3_pci_hook_config_range(pci_dev, bar_offset, 4, bar_update, NULL, bar);
// Install the interrupt bridge callbacks for one bus: these are invoked by
// v3_pci_raise/lower_acked_irq for INTx-mode devices on that bus.
// NOTE(review): bus_num is not range-checked against PCI_BUS_COUNT here
// (check may exist in lines outside this view) — confirm.
1341 int v3_pci_set_irq_bridge(struct vm_device * pci_bus, int bus_num,
1342 int (*raise_pci_irq)(struct pci_device * pci_dev, void * dev_data, struct v3_irq * vec),
1343 int (*lower_pci_irq)(struct pci_device * pci_dev, void * dev_data, struct v3_irq * vec),
1345 struct pci_internal * pci_state = (struct pci_internal *)pci_bus->private_data;
1348 pci_state->bus_list[bus_num].raise_pci_irq = raise_pci_irq;
1349 pci_state->bus_list[bus_num].lower_pci_irq = lower_pci_irq;
1350 pci_state->bus_list[bus_num].irq_dev_data = priv_data;
1355 int v3_pci_raise_irq(struct vm_device * pci_bus, struct pci_device * dev, uint32_t vec_index) {
1359 vec.private_data = NULL;
1360 vec.irq = vec_index;
1362 return v3_pci_raise_acked_irq(pci_bus, dev, vec);
1365 int v3_pci_lower_irq(struct vm_device * pci_bus, struct pci_device * dev, uint32_t vec_index) {
1368 vec.irq = vec_index;
1370 vec.private_data = NULL;
1372 return v3_pci_lower_acked_irq(pci_bus, dev, vec);
1375 int v3_pci_raise_acked_irq(struct vm_device * pci_bus, struct pci_device * dev, struct v3_irq vec) {
1376 struct pci_internal * pci_state = (struct pci_internal *)pci_bus->private_data;
1377 struct pci_bus * bus = &(pci_state->bus_list[dev->bus_num]);
1380 if (dev->irq_type == IRQ_INTX) {
1381 return bus->raise_pci_irq(dev, bus->irq_dev_data, &vec);
1382 } else if (dev->irq_type == IRQ_MSI) {
1383 struct v3_gen_ipi ipi;
1384 struct msi_addr * addr = NULL;
1385 struct msi_data * data = NULL;
1387 if (dev->msi_cap->cap_64bit) {
1388 if (dev->msi_cap->per_vect_mask) {
1389 struct msi64_pervec_msg_addr * msi = (void *)dev->msi_cap;
1390 addr = &(msi->addr);
1391 data = &(msi->data);
1393 struct msi64_msg_addr * msi = (void *)dev->msi_cap;
1394 addr = &(msi->addr);
1395 data = &(msi->data);
1398 struct msi32_msg_addr * msi = (void *)dev->msi_cap;
1399 addr = &(msi->addr);
1400 data = &(msi->data);
1403 memset(&ipi, 0, sizeof(struct v3_gen_ipi));
1405 // decode MSI fields into IPI
1407 ipi.vector = data->vector + vec.irq;
1408 ipi.mode = data->del_mode;
1409 ipi.logical = addr->dst_mode;
1410 ipi.trigger_mode = data->trig_mode;
1411 ipi.dst_shorthand = 0;
1412 ipi.dst = addr->dst_id;
1415 v3_apic_send_ipi(dev->vm, &ipi, dev->apic_dev);
1418 } else if (dev->irq_type == IRQ_MSIX) {
1419 addr_t msix_table_gpa = 0;
1420 struct msix_table * msix_table = NULL;
1421 uint_t bar_idx = dev->msix_cap->bir;
1422 struct v3_gen_ipi ipi;
1423 struct msi_addr * addr = NULL;
1424 struct msi_data * data = NULL;
1426 if (dev->bar[bar_idx].type != PCI_BAR_MEM32) {
1427 PrintError("Non 32bit MSIX BAR registers are not supported\n");
1431 msix_table_gpa = dev->bar[bar_idx].val;
1432 msix_table_gpa += dev->msix_cap->table_offset;
1434 if (v3_gpa_to_hva(&(dev->vm->cores[0]), msix_table_gpa, (void *)&(msix_table)) != 0) {
1435 PrintError("Could not translate MSIX Table GPA (%p)\n", (void *)msix_table_gpa);
1439 memset(&ipi, 0, sizeof(struct v3_gen_ipi));
1441 data = &(msix_table->entries[vec.irq].data);
1442 addr = &(msix_table->entries[vec.irq].addr);;
1444 // decode MSIX fields into IPI
1445 ipi.vector = data->vector + vec.irq;
1446 ipi.mode = data->del_mode;
1447 ipi.logical = addr->dst_mode;
1448 ipi.trigger_mode = data->trig_mode;
1449 ipi.dst_shorthand = 0;
1450 ipi.dst = addr->dst_id;
1454 V3_Print("Decode MSIX\n");
1456 v3_apic_send_ipi(dev->vm, &ipi, dev->apic_dev);
1461 // Should never get here
1466 int v3_pci_lower_acked_irq(struct vm_device * pci_bus, struct pci_device * dev, struct v3_irq vec) {
1467 if (dev->irq_type == IRQ_INTX) {
1468 struct pci_internal * pci_state = (struct pci_internal *)pci_bus->private_data;
1469 struct pci_bus * bus = &(pci_state->bus_list[dev->bus_num]);
1471 return bus->lower_pci_irq(dev, bus->irq_dev_data, &vec);
1478 // if dev_num == -1, auto assign
1479 struct pci_device * v3_pci_register_device(struct vm_device * pci,
1480 pci_device_type_t dev_type,
1485 struct v3_pci_bar * bars,
1486 int (*config_write)(struct pci_device * pci_dev, uint32_t reg_num, void * src,
1487 uint_t length, void * priv_data),
1488 int (*config_read)(struct pci_device * pci_dev, uint32_t reg_num, void * dst,
1489 uint_t length, void * priv_data),
1490 int (*cmd_update)(struct pci_device * pci_dev, pci_cmd_t cmd, uint64_t arg, void * priv_data),
1491 int (*exp_rom_update)(struct pci_device * pci_dev, uint32_t * src, void * priv_data),
1494 struct pci_internal * pci_state = (struct pci_internal *)pci->private_data;
1495 struct pci_bus * bus = &(pci_state->bus_list[bus_num]);
1496 struct pci_device * pci_dev = NULL;
1499 if (dev_num > MAX_BUS_DEVICES) {
1500 PrintError("Requested Invalid device number (%d)\n", dev_num);
1504 if (dev_num == PCI_AUTO_DEV_NUM) {
1505 PrintDebug("Searching for free device number\n");
1506 if ((dev_num = get_free_dev_num(bus)) == -1) {
1507 PrintError("No more available PCI slots on bus %d\n", bus->bus_num);
1512 PrintDebug("Checking for PCI Device\n");
1514 if (get_device(bus, dev_num, fn_num) != NULL) {
1515 PrintError("PCI Device already registered at slot %d on bus %d\n",
1516 dev_num, bus->bus_num);
1521 pci_dev = (struct pci_device *)V3_Malloc(sizeof(struct pci_device));
1523 if (pci_dev == NULL) {
1524 PrintError("Could not allocate pci device\n");
1528 memset(pci_dev, 0, sizeof(struct pci_device));
1531 pci_dev->type = dev_type;
1533 switch (pci_dev->type) {
1534 case PCI_STD_DEVICE:
1535 pci_dev->config_header.header_type = 0x00;
1537 case PCI_MULTIFUNCTION:
1538 pci_dev->config_header.header_type = 0x80;
1541 PrintError("Unhandled PCI Device Type: %d\n", dev_type);
1547 pci_dev->bus_num = bus_num;
1548 pci_dev->dev_num = dev_num;
1549 pci_dev->fn_num = fn_num;
1551 strncpy(pci_dev->name, name, sizeof(pci_dev->name));
1552 pci_dev->vm = pci->vm;
1553 pci_dev->priv_data = priv_data;
1555 INIT_LIST_HEAD(&(pci_dev->cfg_hooks));
1556 INIT_LIST_HEAD(&(pci_dev->capabilities));
1560 // locate APIC for MSI/MSI-X
1561 pci_dev->apic_dev = v3_find_dev(pci->vm, "apic");
1564 // register update callbacks
1565 pci_dev->config_write = config_write;
1566 pci_dev->config_read = config_read;
1567 pci_dev->cmd_update = cmd_update;
1568 pci_dev->exp_rom_update = exp_rom_update;
1575 // Only 256 bytes for now, should expand it in the future
1576 for (i = 0; i < 256; i++) {
1577 config_read(pci_dev, i, &(pci_dev->config_space[i]), 1, pci_dev->priv_data);
1581 V3_Print("Scanning for Capabilities\n");
1584 scan_pci_caps(pci_dev);
1586 pci_dev->irq_type = IRQ_INTX;
1588 V3_Print("Caps scanned\n");
1590 // hook important regions
1591 v3_pci_hook_config_range(pci_dev, 0x30, 4, exp_rom_write, NULL, NULL); // ExpRom
1592 v3_pci_hook_config_range(pci_dev, 0x04, 2, cmd_write, NULL, NULL); // CMD Reg
1600 for (i = 0; i < 6; i ++) {
1601 pci_dev->bar[i].type = bars[i].type;
1602 pci_dev->bar[i].private_data = bars[i].private_data;
1604 if (pci_dev->bar[i].type == PCI_BAR_IO) {
1605 pci_dev->bar[i].num_ports = bars[i].num_ports;
1607 // This is a horrible HACK becaues the BIOS is supposed to set the PCI base ports
1608 // And if the BIOS doesn't, Linux just happily overlaps device port assignments
1609 if (bars[i].default_base_port != (uint16_t)-1) {
1610 pci_dev->bar[i].default_base_port = bars[i].default_base_port;
1612 pci_dev->bar[i].default_base_port = pci_state->dev_io_base;
1613 pci_state->dev_io_base += ( 0x100 * ((bars[i].num_ports / 0x100) + 1) );
1616 pci_dev->bar[i].io_read = bars[i].io_read;
1617 pci_dev->bar[i].io_write = bars[i].io_write;
1618 } else if (pci_dev->bar[i].type == PCI_BAR_MEM32) {
1619 pci_dev->bar[i].num_pages = bars[i].num_pages;
1620 pci_dev->bar[i].default_base_addr = bars[i].default_base_addr;
1621 pci_dev->bar[i].mem_read = bars[i].mem_read;
1622 pci_dev->bar[i].mem_write = bars[i].mem_write;
1623 } else if (pci_dev->bar[i].type == PCI_BAR_PASSTHROUGH) {
1624 pci_dev->bar[i].bar_init = bars[i].bar_init;
1625 pci_dev->bar[i].bar_write = bars[i].bar_write;
1627 pci_dev->bar[i].num_pages = 0;
1628 pci_dev->bar[i].default_base_addr = 0;
1629 pci_dev->bar[i].mem_read = NULL;
1630 pci_dev->bar[i].mem_write = NULL;
1634 if (init_bars(pci->vm, pci_dev) == -1) {
1635 PrintError("could not initialize bar registers\n");
1640 add_device_to_bus(bus, pci_dev);
1642 #ifdef V3_CONFIG_DEBUG_PCI
1643 pci_dump_state(pci_state);