From: Lei Xia
Date: Mon, 8 Mar 2010 21:12:01 +0000 (-0600)
Subject: Update on the VNET Bridge virtio device
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=83790cde959d56c148be1b5f64d2e4a4414acac9;p=palacios.git

Update on the VNET Bridge virtio device
---

diff --git a/Kconfig b/Kconfig
index 8a7f97c..447826e 100644
--- a/Kconfig
+++ b/Kconfig
@@ -108,6 +108,26 @@ config DEBUG_VNET
 	help
 	  Enable the Vnet debug in Palacios
 
+config VNET_PROFILE
+	depends on EXPERIMENTAL && VNET
+	bool "Enable Vnet Profiling in Palacios"
+	default n
+	help
+	  Enable the Vnet performance profiling in Palacios
+
+config VNET_BRG
+	bool "Enable VNET Bridge"
+	default n
+	depends on PCI && EXPERIMENTAL && VNET
+	help
+	  Enable the VNET Bridge device
+
+config DEBUG_VNET_BRG
+	bool "VNET Bridge Debugging"
+	default n
+	depends on VNET_BRG && DEBUG_ON
+	help
+	  Enable debugging for the VNET Bridge Device
 
 endmenu

diff --git a/palacios/include/palacios/vmm_vnet.h b/palacios/include/palacios/vmm_vnet.h
index a22894a..51f501b 100644
--- a/palacios/include/palacios/vmm_vnet.h
+++ b/palacios/include/palacios/vmm_vnet.h
@@ -99,7 +99,9 @@ int v3_vnet_add_route(struct v3_vnet_route route);
 
 int V3_init_vnet();
 
-//int v3_vnet_add_bridge(struct v3_vm_info * vm, uint8_t mac[6]);
+int v3_vnet_add_bridge(struct v3_vm_info * vm,
+		       int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),
+		       void * priv_data);
 
 int v3_vnet_add_dev(struct v3_vm_info *info, uint8_t mac[6],
 		    int (*dev_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),

diff --git a/palacios/src/devices/Kconfig b/palacios/src/devices/Kconfig
index 43643ce..b5e625e 100644
--- a/palacios/src/devices/Kconfig
+++ b/palacios/src/devices/Kconfig
@@ -159,11 +159,11 @@ config DEBUG_VIRTIO_NET
 	  Enable debugging for the Linux Virtio Network Device
 
 config VNET_NIC
-	bool "Enable VNET NIC Device"
+	bool "Enable VNET Virtio NIC Device"
 	default n
 	depends on PCI && EXPERIMENTAL
 	help
-	  Enable the VNET NIC backend device
+	  Enable the VNET Virtio backend device
 
 config DEBUG_VNET_NIC
 	bool "VNET NIC Device Debugging"

diff --git a/palacios/src/devices/Makefile b/palacios/src/devices/Makefile
index 10cd8a9..5d9f398 100644
--- a/palacios/src/devices/Makefile
+++ b/palacios/src/devices/Makefile
@@ -35,4 +35,4 @@ obj-$(CONFIG_PASSTHROUGH_PCI) += pci_passthrough.o
 
 obj-$(CONFIG_SYMMOD) += lnx_virtio_symmod.o
 
-obj-$(CONFIG_VNET) += lnx_virtio_vnet.o
\ No newline at end of file
+obj-$(CONFIG_VNET_BRG) += lnx_virtio_vnet.o
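A note on the API change in vmm_vnet.h above before the device code: the old MAC-keyed bridge hook is gone, and a backend now registers a packet-input callback instead. A minimal sketch of how a backend hooks in against the new prototype (demo_bridge_input and demo_state are illustrative names, not part of this patch):

    /* Sketch against the new v3_vnet_add_bridge() prototype above; the
     * callback body is a placeholder, a real backend queues pkt toward
     * the guest. */
    static int demo_bridge_input(struct v3_vm_info * vm,
                                 struct v3_vnet_pkt * pkt,
                                 void * private_data) {
        return 0;
    }

    static int demo_attach(struct v3_vm_info * vm, void * demo_state) {
        /* vmm_vnet.c keeps exactly one bridge; a later call replaces it. */
        return v3_vnet_add_bridge(vm, demo_bridge_input, demo_state);
    }

The lnx_virtio_vnet.c changes below turn that file into exactly such a backend, exposed to the guest as a virtio PCI device.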
diff --git a/palacios/src/devices/lnx_virtio_vnet.c b/palacios/src/devices/lnx_virtio_vnet.c
index 8282477..fea13c6 100644
--- a/palacios/src/devices/lnx_virtio_vnet.c
+++ b/palacios/src/devices/lnx_virtio_vnet.c
@@ -22,9 +22,16 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
+#ifndef CONFIG_DEBUG_VNET_BRG
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+
 #define QUEUE_SIZE 128
 #define NUM_QUEUES 3
 
@@ -34,25 +41,24 @@ struct vnet_config {
 } __attribute__((packed));
 
+#define CTRL_QUEUE 0
+#define RECV_QUEUE 2
+#define XMIT_QUEUE 1
 
 struct virtio_vnet_state {
+    struct v3_vm_info * vm;
     struct vnet_config vnet_cfg;
     struct virtio_config virtio_cfg;
-
     struct vm_device * pci_bus;
     struct pci_device * pci_dev;
-
-
-#define CTRL_QUEUE 0
-#define RECV_QUEUE 1
-#define XMIT_QUEUE 2
+
     struct virtio_queue queue[NUM_QUEUES];
     struct virtio_queue * cur_queue;
-
     int io_range_size;
+
+    v3_lock_t lock;
 };
 
 #define VNET_GET_ROUTES 10
@@ -69,21 +75,36 @@ struct vnet_ctrl_hdr {
 	uint32_t num_cmds;
 } __attribute__((packed));
 
+#define VIRTIO_NET_S_LINK_UP	1	/* Link is up */
+#define VIRTIO_NET_MAX_BUFSIZE	(sizeof(struct virtio_net_hdr) + (64 << 10))
+
+struct virtio_net_hdr {
+	uint8_t flags;
+
+#define VIRTIO_NET_HDR_GSO_NONE	0	/* Not a GSO frame */
+	uint8_t gso_type;
+	uint16_t hdr_len;	/* Ethernet + IP + tcp/udp hdrs */
+	uint16_t gso_size;	/* Bytes to append to hdr_len per frame */
+	uint16_t csum_start;	/* Position to start checksumming from */
+	uint16_t csum_offset;	/* Offset after that to place checksum */
+}__attribute__((packed));
 
-static int virtio_reset(struct virtio_vnet_state * virtio) {
-    memset(virtio->queue, 0, sizeof(struct virtio_queue) * 2);
+static int virtio_reset(struct virtio_vnet_state * vnet_brg) {
 
-    virtio->cur_queue = &(virtio->queue[0]);
+    memset(vnet_brg->queue, 0, sizeof(struct virtio_queue) * 2);
 
-    virtio->virtio_cfg.status = 0;
-    virtio->virtio_cfg.pci_isr = 0;
+    vnet_brg->cur_queue = &(vnet_brg->queue[0]);
 
-    virtio->queue[0].queue_size = QUEUE_SIZE;
-    virtio->queue[1].queue_size = QUEUE_SIZE;
-    virtio->queue[2].queue_size = QUEUE_SIZE;
+    vnet_brg->virtio_cfg.status = 0;
+    vnet_brg->virtio_cfg.pci_isr = 0;
 
-    memset(&(virtio->vnet_cfg), 0, sizeof(struct vnet_config));
+    vnet_brg->queue[0].queue_size = QUEUE_SIZE;
+    vnet_brg->queue[1].queue_size = QUEUE_SIZE;
+    vnet_brg->queue[2].queue_size = QUEUE_SIZE;
+
+    memset(&(vnet_brg->vnet_cfg), 0, sizeof(struct vnet_config));
+    v3_lock_init(&(vnet_brg->lock));
 
     return 0;
 }
 
@@ -108,7 +129,7 @@ static int get_desc_count(struct virtio_queue * q, int index) {
 static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
     struct virtio_queue * q = &(vnet_state->queue[0]);
 
-    PrintDebug("VirtioVNET: Virtio Kick on command queue\n");
+    PrintDebug("VNET Bridge: Handling command queue\n");
 
     while (q->cur_avail_idx < q->avail->index) {
 	struct vring_desc * hdr_desc = NULL;
@@ -126,7 +147,7 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
 	PrintDebug("Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
 
 	if (desc_cnt < 3) {
-	    PrintError("VirtioVNET cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
+	    PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
 	    return -1;
 	}
@@ -201,15 +222,199 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
     return 0;
 }
 
+static int copy_data_to_desc(struct guest_info * core,
+			     struct vring_desc * desc,
+			     uchar_t * buf,
+			     uint_t buf_len)
+{
+    uint32_t len;
+    uint8_t * desc_buf = NULL;
+
+    if (guest_pa_to_host_va(core, desc->addr_gpa, (addr_t *)&(desc_buf)) == -1) {
+	PrintError("Could not translate buffer address\n");
+	return -1;
+    }
+    len = (desc->length < buf_len) ? desc->length : buf_len;
+    memcpy(desc_buf, buf, len);
+
+    return len;
+}
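The RX and TX handlers below walk the avail ring with 16-bit indices that the guest increments without bound, so wraparound is tracked explicitly via idx_overflow. The pattern they share, distilled into a self-contained sketch (field names mirror this patch; this is a paraphrase, not the patch code):

    #include <stdint.h>
    #include <stdbool.h>

    struct ring_cursor {
        uint16_t cur_avail_idx;   /* next entry the device will consume */
        uint16_t last_avail_idx;  /* guest index seen on the previous pass */
        bool idx_overflow;        /* guest index wrapped past 65535 */
    };

    /* True while unconsumed avail entries remain; a wrapped guest index
     * is treated as being 65536 ahead of the device index. */
    static bool ring_has_work(struct ring_cursor * c, uint16_t guest_idx) {
        if (guest_idx < c->last_avail_idx) {
            c->idx_overflow = true;       /* guest wrapped since last pass */
        }
        c->last_avail_idx = guest_idx;

        return (c->cur_avail_idx < guest_idx) ||
               (c->idx_overflow &&
                (uint32_t)c->cur_avail_idx < (uint32_t)guest_idx + 65536);
    }

    static void ring_consume_one(struct ring_cursor * c) {
        uint16_t last = c->cur_avail_idx++;
        if (c->cur_avail_idx < last) {    /* device index wrapped too */
            c->idx_overflow = false;
        }
    }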
+
+static int vnet_brg_rx(struct v3_vnet_pkt * pkt, void * private_data) {
+    struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
+    struct virtio_queue * q = &(vnet_brg->queue[RECV_QUEUE]);
+    char * buf = (char *)pkt;
+    int ret_val;
+    int pkt_head_len = ((addr_t)pkt->data) - ((addr_t)pkt);
+    uint32_t data_len = sizeof(struct v3_vnet_pkt); // TODO: should not transfer the whole struct, only the data size
+    unsigned long flags;
+
+    flags = v3_lock_irqsave(vnet_brg->lock);
+
+    PrintDebug("VNET Bridge: RX: pkt sent to guest size: %d, pkt_header_len: %d\n", data_len, pkt_head_len);
+    v3_hexdump(buf, 100, NULL, 0);
+
+    if (q->ring_avail_addr == 0) {
+	PrintError("Queue is not set\n");
+	ret_val = -1;
+	goto exit;
+    }
+
+    if (q->last_avail_idx > q->avail->index)
+	q->idx_overflow = true;
+    q->last_avail_idx = q->avail->index;
+
+    if (q->cur_avail_idx < q->avail->index ||
+	(q->idx_overflow && q->cur_avail_idx < q->avail->index + 65536)) {
+	uint16_t buf_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
+	uint32_t len = 0;
+	uint32_t offset = 0;
+	struct vring_desc * buf_desc = NULL;
+
+	buf_desc = &(q->desc[buf_idx]);
+	PrintDebug("VNET Bridge RX: buffer desc len: %d\n", buf_desc->length);
+
+	len = copy_data_to_desc(&(vnet_brg->vm->cores[0]), buf_desc, buf + offset, data_len - offset);
+	if (len == -1) {
+	    ret_val = -1;
+	    goto exit;
+	}
+	offset += len;
+	buf_desc->length = len;
+
+	if (offset >= data_len) {
+	    buf_desc->flags &= ~VIRTIO_NEXT_FLAG;
+	} else {
+	    PrintDebug("VNET Bridge: RX: pkt does not fit into one desc buf\n");
+	}
+
+	q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
+	q->used->ring[q->used->index % q->queue_size].length = data_len; // This should be the total length of data sent to guest (header+pkt_data)
+	q->used->index++;
+
+	int last_idx = q->cur_avail_idx;
+	q->cur_avail_idx++;
+	if (q->cur_avail_idx < last_idx)
+	    q->idx_overflow = false;
+    } else {
+	PrintDebug("VNET Bridge: Handle RX: Fails to send, no available buffer: current_idx:%d, availIdx: %d\n", q->cur_avail_idx, q->avail->index);
+    }
+
+    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
+	v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
+	vnet_brg->virtio_cfg.pci_isr = 0x1;
+	PrintDebug("Raising IRQ %d\n", vnet_brg->pci_dev->config_header.intr_line);
+    }
+
+    ret_val = 0;
+
+exit:
+    v3_unlock_irqrestore(vnet_brg->lock, flags);
+
+    return ret_val;
+}
+
+static int vnet_send(struct v3_vnet_pkt * pkt, int pkt_len, void * private_data) {
+    struct guest_info * core = (struct guest_info *)private_data;
+
+#ifdef CONFIG_DEBUG_VNET_BRG
+    {
+	PrintDebug("VNET Bridge: send pkt size: %d\n", pkt->size);
+	v3_hexdump(pkt->data, pkt->size, NULL, 0);
+    }
+#endif
+
+#ifdef CONFIG_VNET_PROFILE
+    uint64_t start, end;
+    rdtscll(start);
+    core->vnet_times.time_copy_from_guest = start - core->vnet_times.virtio_handle_start;
+#endif
+
+    pkt->src_type = LINK_EDGE;
+
+    return v3_vnet_send_pkt(pkt, (void *)core);
+}
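vnet_send() above stamps the transmit path with rdtscll() under CONFIG_VNET_PROFILE, storing cycle deltas in the core's vnet_times. For reference, the cycle-counting idiom it leans on, as a stand-alone sketch (standard x86 RDTSC; the helper name is ours, not Palacios'):

    #include <stdint.h>

    /* Read the x86 time-stamp counter, same idea as Palacios' rdtscll(). */
    static inline uint64_t rdtsc64(void) {
        uint32_t lo, hi;
        __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
        return ((uint64_t)hi << 32) | lo;
    }

    /* Usage: uint64_t t0 = rdtsc64(); ...path... delta = rdtsc64() - t0; */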
+
+static int pkt_tx(struct guest_info * core, struct virtio_vnet_state * vnet_brg, struct vring_desc * buf_desc)
+{
+    uint8_t * buf = NULL;
+    uint32_t len = buf_desc->length;
+    struct v3_vnet_pkt * pkt;
+
+    if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
+	PrintError("Could not translate buffer address\n");
+	return -1;
+    }
+
+    pkt = (struct v3_vnet_pkt *)buf;
+    if (vnet_send(pkt, len, (void *)core) == -1) {
+	return -1;
+    }
+
+    return 0;
+}
+
+static int handle_pkt_tx(struct guest_info * core, struct virtio_vnet_state * vnet_brg)
+{
+    struct virtio_queue * q = &(vnet_brg->queue[XMIT_QUEUE]);
+
+    if (q->avail->index < q->last_avail_idx) {
+	q->idx_overflow = true;
+    }
+
+    q->last_avail_idx = q->avail->index;
+
+    PrintDebug("VNET Bridge Device: Handle TX\n");
+
+    while (q->cur_avail_idx < q->avail->index ||
+	   (q->idx_overflow && q->cur_avail_idx < (q->avail->index + 65536))) {
+	struct vring_desc * buf_desc = NULL;
+	uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
+
+	buf_desc = &(q->desc[desc_idx]);
+
+	PrintDebug("VNET Bridge: Handle TX buf_len: %d\n", buf_desc->length);
+
+	if (pkt_tx(core, vnet_brg, buf_desc) == -1) {
+	    PrintError("Error handling nic operation\n");
+	    return -1;
+	}
+
+	q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
+	q->used->ring[q->used->index % q->queue_size].length = buf_desc->length; // What do we set this to????
+	q->used->index++;
+
+	int last_idx = q->cur_avail_idx;
+	q->cur_avail_idx++;
+	if (q->cur_avail_idx < last_idx) {
+	    q->idx_overflow = false;
+	}
+    }
+
+    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
+	v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
+	vnet_brg->virtio_cfg.pci_isr = 0x1;
+    }
+
+#ifdef CONFIG_VNET_PROFILE
+    uint64_t time;
+    rdtscll(time);
+    core->vnet_times.total_handle_time = time - core->vnet_times.virtio_handle_start;
+    core->vnet_times.print = true;
+#endif
+
+    return 0;
+}
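The VRING_PG_NUM_PORT write below derives the whole split-ring layout from a single page number: descriptor table first, then the avail ring, then the used ring rounded up to the next page. The arithmetic, pulled out as a compilable sketch (QUEUE_SIZE and the struct sizes match the standard vring ABI used here; the helper itself is hypothetical):

    #include <stdint.h>

    #define QUEUE_SIZE 128

    /* 16-byte descriptor and avail-ring header, as in the vring ABI */
    struct vring_desc  { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };
    struct vring_avail { uint16_t flags; uint16_t index; /* uint16_t ring[]; */ };

    static void vring_layout(uint64_t page_addr,
                             uint64_t * desc, uint64_t * avail, uint64_t * used) {
        *desc  = page_addr;
        *avail = page_addr + QUEUE_SIZE * sizeof(struct vring_desc);
        *used  = *avail + sizeof(struct vring_avail)
                        + QUEUE_SIZE * sizeof(uint16_t);   /* avail ring entries */
        *used  = (*used + 0xfff) & ~0xfffULL;              /* next page boundary */
    }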
 
 static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
-    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
-    int port_idx = port % vnet_state->io_range_size;
+    struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
+    int port_idx = port % vnet_brg->io_range_size;
 
-    PrintDebug("VirtioVNET: VIRTIO VNET Write for port %d len=%d, value=%x\n",
+    PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n",
 	       port, length, *(uint32_t *)src);
-    PrintDebug("VirtioVNET: port idx=%d\n", port_idx);
+    PrintDebug("VNET Bridge: port idx=%d\n", port_idx);
 
     switch (port_idx) {
@@ -217,9 +422,8 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
 	    if (length != 4) {
 		PrintError("Illegal write length for guest features\n");
 		return -1;
-	    }
-
-	    vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;
+	    }
+	    vnet_brg->virtio_cfg.guest_features = *(uint32_t *)src;
 	    break;
 
 	case VRING_PG_NUM_PORT:
@@ -227,41 +431,39 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
 		addr_t pfn = *(uint32_t *)src;
 		addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
 
-		vnet_state->cur_queue->pfn = pfn;
+		vnet_brg->cur_queue->pfn = pfn;
 
-		vnet_state->cur_queue->ring_desc_addr = page_addr;
-		vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
-		vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
+		vnet_brg->cur_queue->ring_desc_addr = page_addr;
+		vnet_brg->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
+		vnet_brg->cur_queue->ring_used_addr = ( vnet_brg->cur_queue->ring_avail_addr + \
 							sizeof(struct vring_avail) + \
 							(QUEUE_SIZE * sizeof(uint16_t)));
 
 		// round up to next page boundary.
-		vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
+		vnet_brg->cur_queue->ring_used_addr = (vnet_brg->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
 
-		if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
+		if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_desc_addr, (addr_t *)&(vnet_brg->cur_queue->desc)) == -1) {
 		    PrintError("Could not translate ring descriptor address\n");
 		    return -1;
 		}
-
-		if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
+		if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_avail_addr, (addr_t *)&(vnet_brg->cur_queue->avail)) == -1) {
 		    PrintError("Could not translate ring available address\n");
 		    return -1;
 		}
-
-		if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
+		if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_used_addr, (addr_t *)&(vnet_brg->cur_queue->used)) == -1) {
 		    PrintError("Could not translate ring used address\n");
 		    return -1;
 		}
 
-		PrintDebug("VirtioVNET: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
-			   (void *)(vnet_state->cur_queue->ring_desc_addr),
-			   (void *)(vnet_state->cur_queue->ring_avail_addr),
-			   (void *)(vnet_state->cur_queue->ring_used_addr));
+		PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
+			   (void *)(vnet_brg->cur_queue->ring_desc_addr),
+			   (void *)(vnet_brg->cur_queue->ring_avail_addr),
+			   (void *)(vnet_brg->cur_queue->ring_used_addr));
 
-		PrintDebug("VirtioVNET: RingDesc=%p, Avail=%p, Used=%p\n",
-			   vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);
+		PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n",
+			   vnet_brg->cur_queue->desc, vnet_brg->cur_queue->avail, vnet_brg->cur_queue->used);
 
 	    } else {
 		PrintError("Illegal write length for page frame number\n");
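In the next hunk, a kick on queue 1 now drives the TX path and a kick on queue 2 (the guest posting RX buffers) is deliberately a no-op. One small suggestion: the dispatch compares bare integers even though this patch introduces CTRL_QUEUE/XMIT_QUEUE/RECV_QUEUE, and it just swapped the RECV/XMIT numbering; using the names would make that swap harmless. Same behavior, sketched with the patch's own constants:

    switch (queue_idx) {
        case CTRL_QUEUE:    /* 0: routing/config commands */
            if (handle_cmd_kick(core, vnet_brg) == -1) return -1;
            break;
        case XMIT_QUEUE:    /* 1: guest-to-VNET packets */
            if (handle_pkt_tx(core, vnet_brg) == -1) return -1;
            break;
        case RECV_QUEUE:    /* 2: guest refilled RX buffers; nothing to do */
            break;
        default:
            return -1;
    }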
@@ -269,51 +471,53 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
 	    }
 	    break;
 	case VRING_Q_SEL_PORT:
-	    vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
+	    vnet_brg->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
 
-	    if (vnet_state->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
-		PrintError("Virtio Symbiotic device has no qeueues. Selected %d\n",
-			   vnet_state->virtio_cfg.vring_queue_selector);
+	    if (vnet_brg->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
+		PrintError("VNET Bridge device has no queues. Selected %d\n",
+			   vnet_brg->virtio_cfg.vring_queue_selector);
 		return -1;
 	    }
 
-	    vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);
+	    vnet_brg->cur_queue = &(vnet_brg->queue[vnet_brg->virtio_cfg.vring_queue_selector]);
 	    break;
 	case VRING_Q_NOTIFY_PORT: {
 	    uint16_t queue_idx = *(uint16_t *)src;
 
-	    PrintDebug("VirtioVNET: Handling Kick\n");
+	    PrintDebug("VNET Bridge: Handling Kick\n");
 
 	    if (queue_idx == 0) {
-		if (handle_cmd_kick(core, vnet_state) == -1) {
-		    PrintError("Could not handle VNET Control command\n");
+		if (handle_cmd_kick(core, vnet_brg) == -1) {
+		    PrintError("Could not handle Virtio VNET Control command\n");
 		    return -1;
 		}
 	    } else if (queue_idx == 1) {
-
-		// down queue
+		if (handle_pkt_tx(core, vnet_brg) == -1) {
+		    PrintError("Could not handle Virtio VNET TX\n");
+		    return -1;
+		}
 	    } else if (queue_idx == 2) {
-		// up queue
+		PrintDebug("VNET Bridge: receive kick on RX Queue\n");
 	    } else {
-		PrintError("Kick on invalid queue (%d)\n", queue_idx);
+		PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
 		return -1;
 	    }
 
 	    break;
 	}
 	case VIRTIO_STATUS_PORT:
-	    vnet_state->virtio_cfg.status = *(uint8_t *)src;
+	    vnet_brg->virtio_cfg.status = *(uint8_t *)src;
 
-	    if (vnet_state->virtio_cfg.status == 0) {
-		PrintDebug("VirtioVNET: Resetting device\n");
-		virtio_reset(vnet_state);
+	    if (vnet_brg->virtio_cfg.status == 0) {
+		PrintDebug("VNET Bridge: Resetting device\n");
+		virtio_reset(vnet_brg);
 	    }
 
 	    break;
 
 	case VIRTIO_ISR_PORT:
-	    vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
+	    vnet_brg->virtio_cfg.pci_isr = *(uint8_t *)src;
 	    break;
 	default:
 	    return -1;
@@ -396,7 +600,12 @@ static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, u
 
     return length;
 }
 
+static int vnet_brg_input(struct v3_vm_info * vm,
+			  struct v3_vnet_pkt * pkt,
+			  void * private_data) {
+    return vnet_brg_rx(pkt, private_data);
+}
 
 static struct v3_device_ops dev_ops = {
     .free = NULL,
@@ -406,23 +615,25 @@ static struct v3_device_ops dev_ops = {
 };
 
-static int vnet_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
+static int vnet_brg_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
-    struct virtio_vnet_state * virtio_state = NULL;
+    struct virtio_vnet_state * vbrg_state = NULL;
     struct pci_device * pci_dev = NULL;
     char * name = v3_cfg_val(cfg, "name");
 
-    PrintDebug("VirtioVNET: Initializing VIRTIO VNET Control device\n");
+    PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);
 
     if (pci_bus == NULL) {
-	PrintError("VirtIO devices require a PCI Bus");
+	PrintError("VNET Bridge device requires a PCI Bus");
 	return -1;
     }
 
-    virtio_state = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
-    memset(virtio_state, 0, sizeof(struct virtio_vnet_state));
+    vbrg_state = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
+    memset(vbrg_state, 0, sizeof(struct virtio_vnet_state));
+
+    vbrg_state->vm = vm;
 
-    struct vm_device * dev = v3_allocate_device(name, &dev_ops, virtio_state);
+    struct vm_device * dev = v3_allocate_device(name, &dev_ops, vbrg_state);
 
     if (v3_attach_device(vm, dev) == -1) {
 	PrintError("Could not attach device %s\n", name);
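The hunk below sizes the IO BAR by rounding the port count up to a power of two, as PCI IO BARs require: the loop doubles the size once per significant bit, and the final check shifts back down when num_ports was already an exact power of two. Worked through as a stand-alone program (same algorithm, hypothetical helper name):

    #include <stdint.h>
    #include <stdio.h>

    /* Round num_ports up to a power of two, as vnet_brg_init does. */
    static uint32_t io_range_for(uint32_t num_ports) {
        uint32_t tmp = num_ports;
        uint32_t size = 1;

        while (tmp > 0) {        /* one doubling per significant bit */
            tmp >>= 1;
            size <<= 1;
        }
        /* exact powers of two overshoot by one shift; undo it */
        if ((num_ports & ((size >> 1) - 1)) == 0) {
            size >>= 1;
        }
        return size;
    }

    int main(void) {
        printf("20 -> %u\n", io_range_for(20));  /* 32 */
        printf("16 -> %u\n", io_range_for(16));  /* 16 */
        return 0;
    }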
@@ -437,38 +648,35 @@ static int vnet_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
 	int tmp_ports = num_ports;
 	int i;
 
-	// This gets the number of ports, rounded up to a power of 2
-	virtio_state->io_range_size = 1; // must be a power of 2
+	vbrg_state->io_range_size = 1; // must be a power of 2
 	while (tmp_ports > 0) {
 	    tmp_ports >>= 1;
-	    virtio_state->io_range_size <<= 1;
+	    vbrg_state->io_range_size <<= 1;
 	}
 
 	// this is to account for any low order bits being set in num_ports
 	// if there are none, then num_ports was already a power of 2 so we shift right to reset it
-	if ((num_ports & ((virtio_state->io_range_size >> 1) - 1)) == 0) {
-	    virtio_state->io_range_size >>= 1;
+	if ((num_ports & ((vbrg_state->io_range_size >> 1) - 1)) == 0) {
+	    vbrg_state->io_range_size >>= 1;
 	}
 
-
 	for (i = 0; i < 6; i++) {
 	    bars[i].type = PCI_BAR_NONE;
 	}
 
 	bars[0].type = PCI_BAR_IO;
 	bars[0].default_base_port = -1;
-	bars[0].num_ports = virtio_state->io_range_size;
-
+	bars[0].num_ports = vbrg_state->io_range_size;
 	bars[0].io_read = virtio_io_read;
 	bars[0].io_write = virtio_io_write;
-	bars[0].private_data = virtio_state;
+	bars[0].private_data = vbrg_state;
 
 	pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE,
 					 0, PCI_AUTO_DEV_NUM, 0,
 					 "LNX_VIRTIO_VNET", bars,
-					 NULL, NULL, NULL, virtio_state);
+					 NULL, NULL, NULL, vbrg_state);
 
 	if (!pci_dev) {
 	    PrintError("Could not register PCI Device\n");
@@ -477,29 +685,45 @@ static int vnet_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
 
 	pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
 	pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
-
-
 	pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
 	pci_dev->config_header.class = PCI_CLASS_MEMORY;
 	pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
-
 	pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
-
-
 	pci_dev->config_header.intr_pin = 1;
-
 	pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
 
-	virtio_state->pci_dev = pci_dev;
-	virtio_state->pci_bus = pci_bus;
+	vbrg_state->pci_dev = pci_dev;
+	vbrg_state->pci_bus = pci_bus;
     }
 
-    virtio_reset(virtio_state);
+    virtio_reset(vbrg_state);
+
+    v3_vnet_add_bridge(vm, vnet_brg_input, (void *)vbrg_state);
+
+// for temporary hack
+#if 1
+    {
+	uchar_t dstmac[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	uchar_t zeromac[6] = {0, 0, 0, 0, 0, 0};
+
+	struct v3_vnet_route route;
+	route.dst_id = 0;
+	route.dst_type = LINK_EDGE;
+	route.src_id = -1;
+	route.src_type = LINK_ANY;
+
+	memcpy(route.dst_mac, dstmac, 6);
+	route.dst_mac_qual = MAC_NONE;
+	memcpy(route.src_mac, zeromac, 6);
+	route.src_mac_qual = MAC_ANY;
+
+	v3_vnet_add_route(route);
+    }
+#endif
 
     return 0;
 }
 
-device_register("LNX_VIRTIO_VNET", vnet_init)
+device_register("LNX_VNET_BRG", vnet_brg_init)

diff --git a/palacios/src/devices/vnet_nic.c b/palacios/src/devices/vnet_nic.c
index c1f6dc9..70e45c3 100644
--- a/palacios/src/devices/vnet_nic.c
+++ b/palacios/src/devices/vnet_nic.c
@@ -269,7 +269,7 @@ static int vnet_nic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
 
 
 //for temporary hack
-#if 1
+#if 0
     {
 	uchar_t tapmac[6] = {0x00,0x02,0x55,0x67,0x42,0x39}; //for Intel-VT test HW
 	//uchar_t tapmac[6] = {0x6e,0xa8,0x75,0xf4,0x82,0x95};
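For orientation before the vmm_vnet.c hunks: routing now distinguishes two destination kinds. LINK_INTERFACE matches still go to the matched device's input callback, while LINK_EDGE matches are stamped with the route's destination id and handed to the single registered bridge. A condensed outline of the handle_one_pkt() change below (a paraphrase with error paths trimmed, not new behavior):

    if (route->route_def.dst_type == LINK_EDGE) {
        pkt->dst_type = LINK_EDGE;
        pkt->dst_id   = route->route_def.dst_id;
        if (vnet_state.bridge != NULL) {
            vnet_state.bridge->input(vnet_state.bridge->vm, pkt,
                                     vnet_state.bridge->private_data);
        }
    } else if (route->route_def.dst_type == LINK_INTERFACE) {
        route->dst_dev->input(route->dst_dev->vm, pkt,
                              route->dst_dev->private_data);
    }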
diff --git a/palacios/src/palacios/vmm_vnet.c b/palacios/src/palacios/vmm_vnet.c
index cac5dc0..7f9248b 100644
--- a/palacios/src/palacios/vmm_vnet.c
+++ b/palacios/src/palacios/vmm_vnet.c
@@ -47,7 +47,7 @@ struct vnet_dev {
     uint8_t mac_addr[6];
     struct v3_vm_info * vm;
 
-    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
+    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
     void * private_data;
 
     int dev_id;
@@ -55,6 +55,14 @@ struct vnet_dev {
 } __attribute__((packed));
 
 
+struct vnet_brg_dev {
+    struct v3_vm_info * vm;
+
+    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
+    void * private_data;
+} __attribute__((packed));
+
+
@@ -83,10 +91,12 @@ struct route_list {
 static struct {
     struct list_head routes;
     struct list_head devs;
-    
+
     int num_routes;
     int num_devs;
 
+    struct vnet_brg_dev * bridge;
+
     v3_lock_t lock;
 
     struct gen_queue * inpkt_q;
@@ -116,15 +126,16 @@ static void print_route(struct vnet_route_info *route){
     PrintDebug("Src dev id (%d), src type (%d)",
 	       route->route_def.src_id,
 	       route->route_def.src_type);
-    PrintDebug("Dst dev id (%d), dst type (%d)",
+    PrintDebug("Dst dev id (%d), dst type (%d)\n",
 	       route->route_def.dst_id,
 	       route->route_def.dst_type);
-    PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
+    if (route->route_def.dst_type == LINK_INTERFACE) {
+	PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
 	       route->dst_dev,
 	       route->dst_dev->dev_id,
 	       route->dst_dev->input,
 	       route->dst_dev->private_data);
-
+    }
 }
 
 static void dump_routes(){
@@ -441,12 +452,21 @@ static int handle_one_pkt(struct v3_vnet_pkt * pkt, void *private_data) {
     }
 #endif
 
-    PrintDebug("Vnet: HandleOnePacket: %d\n", matched_routes->num_routes);
+    PrintDebug("Vnet: HandleOnePacket: route matches %d\n", matched_routes->num_routes);
 
     for (i = 0; i < matched_routes->num_routes; i++) {
 	struct vnet_route_info * route = matched_routes->routes[i];
 
 	if (route->route_def.dst_type == LINK_EDGE) {
-
+	    pkt->dst_type = LINK_EDGE;
+	    pkt->dst_id = route->route_def.dst_id;
+	    if (vnet_state.bridge == NULL) {
+		PrintDebug("VNET: No bridge to send data to links\n");
+		continue;
+	    }
+	    if (vnet_state.bridge->input(vnet_state.bridge->vm, pkt, vnet_state.bridge->private_data) == -1) {
+		PrintDebug("VNET: Packet not sent properly\n");
+		continue;
+	    }
 	} else if (route->route_def.dst_type == LINK_INTERFACE) {
 	    if (route->dst_dev->input(route->dst_dev->vm, pkt, route->dst_dev->private_data) == -1) {
 		PrintDebug("VNET: Packet not sent properly\n");
@@ -528,6 +548,32 @@ exit:
 }
 
 
+int v3_vnet_add_bridge(struct v3_vm_info * vm,
+		       int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),
+		       void * priv_data) {
+    unsigned long flags;
+
+    flags = v3_lock_irqsave(vnet_state.lock);
+
+    if (vnet_state.bridge != NULL) {
+	PrintDebug("Vnet: Replacing current bridge with a new one\n");
+    } else {
+	vnet_state.bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));
+	if (vnet_state.bridge == NULL) {
+	    PrintError("Malloc fails\n");
+	    v3_unlock_irqrestore(vnet_state.lock, flags);
+	    return -1;
+	}
+    }
+
+    vnet_state.bridge->vm = vm;
+    vnet_state.bridge->input = input;
+    vnet_state.bridge->private_data = priv_data;
+
+    v3_unlock_irqrestore(vnet_state.lock, flags);
+
+    return 0;
+}
+
 
 int V3_init_vnet() {
 
     INIT_LIST_HEAD(&(vnet_state.routes));
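End to end: a packet enters VNET through v3_vnet_send_pkt(), the router matches it against the route table, and a LINK_EDGE match lands in vnet_brg_rx() above, which copies the whole v3_vnet_pkt into a guest RX descriptor and raises the device interrupt. A hedged injection sketch, using only the v3_vnet_pkt fields this patch touches and assuming data is an in-struct buffer (which the header-offset arithmetic in vnet_brg_rx implies):

    /* Sketch: hand one ethernet frame to VNET from host-side code.
     * frame/frame_len are placeholders; only size, data and src_type
     * are known from this patch, nothing else is assumed. */
    static int inject_frame(struct guest_info * core,
                            uint8_t * frame, uint16_t frame_len) {
        struct v3_vnet_pkt pkt;

        memset(&pkt, 0, sizeof(pkt));
        pkt.size = frame_len;
        pkt.src_type = LINK_EDGE;            /* arrived from outside the VM */
        memcpy(pkt.data, frame, frame_len);  /* assumes in-struct data buffer */

        return v3_vnet_send_pkt(&pkt, (void *)core);
    }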