X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fdevices%2Flnx_virtio_vnet.c;h=4569bc7177e2e58a059a7fd2ee0afd43bfbd4a87;hb=ed487950f94cc28008dcf38f68f3e2a1472f9d93;hp=f88bf28a8dc50cb494e2d8c894237f38380680a6;hpb=a04b0778a7f74005560b99255de2c6d2df4d5292;p=palacios.git diff --git a/palacios/src/devices/lnx_virtio_vnet.c b/palacios/src/devices/lnx_virtio_vnet.c index f88bf28..4569bc7 100644 --- a/palacios/src/devices/lnx_virtio_vnet.c +++ b/palacios/src/devices/lnx_virtio_vnet.c @@ -34,7 +34,7 @@ #endif -#define QUEUE_SIZE 8192 +#define QUEUE_SIZE 4096 #define CMD_QUEUE_SIZE 128 #define NUM_QUEUES 3 @@ -63,7 +63,13 @@ struct virtio_vnet_state { int io_range_size; v3_lock_t lock; - ulong_t pkt_sent, pkt_recv, pkt_drop; + uint32_t pkt_sent; + uint32_t pkt_recv; + uint32_t pkt_drop; + uint32_t tx_exit; + uint32_t rx_exit; + uint32_t total_exit; + int ready; }; @@ -143,7 +149,8 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * uint8_t status = 0; - PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx); + PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", + desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx); if (desc_cnt < 3) { PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt); @@ -152,7 +159,7 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * hdr_desc = &(q->desc[desc_idx]); - if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) { + if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) { PrintError("Could not translate VirtioVNET header address\n"); return -1; } @@ -167,7 +174,7 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * buf_desc = &(q->desc[desc_idx]); - if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) { + if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) { PrintError("Could not translate route address\n"); return -1; } @@ -194,7 +201,7 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * status_desc = &(q->desc[desc_idx]); - if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) { + if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) { PrintError("VirtioVNET Error could not translate status address\n"); return -1; } @@ -228,38 +235,38 @@ static int vnet_pkt_input_cb(struct v3_vm_info * vm, struct v3_vnet_pkt vnet_pk int ret_val = -1; unsigned long flags; uint16_t sent; - struct v3_vnet_pkt *pkt; + struct v3_vnet_pkt * pkt = NULL; - if(pkt_num <= 0) + if (pkt_num <= 0) { return 0; + } flags = v3_lock_irqsave(vnet_state->lock); if (q->ring_avail_addr == 0) { PrintError("Queue is not set\n"); - goto exit; + v3_unlock_irqrestore(vnet_state->lock, flags); + return ret_val; } PrintDebug("VNET Bridge: RX: running on cpu: %d, num of pkts: %d\n", V3_Get_CPU(), pkt_num); - for(sent = 0; sent < pkt_num; sent ++) { + for (sent = 0; sent < pkt_num; sent++) { pkt = &vnet_pkts[sent]; - vnet_state->pkt_recv ++; + vnet_state->pkt_recv++; if (q->cur_avail_idx != q->avail->index) { uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size]; struct vring_desc * pkt_desc = NULL; struct vnet_bridge_pkt * virtio_pkt = NULL; - //if(q->cur_avail_idx % 100 == 0) - // PrintError("cur_avai_idx %d, idx: %d\n", q->cur_avail_idx, q->avail->index); - pkt_desc = 
&(q->desc[pkt_idx]); PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length); - if (guest_pa_to_host_va(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) { + if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) { PrintError("Could not translate buffer address\n"); - goto exit; + v3_unlock_irqrestore(vnet_state->lock, flags); + return ret_val; } PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id); @@ -270,19 +277,19 @@ static int vnet_pkt_input_cb(struct v3_vm_info * vm, struct v3_vnet_pkt vnet_pk memcpy(virtio_pkt->pkt, pkt->data, pkt->size); q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size]; - q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); // This should be the total length of data sent to guest (header+pkt_data) + q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); q->used->index++; q->cur_avail_idx++; } else { - //PrintError("VNET Bridge: guest RX buffer full: cur_avai_idx %d, idx: %d\nDisable Bridge\n", q->cur_avail_idx, q->avail->index); - vnet_state->pkt_drop ++; + vnet_state->pkt_drop++; v3_vnet_disable_bridge(); } } - if(sent == 0){ - goto exit; + if (sent == 0) { + v3_unlock_irqrestore(vnet_state->lock, flags); + return ret_val; } if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) { @@ -293,31 +300,42 @@ static int vnet_pkt_input_cb(struct v3_vm_info * vm, struct v3_vnet_pkt vnet_pk ret_val = 0; -exit: - + #ifdef CONFIG_VNET_PROFILE - if (vnet_state->pkt_recv % 10000 == 0) - PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld\n", - vnet_state->pkt_sent, - vnet_state->pkt_recv, - vnet_state->pkt_drop); - + if (vnet_state->pkt_recv % 200000 == 0) + PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n", + vnet_state->pkt_sent, + vnet_state->pkt_recv, + vnet_state->pkt_drop, + vnet_state->total_exit, + vnet_state->tx_exit, + vnet_state->rx_exit); #endif v3_unlock_irqrestore(vnet_state->lock, flags); - + return ret_val; + } -static void vnet_pkt_input_xcall(void *data){ - struct v3_vnet_bridge_input_args *args = (struct v3_vnet_bridge_input_args *)data; +static void vnet_pkt_input_xcall(void * data) { + struct v3_vnet_bridge_input_args * args = (struct v3_vnet_bridge_input_args *)data; vnet_pkt_input_cb(args->vm, args->vnet_pkts, args->pkt_num, args->private_data); } -static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) -{ +static int handle_pkt_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) { struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]); + unsigned long flags = 0; + int recvd = 0; + int cpu = V3_Get_CPU(); + + flags = v3_lock_irqsave(vnet_state->lock); + + if (q->ring_avail_addr == 0) { + v3_unlock_irqrestore(vnet_state->lock,flags); + return 0; + } while (q->cur_avail_idx != q->avail->index) { uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size]; @@ -328,54 +346,77 @@ static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * v PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length); - if (guest_pa_to_host_va(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) { + if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) { PrintError("Could not translate buffer address\n"); return -1; } PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, 
dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id); - v3_vnet_bridge_rx(virtio_pkt->pkt, virtio_pkt->pkt_size, virtio_pkt->link_id); + v3_vnet_rx(virtio_pkt->pkt, virtio_pkt->pkt_size, virtio_pkt->link_id, LINK_EDGE); q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size]; q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to???? q->used->index++; - vnet_state->pkt_sent ++; + vnet_state->pkt_sent++; + recvd++; q->cur_avail_idx++; } - //interrupt the vnet to poll pkts - int cpu = V3_Get_CPU(); - cpu = (cpu == 0)?1:0; - V3_lapic_send_ipi(cpu, V3_VNET_POLLING_VECTOR); + if (recvd == 0) { + v3_unlock_irqrestore(vnet_state->lock,flags); + return 0; + } - if((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) { //optimized for guest's, batch the interrupts - if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) { - v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev); - vnet_state->virtio_cfg.pci_isr = 0x1; - } + //PrintError("In polling get %d\n", recvd); + + //if on the dom0 core, interrupt the domU core to poll pkts + //otherwise, call the polling directly + + + if (vnet_state->vm->cores[0].cpu_id == cpu) { + cpu = (cpu == 0) ? 1 : 0; + v3_interrupt_cpu(vnet_state->vm, cpu, V3_VNET_POLLING_VECTOR); + } else { + v3_vnet_polling(); } + if ((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) { + //optimized for guest's, batch the interrupts + + if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) { + v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev); + vnet_state->virtio_cfg.pci_isr = 0x1; + } + } + #ifdef CONFIG_VNET_PROFILE - if (vnet_state->pkt_sent % 10000 == 0) - PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld\n", - vnet_state->pkt_sent, - vnet_state->pkt_recv, - vnet_state->pkt_drop); - + if (vnet_state->pkt_sent % 200000 == 0) + PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n", + vnet_state->pkt_sent, + vnet_state->pkt_recv, + vnet_state->pkt_drop, + vnet_state->total_exit, + vnet_state->tx_exit, + vnet_state->rx_exit); #endif + v3_unlock_irqrestore(vnet_state->lock,flags); + return 0; } +static int polling_pkt_from_guest(struct v3_vm_info * vm, void *private_data) { + struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data; + + return handle_pkt_kick(&(vm->cores[0]), vnet_state); +} -static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) -{ +static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) { v3_vnet_enable_bridge(); - //PrintError("Enable Bridge\n"); - + return 0; } @@ -387,63 +428,77 @@ static int vnet_virtio_io_write(struct guest_info * core, uint16_t port, void * port, length, *(uint32_t *)src); PrintDebug("VNET Bridge: port idx=%d\n", port_idx); + vnet_state->total_exit++; switch (port_idx) { case GUEST_FEATURES_PORT: + if (length != 4) { PrintError("Illegal write length for guest features\n"); return -1; } + vnet_state->virtio_cfg.guest_features = *(uint32_t *)src; break; - case VRING_PG_NUM_PORT: - if (length == 4) { - addr_t pfn = *(uint32_t *)src; - addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT); + case VRING_PG_NUM_PORT: { - vnet_state->cur_queue->pfn = pfn; - - vnet_state->cur_queue->ring_desc_addr = page_addr ; - vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc)); - vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \ - sizeof(struct 
vring_avail) + \ - (QUEUE_SIZE * sizeof(uint16_t))); - - // round up to next page boundary. - vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff; + addr_t pfn = *(uint32_t *)src; + addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT); - if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) { - PrintError("Could not translate ring descriptor address\n"); - return -1; - } - - if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) { - PrintError("Could not translate ring available address\n"); - return -1; - } - - if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) { - PrintError("Could not translate ring used address\n"); - return -1; - } - - PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n", - (void *)(vnet_state->cur_queue->ring_desc_addr), - (void *)(vnet_state->cur_queue->ring_avail_addr), - (void *)(vnet_state->cur_queue->ring_used_addr)); - - PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", - vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used); - - if(vnet_state->queue[RECV_QUEUE].avail != NULL){ - vnet_state->ready = 1; - } - } else { + if (length != 4) { PrintError("Illegal write length for page frame number\n"); return -1; } + + + vnet_state->cur_queue->pfn = pfn; + + vnet_state->cur_queue->ring_desc_addr = page_addr ; + vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc)); + vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \ + sizeof(struct vring_avail) + \ + (QUEUE_SIZE * sizeof(uint16_t))); + + // round up to next page boundary. 
+ vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff; + + if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) { + PrintError("Could not translate ring descriptor address\n"); + return -1; + } + + if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) { + PrintError("Could not translate ring available address\n"); + return -1; + } + + if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) { + PrintError("Could not translate ring used address\n"); + return -1; + } + + PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n", + (void *)(vnet_state->cur_queue->ring_desc_addr), + (void *)(vnet_state->cur_queue->ring_avail_addr), + (void *)(vnet_state->cur_queue->ring_used_addr)); + + PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", + vnet_state->cur_queue->desc, + vnet_state->cur_queue->avail, + vnet_state->cur_queue->used); + + if (vnet_state->queue[RECV_QUEUE].avail != NULL){ + vnet_state->ready = 1; + } + + //No notify when there is pkt tx from guest + if (vnet_state->queue[XMIT_QUEUE].used != NULL) { + vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG; + } + break; + } case VRING_Q_SEL_PORT: vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src; @@ -470,12 +525,15 @@ static int vnet_virtio_io_write(struct guest_info * core, uint16_t port, void * if (handle_pkt_kick(core, vnet_state) == -1){ PrintError("Could not handle Virtio VNET TX\n"); return -1; - } + } + vnet_state->tx_exit ++; + //PrintError("Notify on TX\n"); } else if (queue_idx == 2) { if (handle_rx_kick(core, vnet_state) == -1){ PrintError("Could not handle Virtio RX buffer refills Kick\n"); return -1; } + vnet_state->rx_exit ++; } else { PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx); return -1; @@ -510,10 +568,6 @@ static int vnet_virtio_io_read(struct guest_info * core, uint16_t port, void * d struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data; int port_idx = port % vnet_state->io_range_size; -/* - PrintDebug("VirtioVNET: VIRTIO SYMBIOTIC Read for port %d (index =%d), length=%d\n", - port, port_idx, length); -*/ switch (port_idx) { case HOST_FEATURES_PORT: if (length != 4) { @@ -590,9 +644,9 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) { struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus")); struct virtio_vnet_state * vnet_state = NULL; struct pci_device * pci_dev = NULL; - char * name = v3_cfg_val(cfg, "name"); + char * dev_id = v3_cfg_val(cfg, "ID"); - PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name); + PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", dev_id); if (pci_bus == NULL) { PrintError("VNET Bridge device require a PCI Bus"); @@ -604,10 +658,10 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) { vnet_state->vm = vm; - struct vm_device * dev = v3_allocate_device(name, &dev_ops, vnet_state); + struct vm_device * dev = v3_allocate_device(dev_id, &dev_ops, vnet_state); if (v3_attach_device(vm, dev) == -1) { - PrintError("Could not attach device %s\n", name); + PrintError("Could not attach device %s\n", dev_id); return -1; } @@ -671,7 +725,7 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) { virtio_reset(vnet_state); V3_Print("Registering Virtio device as vnet bridge\n"); - 
v3_vnet_add_bridge(vm, vnet_pkt_input_cb, vnet_pkt_input_xcall, 5, 1000000, (void *)vnet_state); + v3_vnet_add_bridge(vm, vnet_pkt_input_cb, vnet_pkt_input_xcall, polling_pkt_from_guest, 0, 500000, (void *)vnet_state); return 0;
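
Editor's note: the largest hunk above (the VRING_PG_NUM_PORT case) derives the descriptor table, available ring, and used ring addresses from the single page frame number the guest writes. The following standalone C sketch reproduces only that layout arithmetic so it can be read in isolation; it is not Palacios code. The 16-byte vring_desc, the 4-byte vring_avail header, the helper name vring_layout_from_pfn, the example PFN, and the main() wrapper are all illustrative assumptions, while QUEUE_SIZE, the page shift, and the 0xfff rounding mask mirror the values in the diff.

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE        4096
#define VIRTIO_PAGE_SHIFT 12
#define PAGE_MASK         0xfffULL

struct vring_layout {
    uint64_t desc_addr;   /* descriptor table: QUEUE_SIZE * sizeof(vring_desc) bytes      */
    uint64_t avail_addr;  /* available ring: flags + idx header followed by QUEUE_SIZE ids */
    uint64_t used_addr;   /* used ring, rounded up to the next page boundary               */
};

static struct vring_layout vring_layout_from_pfn(uint64_t pfn) {
    struct vring_layout l;
    uint64_t page_addr = pfn << VIRTIO_PAGE_SHIFT;  /* guest writes a PFN; device works in addresses */

    l.desc_addr  = page_addr;
    l.avail_addr = page_addr + QUEUE_SIZE * 16;            /* assumes sizeof(struct vring_desc) == 16 */
    l.used_addr  = l.avail_addr + 4 + QUEUE_SIZE * 2;      /* assumes 4-byte vring_avail header + ring[] of uint16_t */
    l.used_addr  = (l.used_addr + PAGE_MASK) & ~PAGE_MASK; /* round up to the next page boundary */

    return l;
}

int main(void) {
    struct vring_layout l = vring_layout_from_pfn(0x12345); /* arbitrary example PFN */

    printf("desc=0x%llx avail=0x%llx used=0x%llx\n",
           (unsigned long long)l.desc_addr,
           (unsigned long long)l.avail_addr,
           (unsigned long long)l.used_addr);
    return 0;
}

The page rounding mirrors the legacy virtio-PCI convention that the used ring begins on the page boundary following the descriptor table and available ring, which is why the handler in the diff can translate the three regions with separate v3_gpa_to_hva() calls once the guest has programmed the PFN.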