X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fdevices%2Flnx_virtio_nic.c;h=b6fd304265095fcf09e66e2288286608c6c74bce;hb=c80aaab03156ec020bd4a76688ff6d2544955bf3;hp=d3d5cfbe24510ae4f7ab8eec9059d67f4738b0dc;hpb=610d3af422ae2f0eb43d156984bf5d56d3352e46;p=palacios.git diff --git a/palacios/src/devices/lnx_virtio_nic.c b/palacios/src/devices/lnx_virtio_nic.c index d3d5cfb..b6fd304 100644 --- a/palacios/src/devices/lnx_virtio_nic.c +++ b/palacios/src/devices/lnx_virtio_nic.c @@ -7,13 +7,13 @@ * and the University of New Mexico. You can find out more at * http://www.v3vee.org * - * Copyright (c) 2008, Lei Xia - * Copyright (c) 2008, Cui Zheng - * Copyright (c) 2008, The V3VEE Project + * Copyright (c) 2010, Lei Xia + * Copyright (c) 2010, Cui Zheng + * Copyright (c) 2010, The V3VEE Project * All rights reserved. * * Author: Lei Xia - * Cui Zheng + * Cui Zheng * * * This is free software. You are permitted to use, @@ -27,7 +27,7 @@ #include #include #include - +#include #include @@ -36,8 +36,6 @@ #define PrintDebug(fmt, args...) #endif -#define VIRTIO_NIC_PROFILE - #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ #define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 << 10)) @@ -48,25 +46,39 @@ struct virtio_net_hdr { uint8_t gso_type; uint16_t hdr_len; /* Ethernet + IP + tcp/udp hdrs */ uint16_t gso_size; /* Bytes to append to hdr_len per frame */ - uint16_t csum_start; /* Position to start checksumming from */ - uint16_t csum_offset; /* Offset after that to place checksum */ + uint16_t csum_start; /* Position to start checksumming from */ + uint16_t csum_offset; /* Offset after that to place checksum */ }__attribute__((packed)); + +/* This is the version of the header to use when the MRG_RXBUF + * feature has been negotiated. */ +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + uint16_t num_buffers; /* Number of merged rx buffers */ +}; + -#define QUEUE_SIZE 1024 +#define TX_QUEUE_SIZE 64 +#define RX_QUEUE_SIZE 1024 #define CTRL_QUEUE_SIZE 64 #define ETH_ALEN 6 +#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ +#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ +#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ +#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. 
*/ + struct virtio_net_config { - uint8_t mac[ETH_ALEN]; //VIRTIO_NET_F_MAC + uint8_t mac[ETH_ALEN]; /* VIRTIO_NET_F_MAC */ uint16_t status; } __attribute__((packed)); struct virtio_dev_state { struct vm_device * pci_bus; struct list_head dev_list; - struct guest_info *vm; + struct v3_vm_info *vm; }; struct virtio_net_state { @@ -77,29 +89,44 @@ struct virtio_net_state { struct pci_device * pci_dev; int io_range_size; - struct virtio_queue rx_vq; //index 0, rvq in Linux virtio driver, handle packet to guest - struct virtio_queue tx_vq; //index 1, svq in Linux virtio driver, handle packet from guest - struct virtio_queue ctrl_vq; //index 2, ctrol info from guest + struct virtio_queue rx_vq; /* idx 0, pkts to guest */ + struct virtio_queue tx_vq; /* idx 1, pkts from guest */ + struct virtio_queue ctrl_vq; /* idx 2 */ - ulong_t pkt_sent, pkt_recv, pkt_drop; + int buffed_rx; + int tx_disabled; /* stop TX pkts from guest */ - struct v3_dev_net_ops * net_ops; + uint64_t pkt_sent, pkt_recv, pkt_drop; - v3_lock_t lock; + struct v3_dev_net_ops * net_ops; + v3_lock_t rx_lock, tx_lock; void * backend_data; struct virtio_dev_state * virtio_dev; struct list_head dev_link; }; +/* virtio nic error type */ +#define ERR_VIRTIO_OTHER 1 +#define ERR_VIRTIO_RXQ_FULL 2 +#define ERR_VIRTIO_RXQ_NOSET 3 +#define ERR_VIRTIO_TXQ_NOSET 4 +#define ERR_VIRTIO_TXQ_FULL 5 +#define ERR_VIRTIO_TXQ_DISABLED 6 + + static int virtio_free(struct vm_device * dev) { - return -1; + return 0; } static int virtio_init_state(struct virtio_net_state * virtio) { + virtio->rx_vq.queue_size = RX_QUEUE_SIZE; + virtio->tx_vq.queue_size = TX_QUEUE_SIZE; + virtio->ctrl_vq.queue_size = CTRL_QUEUE_SIZE; + virtio->rx_vq.ring_desc_addr = 0; virtio->rx_vq.ring_avail_addr = 0; virtio->rx_vq.ring_used_addr = 0; @@ -118,68 +145,60 @@ static int virtio_init_state(struct virtio_net_state * virtio) virtio->ctrl_vq.pfn = 0; virtio->ctrl_vq.cur_avail_idx = 0; - virtio->virtio_cfg.host_features = 0; - //virtio->virtio_cfg.status = VIRTIO_NET_S_LINK_UP; virtio->virtio_cfg.pci_isr = 0; + + virtio->virtio_cfg.host_features = 0; // (1 << VIRTIO_NET_F_MAC); - if (v3_lock_init(&(virtio->lock)) == -1){ - PrintError("Virtio NIC: Failure to init lock for net_state\n"); + if ((v3_lock_init(&(virtio->rx_lock)) == -1) || + (v3_lock_init(&(virtio->tx_lock)) == -1)){ + PrintError("Virtio NIC: Failure to init locks for net_state\n"); } virtio->pkt_sent = virtio->pkt_recv = virtio->pkt_drop = 0; + virtio->buffed_rx = 0; return 0; } -static int pkt_tx(struct virtio_net_state * virtio, struct vring_desc * buf_desc) +static int +pkt_tx(struct guest_info * core, + struct virtio_net_state * virtio, + struct vring_desc * buf_desc) { uint8_t * buf = NULL; uint32_t len = buf_desc->length; - PrintDebug("Virtio NIC: Handling Virtio Write, net_state: %p\n", virtio); - - if (guest_pa_to_host_va(virtio->virtio_dev->vm, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) { + if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) { PrintError("Could not translate buffer address\n"); - return -1; + return -ERR_VIRTIO_OTHER; } - if (virtio->net_ops->send(buf, len, (void *)virtio, NULL) == -1) { - return -1; - } - - return 0; + return virtio->net_ops->send(buf, len, virtio->backend_data); } -static int build_receive_header(struct virtio_net_hdr * hdr, const void * buf, int raw) { - hdr->flags = 0; - - if (!raw) { - memcpy(hdr, buf, sizeof(struct virtio_net_hdr)); - } else { - memset(hdr, 0, sizeof(struct virtio_net_hdr)); - } - - return 0; -} -static int copy_data_to_desc(struct 
virtio_net_state * virtio_state, struct vring_desc * desc, uchar_t * buf, uint_t buf_len) +static int +copy_data_to_desc(struct guest_info * core, + struct virtio_net_state * virtio_state, + struct vring_desc * desc, + uchar_t * buf, + uint_t buf_len, + uint_t offset) { uint32_t len; uint8_t * desc_buf = NULL; - if (guest_pa_to_host_va(virtio_state->virtio_dev->vm, desc->addr_gpa, (addr_t *)&(desc_buf)) == -1) { + if (v3_gpa_to_hva(core, desc->addr_gpa, (addr_t *)&(desc_buf)) == -1) { PrintError("Could not translate buffer address\n"); return -1; } - len = (desc->length < buf_len)?desc->length:buf_len; - memcpy(desc_buf, buf, len); + len = (desc->length < buf_len)?(desc->length - offset):buf_len; + memcpy(desc_buf+offset, buf, len); return len; } - - static int get_desc_count(struct virtio_queue * q, int index) { struct vring_desc * tmp_desc = &(q->desc[index]); int cnt = 1; @@ -192,23 +211,62 @@ static int get_desc_count(struct virtio_queue * q, int index) { return cnt; } -static int handle_ctrl(struct virtio_net_state * dev) { +static inline void enable_cb(struct virtio_queue *queue){ + queue->used->flags &= ~ VRING_NO_NOTIFY_FLAG; +} + +static inline void disable_cb(struct virtio_queue *queue) { + queue->used->flags |= VRING_NO_NOTIFY_FLAG; +} + + +/* interrupt the guest, so the guest core get EXIT to Palacios + * this happens when there are either incoming pkts for the guest + * or the guest can start TX pkts again */ +static inline void notify_guest(struct virtio_net_state * virtio){ + v3_interrupt_cpu(virtio->virtio_dev->vm, virtio->virtio_dev->vm->cores[0].cpu_id, 0); +} + + +/* guest free some pkts from rx queue */ +static int handle_rx_kick(struct guest_info *core, + struct virtio_net_state * virtio) +{ + unsigned long flags; + + flags = v3_lock_irqsave(virtio->rx_lock); + + virtio->net_ops->start_rx(virtio->backend_data); + //disable_cb(&virtio->rx_vq); + + v3_unlock_irqrestore(virtio->rx_lock, flags); + + return 0; +} + + +static int handle_ctrl(struct guest_info *core, + struct virtio_net_state * virtio) { + return 0; } -static int handle_pkt_tx(struct virtio_net_state * virtio_state) +static int handle_pkt_tx(struct guest_info *core, + struct virtio_net_state * virtio_state) { struct virtio_queue * q = &(virtio_state->tx_vq); struct virtio_net_hdr * hdr = NULL; + int recved = 0; + unsigned long flags; - if (q->avail->index < q->last_avail_idx) { - q->idx_overflow = true; - } + if (!q->ring_avail_addr) + return -ERR_VIRTIO_TXQ_NOSET; - q->last_avail_idx = q->avail->index; + if(virtio_state->tx_disabled) + return -ERR_VIRTIO_TXQ_DISABLED; - while (q->cur_avail_idx < q->avail->index || - (q->idx_overflow && q->cur_avail_idx < (q->avail->index + 65536))) { + flags = v3_lock_irqsave(virtio_state->tx_lock); + while (q->cur_avail_idx != q->avail->index) { struct vring_desc * hdr_desc = NULL; addr_t hdr_addr = 0; uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size]; @@ -217,53 +275,63 @@ static int handle_pkt_tx(struct virtio_net_state * virtio_state) int i = 0; hdr_desc = &(q->desc[desc_idx]); - if (guest_pa_to_host_va(virtio_state->virtio_dev->vm, hdr_desc->addr_gpa, &(hdr_addr)) == -1) { + if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, &(hdr_addr)) == -1) { PrintError("Could not translate block header address\n"); - return -1; + goto exit_error; } hdr = (struct virtio_net_hdr*)hdr_addr; desc_idx = hdr_desc->next; - + + if(desc_cnt > 2){ + PrintError("VNIC: merged rx buffer not supported\n"); + goto exit_error; + } + + /* here we assumed that one ethernet pkt is not 
splitted into multiple virtio buffer */ for (i = 0; i < desc_cnt - 1; i++) { struct vring_desc * buf_desc = &(q->desc[desc_idx]); - if (pkt_tx(virtio_state, buf_desc) == -1) { + if (pkt_tx(core, virtio_state, buf_desc) == -1) { PrintError("Error handling nic operation\n"); - return -1; + goto exit_error; } req_len += buf_desc->length; desc_idx = buf_desc->next; } virtio_state->pkt_sent ++; + recved ++; q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size]; q->used->ring[q->used->index % q->queue_size].length = req_len; // What do we set this to???? - q->used->index++; - - int last_idx = q->cur_avail_idx; - + q->used->index ++; + q->cur_avail_idx ++; - - if (q->cur_avail_idx < last_idx) { - q->idx_overflow = false; - } } + v3_unlock_irqrestore(virtio_state->tx_lock, flags); + + if(!recved) + return 0; + if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) { v3_pci_raise_irq(virtio_state->virtio_dev->pci_bus, 0, virtio_state->pci_dev); virtio_state->virtio_cfg.pci_isr = 0x1; } -#ifdef VIRTIO_NIC_PROFILE - if(virtio_state->pkt_sent % 10000 == 0) - PrintError("Virtio NIC: %p, pkt_sent: %ld\n", virtio_state, virtio_state->pkt_sent); -#endif - return 0; + +exit_error: + + v3_unlock_irqrestore(virtio_state->tx_lock, flags); + return -ERR_VIRTIO_OTHER; } -static int virtio_setup_queue(struct virtio_net_state * virtio_state, struct virtio_queue * queue, addr_t pfn, addr_t page_addr) { + +static int virtio_setup_queue(struct guest_info *core, + struct virtio_net_state * virtio_state, + struct virtio_queue * queue, + addr_t pfn, addr_t page_addr) { queue->pfn = pfn; queue->ring_desc_addr = page_addr; @@ -271,20 +339,20 @@ static int virtio_setup_queue(struct virtio_net_state * virtio_state, struct vir queue->ring_used_addr = ((queue->ring_avail_addr) + (sizeof(struct vring_avail)) + (queue->queue_size * sizeof(uint16_t))); - + // round up to next page boundary. 
queue->ring_used_addr = (queue->ring_used_addr + 0xfff) & ~0xfff; - if (guest_pa_to_host_va(virtio_state->virtio_dev->vm, queue->ring_desc_addr, (addr_t *)&(queue->desc)) == -1) { + if (v3_gpa_to_hva(core, queue->ring_desc_addr, (addr_t *)&(queue->desc)) == -1) { PrintError("Could not translate ring descriptor address\n"); return -1; } - if (guest_pa_to_host_va(virtio_state->virtio_dev->vm, queue->ring_avail_addr, (addr_t *)&(queue->avail)) == -1) { + if (v3_gpa_to_hva(core, queue->ring_avail_addr, (addr_t *)&(queue->avail)) == -1) { PrintError("Could not translate ring available address\n"); return -1; } - if (guest_pa_to_host_va(virtio_state->virtio_dev->vm, queue->ring_used_addr, (addr_t *)&(queue->used)) == -1) { + if (v3_gpa_to_hva(core, queue->ring_used_addr, (addr_t *)&(queue->used)) == -1) { PrintError("Could not translate ring used address\n"); return -1; } @@ -300,7 +368,9 @@ static int virtio_setup_queue(struct virtio_net_state * virtio_state, struct vir return 0; } -static int virtio_io_write(struct guest_info *core, uint16_t port, void * src, uint_t length, void * private_data) +static int virtio_io_write(struct guest_info *core, + uint16_t port, void * src, + uint_t length, void * private_data) { struct virtio_net_state * virtio = (struct virtio_net_state *)private_data; int port_idx = port % virtio->io_range_size; @@ -315,7 +385,6 @@ static int virtio_io_write(struct guest_info *core, uint16_t port, void * src, u return -1; } virtio->virtio_cfg.guest_features = *(uint32_t *)src; - PrintDebug("Setting Guest Features to %x\n", virtio->virtio_cfg.guest_features); break; case VRING_PG_NUM_PORT: @@ -328,13 +397,15 @@ static int virtio_io_write(struct guest_info *core, uint16_t port, void * src, u uint16_t queue_idx = virtio->virtio_cfg.vring_queue_selector; switch (queue_idx) { case 0: - virtio_setup_queue(virtio, &virtio->rx_vq, pfn, page_addr); + virtio_setup_queue(core, virtio, &virtio->rx_vq, pfn, page_addr); + //disable_cb(&virtio->rx_vq); break; case 1: - virtio_setup_queue(virtio, &virtio->tx_vq, pfn, page_addr); + virtio_setup_queue(core, virtio, &virtio->tx_vq, pfn, page_addr); + //disable_cb(&virtio->tx_vq); break; case 2: - virtio_setup_queue(virtio, &virtio->ctrl_vq, pfn, page_addr); + virtio_setup_queue(core, virtio, &virtio->ctrl_vq, pfn, page_addr); break; default: break; @@ -344,7 +415,7 @@ static int virtio_io_write(struct guest_info *core, uint16_t port, void * src, u case VRING_Q_SEL_PORT: virtio->virtio_cfg.vring_queue_selector = *(uint16_t *)src; if (virtio->virtio_cfg.vring_queue_selector > 2) { - PrintError("Virtio NIC device only uses 3 queue, selected %d\n", + PrintError("Virtio NIC: wrong queue idx: %d\n", virtio->virtio_cfg.vring_queue_selector); return -1; } @@ -354,20 +425,19 @@ static int virtio_io_write(struct guest_info *core, uint16_t port, void * src, u { uint16_t queue_idx = *(uint16_t *)src; if (queue_idx == 0){ - PrintDebug("receive queue notification 0, packet get by Guest\n"); + handle_rx_kick(core, virtio); } else if (queue_idx == 1){ - if (handle_pkt_tx(virtio) == -1) { + if (handle_pkt_tx(core, virtio) == -1) { PrintError("Could not handle NIC Notification\n"); return -1; } } else if (queue_idx == 2){ - if (handle_ctrl(virtio) == -1) { + if (handle_ctrl(core, virtio) == -1) { PrintError("Could not handle NIC Notification\n"); return -1; } } else { - PrintError("Virtio NIC device only uses 3 queue, selected %d\n", - queue_idx); + PrintError("Wrong queue index %d\n", queue_idx); } break; } @@ -383,6 +453,7 @@ static int 
virtio_io_write(struct guest_info *core, uint16_t port, void * src, u case VIRTIO_ISR_PORT: virtio->virtio_cfg.pci_isr = *(uint8_t *)src; break; + default: return -1; break; @@ -391,13 +462,15 @@ static int virtio_io_write(struct guest_info *core, uint16_t port, void * src, u return length; } -static int virtio_io_read(struct guest_info *core, uint16_t port, void * dst, uint_t length, void * private_data) +static int virtio_io_read(struct guest_info *core, + uint16_t port, void * dst, + uint_t length, void * private_data) { struct virtio_net_state * virtio = (struct virtio_net_state *)private_data; int port_idx = port % virtio->io_range_size; uint16_t queue_idx = virtio->virtio_cfg.vring_queue_selector; - PrintDebug("Virtio NIC %p: Read for port %d (index =%d), length=%d", private_data, + PrintDebug("Virtio NIC %p: Read for port %d (index =%d), length=%d\n", private_data, port, port_idx, length); switch (port_idx) { @@ -447,7 +520,6 @@ static int virtio_io_read(struct guest_info *core, uint16_t port, void * dst, ui default: break; } - PrintDebug("queue index: %d, value=0x%x\n", (int)queue_idx, *(uint16_t *)dst); break; case VIRTIO_STATUS_PORT: @@ -465,7 +537,7 @@ static int virtio_io_read(struct guest_info *core, uint16_t port, void * dst, ui break; default: - PrintError("Virtio NIC: Read of Unhandled Virtio Read\n"); + PrintError("Virtio NIC: Read of Unhandled Virtio Read:%d\n", port_idx); return -1; } @@ -473,79 +545,95 @@ static int virtio_io_read(struct guest_info *core, uint16_t port, void * dst, ui } - - static int virtio_rx(uint8_t * buf, uint32_t size, void * private_data) { struct virtio_net_state * virtio = (struct virtio_net_state *)private_data; struct virtio_queue * q = &(virtio->rx_vq); - struct virtio_net_hdr hdr; - uint32_t hdr_len = sizeof(struct virtio_net_hdr); + struct virtio_net_hdr_mrg_rxbuf hdr; + uint32_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); uint32_t data_len = size; uint32_t offset = 0; unsigned long flags; + int ret_val = -ERR_VIRTIO_OTHER; + int raw = 1; - flags = v3_lock_irqsave(virtio->lock); - - PrintDebug("VIRTIO NIC: sending packet to virtio nic %p, size:%d", virtio, size); +#ifndef CONFIG_DEBUG_VIRTIO_NET + { + PrintDebug("Virtio-NIC: virtio_rx: size: %d\n", size); + //v3_hexdump(buf, size, NULL, 0); + } +#endif + + flags = v3_lock_irqsave(virtio->rx_lock); virtio->pkt_recv ++; - data_len -= hdr_len; + if (!raw) + data_len -= hdr_len; - build_receive_header(&hdr, buf, 1); + if (!raw) + memcpy(&hdr, buf, sizeof(struct virtio_net_hdr_mrg_rxbuf)); + else + memset(&hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf)); if (q->ring_avail_addr == 0) { PrintError("Queue is not set\n"); - return -1; + ret_val = -ERR_VIRTIO_RXQ_NOSET; + goto exit; } - if (q->last_avail_idx > q->avail->index) - q->idx_overflow = true; - q->last_avail_idx = q->avail->index; - - if (q->cur_avail_idx < q->avail->index || (q->idx_overflow && q->cur_avail_idx < q->avail->index+65536)){ + if (q->cur_avail_idx != q->avail->index){ addr_t hdr_addr = 0; uint16_t hdr_idx = q->avail->ring[q->cur_avail_idx % q->queue_size]; uint16_t buf_idx = 0; struct vring_desc * hdr_desc = NULL; hdr_desc = &(q->desc[hdr_idx]); - if (guest_pa_to_host_va(virtio->virtio_dev->vm, hdr_desc->addr_gpa, &(hdr_addr)) == -1) { + if (v3_gpa_to_hva(&(virtio->virtio_dev->vm->cores[0]), hdr_desc->addr_gpa, &(hdr_addr)) == -1) { PrintError("Could not translate receive buffer address\n"); - return -1; + goto exit; } - - memcpy((void *)hdr_addr, &hdr, sizeof(struct virtio_net_hdr)); + hdr.num_buffers = 1; + 
memcpy((void *)hdr_addr, &hdr, sizeof(struct virtio_net_hdr_mrg_rxbuf)); if (offset >= data_len) { hdr_desc->flags &= ~VIRTIO_NEXT_FLAG; } + struct vring_desc * buf_desc = NULL; for (buf_idx = hdr_desc->next; offset < data_len; buf_idx = q->desc[hdr_idx].next) { - struct vring_desc * buf_desc = &(q->desc[buf_idx]); uint32_t len = 0; + buf_desc = &(q->desc[buf_idx]); - len = copy_data_to_desc(virtio, buf_desc, buf + offset, data_len - offset); + len = copy_data_to_desc(&(virtio->virtio_dev->vm->cores[0]), virtio, buf_desc, buf + offset, data_len - offset, 0); offset += len; if (offset < data_len) { buf_desc->flags = VIRTIO_NEXT_FLAG; } buf_desc->length = len; } + buf_desc->flags &= ~VIRTIO_NEXT_FLAG; q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size]; - q->used->ring[q->used->index % q->queue_size].length = data_len + hdr_len; // This should be the total length of data sent to guest (header+pkt_data) + q->used->ring[q->used->index % q->queue_size].length = data_len + hdr_len; /* This should be the total length of data sent to guest (header+pkt_data) */ q->used->index++; - - int last_idx = q->cur_avail_idx; q->cur_avail_idx++; - if (q->cur_avail_idx < last_idx) - q->idx_overflow = false; + + /* if there are certain num of pkts in the RX queue, notify guest + * so guest will exit to palacios + * when it returns, guest gets the virtio rx interrupt */ + if((++virtio->buffed_rx > q->queue_size/5) && + (q->avail->flags & VIRTIO_NO_IRQ_FLAG)) { + if(virtio->virtio_dev->vm->cores[0].cpu_id != V3_Get_CPU()){ + notify_guest(virtio); + } + virtio->buffed_rx = 0; + } } else { virtio->pkt_drop++; - -#ifdef VIRTIO_NIC_PROFILE - PrintError("Virtio NIC: %p, one pkt dropped receieved: %ld, dropped: %ld, sent: %ld curidx: %d, avaiIdx: %d\n", - virtio, virtio->pkt_recv, virtio->pkt_drop, virtio->pkt_sent, q->cur_avail_idx, q->avail->index); -#endif + /* RX queue is full, tell backend to stop RX on this device */ + virtio->net_ops->stop_rx(virtio->backend_data); + enable_cb(&virtio->rx_vq); + + ret_val = -ERR_VIRTIO_RXQ_FULL; + goto exit; } if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) { @@ -554,27 +642,62 @@ static int virtio_rx(uint8_t * buf, uint32_t size, void * private_data) { virtio->virtio_cfg.pci_isr = 0x1; } -#ifdef VIRTIO_NIC_PROFILE - if ((virtio->pkt_recv % 10000) == 0){ - PrintError("Virtio NIC: %p, receieved: %ld, dropped: %ld, sent: %ld\n", - virtio, virtio->pkt_recv, virtio->pkt_drop, virtio->pkt_sent); - } -#endif + ret_val = offset; - v3_unlock_irqrestore(virtio->lock, flags); - - return offset; +exit: + + v3_unlock_irqrestore(virtio->rx_lock, flags); + + return ret_val; } static struct v3_device_ops dev_ops = { .free = virtio_free, - .reset = NULL, - .start = NULL, - .stop = NULL, }; -static int register_dev(struct virtio_dev_state * virtio, struct virtio_net_state * net_state) + +/* TODO: Issue here: which vm info it needs? calling VM or the device's own VM? 
*/
+static void virtio_nic_poll(struct v3_vm_info * vm, void * data){
+    struct virtio_net_state * virtio = (struct virtio_net_state *)data;
+
+    handle_pkt_tx(&(vm->cores[0]), virtio);
+}
+
+static void virtio_start_tx(void * data){
+    struct virtio_net_state * virtio = (struct virtio_net_state *)data;
+    unsigned long flags;
+
+    flags = v3_lock_irqsave(virtio->tx_lock);
+    virtio->tx_disabled = 0;
+
+    /* notify the device's guest so it can resume sending pkts */
+    if(virtio->virtio_dev->vm->cores[0].cpu_id != V3_Get_CPU()){
+        notify_guest(virtio);
+    }
+    v3_unlock_irqrestore(virtio->tx_lock, flags);
+}
+
+static void virtio_stop_tx(void * data){
+    struct virtio_net_state * virtio = (struct virtio_net_state *)data;
+    unsigned long flags;
+
+    flags = v3_lock_irqsave(virtio->tx_lock);
+    virtio->tx_disabled = 1;
+
+    /* should this also keep the guest from exiting to palacios to send pkts? */
+    if(virtio->virtio_dev->vm->cores[0].cpu_id != V3_Get_CPU()){
+        disable_cb(&virtio->tx_vq);
+    }
+
+    v3_unlock_irqrestore(virtio->tx_lock, flags);
+}
+
+
+
+
+static int register_dev(struct virtio_dev_state * virtio, 
+                        struct virtio_net_state * net_state) 
 {
     struct pci_device * pci_dev = NULL;
     struct v3_pci_bar bars[6];
@@ -609,7 +732,7 @@ static int register_dev(struct virtio_dev_state * virtio, struct virtio_net_stat
 	bars[0].private_data = net_state;
 
     pci_dev = v3_pci_register_device(virtio->pci_bus, PCI_STD_DEVICE, 
-                                     0, PCI_AUTO_DEV_NUM, 0,
+                                     0, 4/*PCI_AUTO_DEV_NUM*/, 0,
                                      "LNX_VIRTIO_NIC", bars,
                                      NULL, NULL, NULL, net_state);
@@ -631,13 +754,14 @@ static int register_dev(struct virtio_dev_state * virtio, struct virtio_net_stat
     pci_dev->config_header.intr_pin = 1;
     pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
 
-    net_state->pci_dev = pci_dev;
-    net_state->virtio_cfg.host_features = 0; //no features support now
-    net_state->rx_vq.queue_size = QUEUE_SIZE;
-    net_state->tx_vq.queue_size = QUEUE_SIZE;
-    net_state->ctrl_vq.queue_size = CTRL_QUEUE_SIZE;
+    net_state->pci_dev = pci_dev;
     net_state->virtio_dev = virtio;
-
+
+    uchar_t mac[6] = {0x11,0x11,0x11,0x11,0x11,0x11};
+    memcpy(net_state->net_cfg.mac, mac, 6);
+
+    memcpy(pci_dev->config_data, net_state->net_cfg.mac, ETH_ALEN);
+
     virtio_init_state(net_state);
 
     return 0;
@@ -658,6 +782,9 @@ static int connect_fn(struct v3_vm_info * info, 
     net_state->backend_data = private_data;
 
     ops->recv = virtio_rx;
+    ops->poll = virtio_nic_poll;
+    ops->start_tx = virtio_start_tx;
+    ops->stop_tx = virtio_stop_tx;
     ops->frontend_data = net_state;
 
     return 0;
@@ -666,9 +793,9 @@ static int connect_fn(struct v3_vm_info * info, 
 static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
     struct virtio_dev_state * virtio_state = NULL;
-    char * name = v3_cfg_val(cfg, "name");
+    char * dev_id = v3_cfg_val(cfg, "ID");
 
-    PrintDebug("Virtio NIC: Initializing VIRTIO Network device: %s\n", name);
+    PrintDebug("Virtio NIC: Initializing VIRTIO Network device: %s\n", dev_id);
 
     if (pci_bus == NULL) {
 	PrintError("Virtio NIC: VirtIO devices require a PCI Bus");
@@ -680,15 +807,19 @@ static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
     INIT_LIST_HEAD(&(virtio_state->dev_list));
     virtio_state->pci_bus = pci_bus;
+    virtio_state->vm = vm;
+
+    struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, virtio_state);
 
-    struct vm_device * dev = v3_allocate_device(name, &dev_ops, virtio_state);
-    if (v3_attach_device(vm, dev) == -1) {
-	PrintError("Virtio NIC: Could not attach device %s\n", name);
+    if (dev == NULL) {
+	
PrintError("Virtio NIC: Could not attach device %s\n", dev_id); + V3_Free(virtio_state); return -1; } - if (v3_dev_add_net_frontend(vm, name, connect_fn, (void *)virtio_state) == -1) { - PrintError("Virtio NIC: Could not register %s as net frontend\n", name); + if (v3_dev_add_net_frontend(vm, dev_id, connect_fn, (void *)virtio_state) == -1) { + PrintError("Virtio NIC: Could not register %s as net frontend\n", dev_id); + v3_remove_device(dev); return -1; }