vnet_brg_s.stats.pkt_to_vmm ++;
- return v3_vnet_send_pkt(&pkt, NULL, 1);
+ return v3_vnet_send_pkt(&pkt, NULL);
}
return -1;
}
- vnet_brg_s.serv_thread = kthread_run(_rx_server, NULL, "vnet-server");
+ vnet_brg_s.serv_thread = kthread_run(_rx_server, NULL, "vnet_brgd");
bridge_ops.input = bridge_send_pkt;
bridge_ops.poll = NULL;
int (*write)(uint8_t * buf, uint64_t lba, uint64_t num_bytes, void * private_data);
};
+
+struct v3_dev_net_ops_cfg{
+ void * frontend_data;
+ char * fnt_mac;
+ int quote;
+ int poll; /* need poll? */
+};
+
struct v3_dev_net_ops {
/* Backend implemented functions */
- int (*send)(uint8_t * buf, uint32_t len, int synchronize, void * private_data);
+ int (*send)(uint8_t * buf, uint32_t len, void * private_data);
/* Frontend implemented functions */
int (*recv)(uint8_t * buf, uint32_t len, void * frnt_data);
+ int (*poll)(int quote, void * frnt_data);
/* This is ugly... */
- void * frontend_data;
- char fnt_mac[ETH_ALEN];
+ struct v3_dev_net_ops_cfg config;
};
struct v3_dev_console_ops {
int v3_vnet_add_route(struct v3_vnet_route route);
void v3_vnet_del_route(uint32_t route_idx);
-int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize);
+int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data);
int v3_vnet_find_dev(uint8_t * mac);
int v3_vnet_stat(struct vnet_stat * stats);
int (*input)(struct v3_vm_info * vm,
struct v3_vnet_pkt * pkt,
void * dev_data);
+
+ /* return >0 means there are more pkts in the queue to be sent */
+ int (*poll)(struct v3_vm_info * vm,
+ int quote,
+ void * dev_data);
};
int v3_init_vnet(void);
void v3_deinit_vnet(void);
int v3_vnet_add_dev(struct v3_vm_info * info, uint8_t * mac,
- struct v3_vnet_dev_ops * ops,
+ struct v3_vnet_dev_ops * ops, int quote, int poll_state,
void * priv_data);
int v3_vnet_del_dev(int dev_id);
{
uint8_t * buf = NULL;
uint32_t len = buf_desc->length;
- int synchronize = virtio->tx_notify;
if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
PrintDebug("Could not translate buffer address\n");
v3_hexdump(buf, len, NULL, 0);
}
- if(virtio->net_ops->send(buf, len, synchronize, virtio->backend_data) < 0){
+ if(virtio->net_ops->send(buf, len, virtio->backend_data) < 0){
virtio->stats.tx_dropped ++;
return -1;
}
}
static int handle_pkt_tx(struct guest_info * core,
- struct virtio_net_state * virtio_state)
+ struct virtio_net_state * virtio_state,
+ int quote)
{
struct virtio_queue *q = &(virtio_state->tx_vq);
- int txed = 0;
- unsigned long flags;
+ int txed = 0, left = 0;
+ unsigned long flags;
if (!q->ring_avail_addr) {
return -1;
q->used->index ++;
q->cur_avail_idx ++;
-
- txed ++;
+
+ if(++txed >= quote && quote > 0){
+ left = (q->cur_avail_idx != q->avail->index);
+ break;
+ }
}
v3_unlock_irqrestore(virtio_state->tx_lock, flags);
virtio_state->stats.rx_interrupts ++;
}
+ V3_Print("Virtio Intr Line %d\n", virtio_state->pci_dev->config_header.intr_line);
+
if(txed > 0) {
- V3_Net_Print(2, "Virtio Handle TX: txed pkts: %d\n", txed);
+ V3_Net_Print(2, "Virtio Handle TX: txed pkts: %d, left %d\n", txed, left);
}
- return 0;
+ return left;
exit_error:
/* receive queue refill */
virtio->stats.tx_interrupts ++;
} else if (queue_idx == 1){
- if (handle_pkt_tx(core, virtio) == -1) {
- PrintError("Could not handle Virtio NIC tx kick\n");
+ if (handle_pkt_tx(core, virtio, 0) < 0) {
+ PrintError("Virtio NIC: Error to handle packet TX\n");
return -1;
}
virtio->stats.tx_interrupts ++;
} else if (queue_idx == 2){
/* ctrl */
} else {
- PrintError("Wrong queue index %d\n", queue_idx);
+ PrintError("Virtio NIC: Wrong queue index %d\n", queue_idx);
}
break;
}
switch (port_idx) {
case HOST_FEATURES_PORT:
if (length != 4) {
- PrintError("Illegal read length for host features\n");
+ PrintError("Virtio NIC: Illegal read length for host features\n");
//return -1;
}
*(uint32_t *)dst = virtio->virtio_cfg.host_features;
case VRING_PG_NUM_PORT:
if (length != 4) {
- PrintError("Illegal read length for page frame number\n");
+ PrintError("Virtio NIC: Illegal read length for page frame number\n");
return -1;
}
switch (queue_idx) {
case VRING_SIZE_PORT:
if (length != 2) {
- PrintError("Illegal read length for vring size\n");
+ PrintError("Virtio NIC: Illegal read length for vring size\n");
return -1;
}
switch (queue_idx) {
case VIRTIO_STATUS_PORT:
if (length != 1) {
- PrintError("Illegal read length for status\n");
+ PrintError("Virtio NIC: Illegal read length for status\n");
return -1;
}
*(uint8_t *)dst = virtio->virtio_cfg.status;
unsigned long flags;
uint8_t kick_guest = 0;
- V3_Net_Print(2, "Virtio-NIC: virtio_rx: size: %d\n", size);
+ V3_Net_Print(2, "Virtio NIC: virtio_rx: size: %d\n", size);
if (!q->ring_avail_addr) {
V3_Net_Print(2, "Virtio NIC: RX Queue not set\n");
len = copy_data_to_desc(&(virtio->virtio_dev->vm->cores[0]),
virtio, buf_desc, buf+offset, size-offset, 0);
if (len < 0){
- V3_Net_Print(2, "Virtio NIC:merged buffer, %d buffer size %d\n",
+ V3_Net_Print(2, "Virtio NIC: merged buffer, %d buffer size %d\n",
hdr.num_buffers, len);
q->cur_avail_idx = old_idx;
goto err_exit;
};
-static int virtio_tx_flush(void * args){
- struct virtio_net_state *virtio = (struct virtio_net_state *)args;
+static int virtio_poll(int quote, void * data){
+ struct virtio_net_state * virtio = (struct virtio_net_state *)data;
- V3_Print("Virtio TX Poll Thread Starting for %s\n",
- virtio->vm->name);
-
- while(1){
- if(virtio->tx_notify == 0){
- handle_pkt_tx(&(virtio->vm->cores[0]), virtio);
- v3_yield(NULL);
- }else {
- vnet_thread_sleep(-1);
- }
- }
-
- return 0;
+ return handle_pkt_tx(&(virtio->vm->cores[0]), virtio, quote);
}
static int register_dev(struct virtio_dev_state * virtio,
int i;
/* This gets the number of ports, rounded up to a power of 2 */
- net_state->io_range_size = 1; // must be a power of 2
+ net_state->io_range_size = 1;
while (tmp_ports > 0) {
tmp_ports >>= 1;
net_state->io_range_size <<= 1;
bars[i].type = PCI_BAR_NONE;
}
- PrintDebug("Virtio-NIC io_range_size = %d\n",
+ PrintDebug("Virtio NIC: io_range_size = %d\n",
net_state->io_range_size);
bars[0].type = PCI_BAR_IO;
bars[0].private_data = net_state;
pci_dev = v3_pci_register_device(virtio->pci_bus, PCI_STD_DEVICE,
- 0, 4/*PCI_AUTO_DEV_NUM*/, 0,
+ 0, PCI_AUTO_DEV_NUM, 0,
"LNX_VIRTIO_NIC", bars,
NULL, NULL, NULL, net_state);
virtio_init_state(net_state);
+ V3_Print("Virtio NIC: Registered Intr Line %d\n", pci_dev->config_header.intr_line);
+
/* Add backend to list of devices */
list_add(&(net_state->dev_link), &(virtio->dev_list));
&timer_ops,net_state);
ops->recv = virtio_rx;
- ops->frontend_data = net_state;
- memcpy(ops->fnt_mac, virtio->mac, ETH_ALEN);
-
- net_state->poll_thread = vnet_start_thread(virtio_tx_flush,
- (void *)net_state, "Virtio_Poll");
+ ops->poll = virtio_poll;
+ ops->config.frontend_data = net_state;
+ ops->config.poll = 1;
+ ops->config.quote = 64;
+ ops->config.fnt_mac = V3_Malloc(ETH_ALEN);
+ memcpy(ops->config.fnt_mac, virtio->mac, ETH_ALEN);
net_state->status = 1;
memcpy(macstr, str, strlen(str));
if (pci_bus == NULL) {
- PrintError("Virtio NIC: VirtIO devices require a PCI Bus");
+ PrintError("Virtio NIC: Virtio devices require a PCI Bus");
return -1;
}
/* called by frontend, send pkt to VNET */
static int vnet_nic_send(uint8_t * buf, uint32_t len,
- int synchronize, void * private_data) {
+ void * private_data) {
struct vnet_nic_state * vnetnic = (struct vnet_nic_state *)private_data;
struct v3_vnet_pkt pkt;
v3_hexdump(buf, len, NULL, 0);
}
- return v3_vnet_send_pkt(&pkt, NULL, synchronize);
+ return v3_vnet_send_pkt(&pkt, NULL);
}
/* send pkt to frontend device */
-static int virtio_input(struct v3_vm_info * info,
+static int fnt_input(struct v3_vm_info * info,
struct v3_vnet_pkt * pkt,
void * private_data){
struct vnet_nic_state *vnetnic = (struct vnet_nic_state *)private_data;
pkt->size, pkt->src_id, pkt->src_type, pkt->dst_id, pkt->dst_type);
return vnetnic->net_ops.recv(pkt->data, pkt->size,
- vnetnic->net_ops.frontend_data);
+ vnetnic->net_ops.config.frontend_data);
+}
+
+
+/* poll pkt from frontend device */
+static int fnt_poll(struct v3_vm_info * info,
+ int quote, void * private_data){
+ struct vnet_nic_state *vnetnic = (struct vnet_nic_state *)private_data;
+
+ return vnetnic->net_ops.poll(quote, vnetnic->net_ops.config.frontend_data);
}
};
static struct v3_vnet_dev_ops vnet_dev_ops = {
- .input = virtio_input,
+ .input = fnt_input,
+ .poll = fnt_poll,
};
}
PrintDebug("Vnet-nic: Connect %s to frontend %s\n",
- dev_id, v3_cfg_val(frontend_cfg, "tag"));
+ dev_id, v3_cfg_val(frontend_cfg, "tag"));
- if ((vnet_dev_id = v3_vnet_add_dev(vm, vnetnic->net_ops.fnt_mac, &vnet_dev_ops, (void *)vnetnic)) == -1) {
+ if ((vnet_dev_id = v3_vnet_add_dev(vm, vnetnic->net_ops.config.fnt_mac,
+ &vnet_dev_ops, vnetnic->net_ops.config.quote,
+ vnetnic->net_ops.config.poll, (void *)vnetnic)) == -1) {
PrintError("Vnet-nic device %s fails to registered to VNET\n", dev_id);
v3_remove_device(dev);
* Copyright (c) 2010, Lei Xia <lxia@northwestern.edu>
* Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
* Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
- * All rights reserved.
+ * All rights reserved
*
* Author: Lei Xia <lxia@northwestern.edu>
* Yuan Tang <ytang@northwestern.edu>
#include <vnet/vnet_host.h>
#include <vnet/vnet_vmm.h>
+#include <palacios/vmm_queue.h>
+
#ifndef V3_CONFIG_DEBUG_VNET
#undef Vnet_Debug
#define Vnet_Debug(fmt, args...)
uint8_t mac_addr[ETH_ALEN];
struct v3_vm_info * vm;
struct v3_vnet_dev_ops dev_ops;
+
+ int poll;
+
+#define VNET_MAX_QUOTE 64
+ int quote;
+
void * private_data;
- struct list_head node;
+ struct list_head node;
} __attribute__((packed));
uint32_t size_alloc;
};
-#define VNET_QUEUE_SIZE 1024
-struct vnet_queue {
- struct queue_entry buf[VNET_QUEUE_SIZE];
- int head, tail;
- int count;
- vnet_lock_t lock;
-};
static struct {
struct list_head routes;
vnet_lock_t lock;
struct vnet_stat stats;
- struct vnet_thread * pkt_flush_thread;
+ /* device queue that are waiting to be polled */
+ struct v3_queue * poll_devs;
- struct vnet_queue pkt_q;
+ struct vnet_thread * pkt_flush_thread;
struct hashtable * route_cache;
} vnet_state;
}
-int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
+int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
struct route_list * matched_routes = NULL;
unsigned long flags;
int i;
int cpu = V3_Get_CPU();
Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
- cpu, pkt->size, pkt->src_id,
- pkt->src_type, pkt->dst_id, pkt->dst_type);
+ cpu, pkt->size, pkt->src_id,
+ pkt->src_type, pkt->dst_id, pkt->dst_type);
if(net_debug >= 4){
- v3_hexdump(pkt->data, pkt->size, NULL, 0);
+ v3_hexdump(pkt->data, pkt->size, NULL, 0);
}
flags = vnet_lock_irqsave(vnet_state.lock);
}
-static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt){
- unsigned long flags;
- struct queue_entry * entry;
- struct vnet_queue * q = &(vnet_state.pkt_q);
- uint16_t num_pages;
-
- flags = vnet_lock_irqsave(q->lock);
-
- if (q->count >= VNET_QUEUE_SIZE){
- Vnet_Print(1, "VNET Queue overflow!\n");
- vnet_unlock_irqrestore(q->lock, flags);
- return -1;
- }
-
- q->count ++;
- entry = &(q->buf[q->tail++]);
- q->tail %= VNET_QUEUE_SIZE;
-
- vnet_unlock_irqrestore(q->lock, flags);
-
- /* this is ugly, but should happen very unlikely */
- while(entry->use);
-
- if(entry->size_alloc < pkt->size){
- if(entry->data != NULL){
- Vnet_FreePages(Vnet_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
- entry->data = NULL;
- }
-
- num_pages = 1 + (pkt->size / PAGE_SIZE);
- entry->data = Vnet_VAddr(Vnet_AllocPages(num_pages));
- if(entry->data == NULL){
- return -1;
- }
- entry->size_alloc = PAGE_SIZE * num_pages;
- }
-
- entry->pkt.data = entry->data;
- memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
- memcpy(entry->data, pkt->data, pkt->size);
-
- entry->use = 1;
-
- return 0;
-}
-
-
-int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
- if(synchronize){
- vnet_tx_one_pkt(pkt, NULL);
- }else {
- vnet_pkt_enqueue(pkt);
- Vnet_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
- }
-
- return 0;
-}
-
int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
- struct v3_vnet_dev_ops *ops,
+ struct v3_vnet_dev_ops *ops, int quote, int poll_state,
void * priv_data){
struct vnet_dev * new_dev = NULL;
unsigned long flags;
new_dev->private_data = priv_data;
new_dev->vm = vm;
new_dev->dev_id = 0;
+ new_dev->quote = quote<VNET_MAX_QUOTE?quote:VNET_MAX_QUOTE;
+ new_dev->poll = poll_state;
flags = vnet_lock_irqsave(vnet_state.lock);
}
+/* can be instantiated as multiple threads
+ * that run on multiple cores,
+ * or it could run on a dedicated side core
+ */
static int vnet_tx_flush(void *args){
- unsigned long flags;
- struct queue_entry * entry;
- struct vnet_queue * q = &(vnet_state.pkt_q);
+ struct vnet_dev * dev = NULL;
+ int ret;
- Vnet_Print(0, "VNET/P Handing Pkt Thread Starting ....\n");
+ Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");
/* we need thread sleep/wakeup in Palacios */
while(!vnet_thread_should_stop()){
- flags = vnet_lock_irqsave(q->lock);
-
- if (q->count <= 0){
- vnet_unlock_irqrestore(q->lock, flags);
- Vnet_Yield();
- }else {
- q->count --;
- entry = &(q->buf[q->head++]);
- q->head %= VNET_QUEUE_SIZE;
-
- vnet_unlock_irqrestore(q->lock, flags);
+ dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs);
+ if(dev != NULL){
+ if(dev->poll && dev->dev_ops.poll != NULL){
+ ret = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
- /* this is ugly, but should happen very unlikely */
- while(!entry->use);
- vnet_tx_one_pkt(&(entry->pkt), NULL);
-
- /* asynchronizely release allocated memory for buffer entry here */
- entry->use = 0;
+ if (ret < 0){
+ PrintDebug("VNET/P: poll from device %p error!\n", dev);
+ }
- Vnet_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);
+ v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
+ }
+ }else { /* no device needs to be polled */
+ /* sleep here? */
+ Vnet_Yield();
}
}
return 0;
}
+
int v3_init_vnet() {
memset(&vnet_state, 0, sizeof(vnet_state));
vnet_state.num_routes = 0;
if (vnet_lock_init(&(vnet_state.lock)) == -1){
- PrintError("VNET/P Core: Fails to initiate lock\n");
+ PrintError("VNET/P: Fails to initiate lock\n");
}
vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
if (vnet_state.route_cache == NULL) {
- PrintError("VNET/P Core: Fails to initiate route cache\n");
+ PrintError("VNET/P: Fails to initiate route cache\n");
return -1;
}
- vnet_lock_init(&(vnet_state.pkt_q.lock));
+ vnet_state.poll_devs = v3_create_queue();
- vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "VNET_Pkts");
+ vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "vnetd");
- Vnet_Debug("VNET/P Core is initiated\n");
+ Vnet_Debug("VNET/P is initiated\n");
return 0;
}