From: Lei Xia
Date: Wed, 13 Apr 2011 16:12:10 +0000 (-0500)
Subject: Fix to VMM/Guest mode dynamic switch for Virtio NIC
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=65286d916b384837567b0da69b4d5a77f190fdc0;p=palacios.git

Fix to VMM/Guest mode dynamic switch for Virtio NIC
---

diff --git a/palacios/include/palacios/vmm_ethernet.h b/palacios/include/palacios/vmm_ethernet.h
index e38e4a7..3794d77 100644
--- a/palacios/include/palacios/vmm_ethernet.h
+++ b/palacios/include/palacios/vmm_ethernet.h
@@ -31,18 +31,16 @@
 #include
 
 struct nic_statistics {
-    uint64_t tx_pkts;
+    uint32_t tx_pkts;
     uint64_t tx_bytes;
-    uint64_t tx_dropped;
+    uint32_t tx_dropped;
 
-    uint64_t rx_pkts;
+    uint32_t rx_pkts;
     uint64_t rx_bytes;
-    uint64_t rx_dropped;
+    uint32_t rx_dropped;
 
     uint32_t interrupts;
 };
 
-
-typedef enum {VMM_DRIVERN = 1, GUEST_DRIVERN} nic_poll_type_t;
 
 static inline int is_multicast_ethaddr(const uint8_t * addr)
 {

diff --git a/palacios/src/devices/lnx_virtio_nic.c b/palacios/src/devices/lnx_virtio_nic.c
index 7746560..bb13a69 100644
--- a/palacios/src/devices/lnx_virtio_nic.c
+++ b/palacios/src/devices/lnx_virtio_nic.c
@@ -58,8 +58,8 @@ struct virtio_net_hdr_mrg_rxbuf {
 };
 
-#define TX_QUEUE_SIZE 64
-#define RX_QUEUE_SIZE 1024
+#define TX_QUEUE_SIZE 256
+#define RX_QUEUE_SIZE 4096
 #define CTRL_QUEUE_SIZE 64
 #define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
 
@@ -104,15 +104,15 @@ struct virtio_net_state {
     struct v3_dev_net_ops * net_ops;
     v3_lock_t rx_lock, tx_lock;
 
-    nic_poll_type_t mode;
+    uint8_t tx_notify, rx_notify;
     uint32_t tx_pkts, rx_pkts;
+    uint64_t past_ms;
 
     void * backend_data;
     struct virtio_dev_state * virtio_dev;
     struct list_head dev_link;
 };
 
-
 static int virtio_init_state(struct virtio_net_state * virtio)
 {
     virtio->rx_vq.queue_size = RX_QUEUE_SIZE;

@@ -614,7 +614,7 @@ static int virtio_rx(uint8_t * buf, uint32_t size, void * private_data) {
     v3_unlock_irqrestore(virtio->rx_lock, flags);
 
     /* notify guest if guest is running */
-    if(virtio->mode == GUEST_DRIVERN){
+    if(virtio->rx_notify == 1){
 	v3_interrupt_cpu(virtio->virtio_dev->vm, virtio->virtio_dev->vm->cores[0].cpu_id, 0);
     }
 
@@ -652,8 +652,10 @@ static struct v3_device_ops dev_ops = {
 
 static void virtio_nic_poll(struct v3_vm_info * vm, int budget, void * data){
     struct virtio_net_state * virtio = (struct virtio_net_state *)data;
-
-    handle_pkt_tx(&(vm->cores[0]), virtio);
+
+    if(virtio->tx_notify == 0){
+	handle_pkt_tx(&(vm->cores[0]), virtio);
+    }
 }
 
 static int register_dev(struct virtio_dev_state * virtio,
@@ -728,44 +730,56 @@ static int register_dev(struct virtio_dev_state * virtio,
     return 0;
 }
 
+#define RATE_UPPER_THRESHOLD 10 /* 10000 pkts per second, around 100Mbits */
+#define RATE_LOWER_THRESHOLD 1
+#define PROFILE_PERIOD 50 /* 50 ms */
+
 /* Timer Functions */
 static void virtio_nic_timer(struct guest_info * core,
			     uint64_t cpu_cycles, uint64_t cpu_freq,
			     void * priv_data) {
     struct virtio_net_state * net_state = (struct virtio_net_state *)priv_data;
     uint64_t period_ms;
-    uint32_t pkts_tx=0, pkts_rx=0;
-    uint32_t tx_rate, rx_rate;
 
-    period_ms = (1000*cpu_cycles/cpu_freq);
+    period_ms = cpu_cycles/cpu_freq;
+    net_state->past_ms += period_ms;
 
-    if(period_ms > 100){
-	V3_Print("Virtio NIC timer: last tx %d, last rx: %d\n", pkts_tx, pkts_rx);
+    if(net_state->past_ms > PROFILE_PERIOD){
+	uint32_t tx_rate, rx_rate;
+
+	tx_rate = (net_state->statistics.tx_pkts - net_state->tx_pkts)/net_state->past_ms; /* pkts per ms */
+	rx_rate = (net_state->statistics.rx_pkts - net_state->rx_pkts)/net_state->past_ms;
 
-	pkts_tx = net_state->statistics.tx_pkts - net_state->tx_pkts;
-	pkts_rx = net_state->statistics.rx_pkts - net_state->rx_pkts;
 	net_state->tx_pkts = net_state->statistics.tx_pkts;
 	net_state->rx_pkts = net_state->statistics.rx_pkts;
 
-	tx_rate = pkts_tx/period_ms; /* pkts/per ms */
-	rx_rate = pkts_rx/period_ms;
-
-	if(tx_rate > 100 && net_state->mode == GUEST_DRIVERN){
-	    V3_Print("Virtio NIC: Switch to VMM driven mode\n");
+	if(tx_rate > RATE_UPPER_THRESHOLD && net_state->tx_notify == 1){
+	    V3_Print("Virtio NIC: Switch TX to VMM driven mode\n");
 	    disable_cb(&(net_state->tx_vq));
-	    net_state->mode = VMM_DRIVERN;
+	    net_state->tx_notify = 0;
 	}
 
-	if(tx_rate < 10 && net_state->mode == VMM_DRIVERN){
-	    V3_Print("Virtio NIC: Switch to Guest driven mode\n");
+	if(tx_rate < RATE_LOWER_THRESHOLD && net_state->tx_notify == 0){
+	    V3_Print("Virtio NIC: Switch TX to Guest driven mode\n");
 	    enable_cb(&(net_state->tx_vq));
-	    net_state->mode = GUEST_DRIVERN;
+	    net_state->tx_notify = 1;
 	}
+
+	if(rx_rate > RATE_UPPER_THRESHOLD && net_state->rx_notify == 1){
+	    PrintDebug("Virtio NIC: Switch RX to VMM non-notify mode\n");
+	    net_state->rx_notify = 0;
+	}
+
+	if(rx_rate < RATE_LOWER_THRESHOLD && net_state->rx_notify == 0){
+	    PrintDebug("Virtio NIC: Switch RX to VMM notify mode\n");
+	    net_state->rx_notify = 1;
+	}
+
+	net_state->past_ms = 0;
     }
-
-    return;
 }
 
+
 static struct v3_timer_ops timer_ops = {
     .update_timer = virtio_nic_timer,
 };

@@ -785,6 +799,8 @@ static int connect_fn(struct v3_vm_info * info,
     net_state->net_ops = ops;
     net_state->backend_data = private_data;
     net_state->virtio_dev = virtio;
+    net_state->tx_notify = 1;
+    net_state->rx_notify = 1;
 
     net_state->timer = v3_add_timer(&(info->cores[0]),&timer_ops,net_state);
 
@@ -800,7 +816,9 @@ static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
     struct virtio_dev_state * virtio_state = NULL;
     char * dev_id = v3_cfg_val(cfg, "ID");
-    char * macstr = v3_cfg_val(cfg, "mac");
+    char macstr[128];
+    char * str = v3_cfg_val(cfg, "mac");
+    memcpy(macstr, str, strlen(str) + 1);
 
     if (pci_bus == NULL) {
	PrintError("Virtio NIC: VirtIO devices require a PCI Bus");

diff --git a/palacios/src/palacios/vmm_vnet_core.c b/palacios/src/palacios/vmm_vnet_core.c
index a2d06f2..e0e0ac7 100644
--- a/palacios/src/palacios/vmm_vnet_core.c
+++ b/palacios/src/palacios/vmm_vnet_core.c
@@ -46,7 +46,6 @@ struct vnet_dev {
     void * private_data;
 
     int active;
-    nic_poll_type_t mode;  /*vmm_drivern or guest_drivern */
 
     uint64_t bytes_tx, bytes_rx;
     uint32_t pkts_tx, pkt_rx;
@@ -60,7 +59,7 @@ struct vnet_brg_dev {
     struct v3_vnet_bridge_ops brg_ops;
 
     uint8_t type;
-    nic_poll_type_t mode;
+
     int active;
 
     void * private_data;
 } __attribute__((packed));
@@ -307,8 +306,8 @@ static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
     int max_rank = 0;
     struct list_head match_list;
     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
-    // uint8_t src_type = pkt->src_type;
-    // uint32_t src_link = pkt->src_id;
+// uint8_t src_type = pkt->src_type;
+   // uint32_t src_link = pkt->src_id;
 
 #ifdef CONFIG_DEBUG_VNET
     {
@@ -437,7 +436,6 @@ int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
	PrintDebug("VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
		   cpu, pkt->size, pkt->src_id,
		   pkt->src_type, pkt->dst_id, pkt->dst_type);
-	//v3_hexdump(pkt->data, pkt->size, NULL, 0);
     }
 #endif
@@ -524,7 +522,6 @@ int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
     new_dev->vm = vm;
     new_dev->dev_id = 0;
     new_dev->active = 1;
-    new_dev->mode = GUEST_DRIVERN;
 
     flags = v3_lock_irqsave(vnet_state.lock);
 
@@ -633,7 +630,6 @@ int v3_vnet_add_bridge(struct v3_vm_info * vm,
     tmp_bridge->brg_ops.poll = ops->poll;
     tmp_bridge->private_data = priv_data;
     tmp_bridge->active = 1;
-    tmp_bridge->mode = GUEST_DRIVERN;
     tmp_bridge->type = type;
 
     /* make this atomic to avoid possible race conditions */
@@ -646,15 +642,15 @@ int v3_vnet_add_bridge(struct v3_vm_info * vm,
 
 
 void v3_vnet_do_poll(struct v3_vm_info * vm){
-    struct vnet_dev * dev = NULL;
+    struct vnet_dev * dev = NULL;
 
     /* TODO: run this on separate threads
      * round-robin schedule, with maximal budget for each poll
      */
     list_for_each_entry(dev, &(vnet_state.devs), node) {
-	if(dev->mode == VMM_DRIVERN){
-	    dev->dev_ops.poll(vm, -1, dev->private_data);
-	}
+	if(dev->dev_ops.poll != NULL){
+	    dev->dev_ops.poll(vm, -1, dev->private_data);
+	}
     }
 }
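
The core of this patch is a rate-driven hysteresis between two notification modes: above an upper packet-rate threshold the VMM polls the rings itself (guest kicks and guest interrupts are suppressed), and below a lower threshold it falls back to notification-driven operation. The following standalone sketch distills that decision logic outside Palacios. It is illustrative only: the names nic_profile and update_notify_mode are hypothetical, not Palacios APIs, and the constants simply mirror RATE_UPPER_THRESHOLD, RATE_LOWER_THRESHOLD, and PROFILE_PERIOD from the diff above.

    /* Minimal sketch of the adaptive notify/poll switch in this patch.
     * All names here are illustrative, not part of the Palacios API. */
    #include <stdint.h>
    #include <stdio.h>

    #define RATE_UPPER_THRESHOLD 10  /* pkts per ms: go to VMM-driven polling */
    #define RATE_LOWER_THRESHOLD 1   /* pkts per ms: return to guest notify   */
    #define PROFILE_PERIOD       50  /* ms of history required per sample     */

    struct nic_profile {
        uint32_t total_pkts;  /* running counter maintained by the datapath */
        uint32_t last_pkts;   /* snapshot taken at the previous sample      */
        uint64_t past_ms;     /* ms accumulated since the previous sample   */
        uint8_t  notify;      /* 1 = guest-driven (notify), 0 = VMM polling */
    };

    /* Called from a periodic timer with the ms elapsed since the last call,
     * like virtio_nic_timer above.  Only a rate above the upper threshold
     * disables notification, and only a rate below the lower threshold
     * re-enables it, so moderate traffic cannot flap the mode. */
    static void update_notify_mode(struct nic_profile * p, uint64_t elapsed_ms) {
        p->past_ms += elapsed_ms;
        if (p->past_ms <= PROFILE_PERIOD) {
            return;  /* not enough history for a stable rate estimate */
        }

        uint32_t rate = (p->total_pkts - p->last_pkts) / (uint32_t)p->past_ms;
        p->last_pkts = p->total_pkts;
        p->past_ms = 0;

        if (rate > RATE_UPPER_THRESHOLD && p->notify == 1) {
            p->notify = 0;  /* high traffic: poll from the VMM, avoid exits */
        } else if (rate < RATE_LOWER_THRESHOLD && p->notify == 0) {
            p->notify = 1;  /* idle link: fall back to guest notifications */
        }
    }

    int main(void) {
        struct nic_profile p = { .notify = 1 };

        p.total_pkts += 2000;             /* simulate a burst (~20 pkts/ms) */
        update_notify_mode(&p, 100);
        printf("after burst: notify = %d\n", p.notify);  /* expect 0 */

        update_notify_mode(&p, 100);      /* then a silent interval */
        printf("after idle:  notify = %d\n", p.notify);  /* expect 1 */
        return 0;
    }

Note the two thresholds deliberately differ by an order of magnitude, so a rate hovering near a single cutoff cannot bounce the device between modes on every sample; the approach is similar in spirit to the interrupt-versus-polling switch in Linux NAPI. In the patch itself, the same decision is split per direction: tx_notify gates disable_cb()/enable_cb() on the TX virtqueue and the handle_pkt_tx() call in virtio_nic_poll(), while rx_notify gates the v3_interrupt_cpu() call in virtio_rx().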