};
-#define TX_QUEUE_SIZE 64
-#define RX_QUEUE_SIZE 1024
+#define TX_QUEUE_SIZE 256
+#define RX_QUEUE_SIZE 4096
#define CTRL_QUEUE_SIZE 64
#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
struct v3_dev_net_ops * net_ops;
v3_lock_t rx_lock, tx_lock;
- nic_poll_type_t mode;
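+ /* adaptive notification flags: 1 = guest-driven (notifications on), 0 = VMM-driven polling */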
+ uint8_t tx_notify, rx_notify;
uint32_t tx_pkts, rx_pkts;
+ uint64_t past_ms; /* ms accumulated since the last rate sample */
void * backend_data;
struct virtio_dev_state * virtio_dev;
struct list_head dev_link;
};
-
static int virtio_init_state(struct virtio_net_state * virtio)
{
virtio->rx_vq.queue_size = RX_QUEUE_SIZE;
v3_unlock_irqrestore(virtio->rx_lock, flags);
- /* notify guest if guest is running */
+ /* interrupt the guest only when RX notification is enabled */
- if(virtio->mode == GUEST_DRIVERN){
+ if(virtio->rx_notify == 1){
v3_interrupt_cpu(virtio->virtio_dev->vm, virtio->virtio_dev->vm->cores[0].cpu_id, 0);
}
static void virtio_nic_poll(struct v3_vm_info * vm, int budget, void * data){
struct virtio_net_state * virtio = (struct virtio_net_state *)data;
-
- handle_pkt_tx(&(vm->cores[0]), virtio);
+
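+ /* in VMM-driven mode the guest no longer kicks, so the poll path drains the TX queue */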
+ if(virtio->tx_notify == 0){
+ handle_pkt_tx(&(vm->cores[0]), virtio);
+ }
}
static int register_dev(struct virtio_dev_state * virtio,
return 0;
}
+#define RATE_UPPER_THRESHOLD 10 /* pkts per ms: 10000 pkts/s, roughly 100 Mbit/s at 1500-byte frames */
+#define RATE_LOWER_THRESHOLD 1  /* pkts per ms: 1000 pkts/s */
+#define PROFILE_PERIOD 50       /* profiling period in ms */
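+/* The profiling timer samples TX/RX packet rates every PROFILE_PERIOD ms:
+ * a queue whose rate exceeds RATE_UPPER_THRESHOLD is switched to VMM-driven
+ * mode (guest kicks / per-packet guest interrupts suppressed); once the rate
+ * falls below RATE_LOWER_THRESHOLD it returns to guest-driven notification. */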
+
/* Timer Functions */
static void virtio_nic_timer(struct guest_info * core,
uint64_t cpu_cycles, uint64_t cpu_freq,
void * priv_data) {
struct virtio_net_state * net_state = (struct virtio_net_state *)priv_data;
uint64_t period_ms;
- uint32_t pkts_tx=0, pkts_rx=0;
- uint32_t tx_rate, rx_rate;
- period_ms = (1000*cpu_cycles/cpu_freq);
+ period_ms = cpu_cycles/cpu_freq; /* cpu_freq is in kHz, so this yields ms */
+ net_state->past_ms += period_ms;
- if(period_ms > 100){
- V3_Print("Virtio NIC timer: last tx %d, last rx: %d\n", pkts_tx, pkts_rx);
+ if(net_state->past_ms > PROFILE_PERIOD){
+ uint32_t tx_rate, rx_rate;
+
+ tx_rate = (net_state->statistics.tx_pkts - net_state->tx_pkts)/net_state->past_ms; /* pkts per ms */
+ rx_rate = (net_state->statistics.rx_pkts - net_state->rx_pkts)/net_state->past_ms;
- pkts_tx = net_state->statistics.tx_pkts - net_state->tx_pkts;
- pkts_rx = net_state->statistics.rx_pkts - net_state->rx_pkts;
net_state->tx_pkts = net_state->statistics.tx_pkts;
net_state->rx_pkts = net_state->statistics.rx_pkts;
- tx_rate = pkts_tx/period_ms; /* pkts/per ms */
- rx_rate = pkts_rx/period_ms;
-
- if(tx_rate > 100 && net_state->mode == GUEST_DRIVERN){
- V3_Print("Virtio NIC: Switch to VMM driven mode\n");
+ if(tx_rate > RATE_UPPER_THRESHOLD && net_state->tx_notify == 1){
+ V3_Print("Virtio NIC: Switch TX to VMM driven mode\n");
disable_cb(&(net_state->tx_vq));
- net_state->mode = VMM_DRIVERN;
+ net_state->tx_notify = 0;
}
- if(tx_rate < 10 && net_state->mode == VMM_DRIVERN){
- V3_Print("Virtio NIC: Switch to Guest driven mode\n");
+ if(tx_rate < RATE_LOWER_THRESHOLD && net_state->tx_notify == 0){
+ V3_Print("Virtio NIC: Switch TX to Guest driven mode\n");
enable_cb(&(net_state->tx_vq));
- net_state->mode = GUEST_DRIVERN;
+ net_state->tx_notify = 1;
}
+
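+ /* rx_notify == 0: queue received packets without injecting a guest interrupt */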
+ if(rx_rate > RATE_UPPER_THRESHOLD && net_state->rx_notify == 1){
+ PrintDebug("Virtio NIC: Switch RX to VMM None notify mode\n");
+ net_state->rx_notify = 0;
+ }
+
+ if(rx_rate < RATE_LOWER_THRESHOLD && net_state->rx_notify == 0){
+ PrintDebug("Virtio NIC: Switch RX to VMM notify mode\n");
+ net_state->rx_notify = 1;
+ }
+
+ net_state->past_ms = 0;
}
-
- return;
}
+
static struct v3_timer_ops timer_ops = {
.update_timer = virtio_nic_timer,
};
net_state->net_ops = ops;
net_state->backend_data = private_data;
net_state->virtio_dev = virtio;
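+ /* start out guest-driven; the profiling timer adapts the mode to the observed rate */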
+ net_state->tx_notify = 1;
+ net_state->rx_notify = 1;
net_state->timer = v3_add_timer(&(info->cores[0]),&timer_ops,net_state);
struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
struct virtio_dev_state * virtio_state = NULL;
char * dev_id = v3_cfg_val(cfg, "ID");
- char * macstr = v3_cfg_val(cfg, "mac");
+ char macstr[128];
+ char * str = v3_cfg_val(cfg, "mac");
+ memcpy(macstr, str, strlen(str) + 1); /* copy the terminating '\0' as well */
if (pci_bus == NULL) {
PrintError("Virtio NIC: VirtIO devices require a PCI Bus");
void * private_data;
int active;
- nic_poll_type_t mode; /*vmm_drivern or guest_drivern */
uint64_t bytes_tx, bytes_rx;
uint32_t pkts_tx, pkt_rx;
struct v3_vnet_bridge_ops brg_ops;
uint8_t type;
- nic_poll_type_t mode;
+
int active;
void * private_data;
} __attribute__((packed));
int max_rank = 0;
struct list_head match_list;
struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
- // uint8_t src_type = pkt->src_type;
- // uint32_t src_link = pkt->src_id;
#ifdef CONFIG_DEBUG_VNET
{
PrintDebug("VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
cpu, pkt->size, pkt->src_id,
pkt->src_type, pkt->dst_id, pkt->dst_type);
- //v3_hexdump(pkt->data, pkt->size, NULL, 0);
}
#endif
new_dev->vm = vm;
new_dev->dev_id = 0;
new_dev->active = 1;
- new_dev->mode = GUEST_DRIVERN;
flags = v3_lock_irqsave(vnet_state.lock);
tmp_bridge->brg_ops.poll = ops->poll;
tmp_bridge->private_data = priv_data;
tmp_bridge->active = 1;
- tmp_bridge->mode = GUEST_DRIVERN;
tmp_bridge->type = type;
/* make this atomic to avoid possible race conditions */
void v3_vnet_do_poll(struct v3_vm_info * vm){
- struct vnet_dev * dev = NULL;
+ struct vnet_dev * dev = NULL;
/* TODO: run this on separate threads
* round-robin schedule, with maximal budget for each poll
*/
list_for_each_entry(dev, &(vnet_state.devs), node) {
- if(dev->mode == VMM_DRIVERN){
- dev->dev_ops.poll(vm, -1, dev->private_data);
- }
+ if(dev->dev_ops.poll != NULL){
+ dev->dev_ops.poll(vm, -1, dev->private_data);
+ }
}
}