From: Lei Xia
Date: Thu, 13 Oct 2011 15:52:29 +0000 (-0500)
Subject: A minor fix for VNET
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=commitdiff_plain;h=926fe574cd42bb860c19fd04610451c24b6d42b6

A minor fix for VNET
---

diff --git a/palacios/include/palacios/vmm_ethernet.h b/palacios/include/palacios/vmm_ethernet.h
index dadb775..c4725a9 100644
--- a/palacios/include/palacios/vmm_ethernet.h
+++ b/palacios/include/palacios/vmm_ethernet.h
@@ -28,9 +28,9 @@
 #define ETH_ALEN 6
 
 #define MIN_MTU 68
-#define MAX_MTU 65535
+#define MAX_MTU 65536
 
-#define MAX_PACKET_LEN (MAX_MTU)
+#define MAX_PACKET_LEN (MAX_MTU + ETHERNET_HEADER_LEN)
 
 #ifdef V3_CONFIG_VNET
 extern int net_debug;
diff --git a/palacios/src/devices/lnx_virtio_nic.c b/palacios/src/devices/lnx_virtio_nic.c
index 6300eb8..2dc1684 100644
--- a/palacios/src/devices/lnx_virtio_nic.c
+++ b/palacios/src/devices/lnx_virtio_nic.c
@@ -1,3 +1,4 @@
+
 /*
  * This file is part of the Palacios Virtual Machine Monitor developed
  * by the V3VEE Project with funding from the United States National
@@ -131,8 +132,6 @@ struct virtio_net_state {
     uint8_t mergeable_rx_bufs;
 
     struct v3_timer * timer;
-    struct vnet_thread * poll_thread;
-
     struct nic_statistics stats;
 
     struct v3_dev_net_ops * net_ops;
@@ -330,8 +329,6 @@ static int handle_pkt_tx(struct guest_info * core,
 	virtio_state->stats.rx_interrupts ++;
     }
 
-    V3_Print("Virtio Intr Line %d\n", virtio_state->pci_dev->config_header.intr_line);
-
     if(txed > 0) {
 	V3_Net_Print(2, "Virtio Handle TX: txed pkts: %d, left %d\n", txed, left);
     }
@@ -425,8 +422,8 @@ static int virtio_io_write(struct guest_info *core,
 				pfn, page_addr);
 	    if(virtio->tx_notify == 0){
 		disable_cb(&virtio->tx_vq);
-		vnet_thread_wakeup(virtio->poll_thread);
 	    }
+	    virtio->status = 1;
 	    break;
 	case 2:
 	    virtio_setup_queue(core, virtio,
@@ -760,7 +757,12 @@ static struct v3_device_ops dev_ops = {
 static int virtio_poll(int quote, void * data){
     struct virtio_net_state * virtio = (struct virtio_net_state *)data;
 
-    return handle_pkt_tx(&(virtio->vm->cores[0]), virtio, quote);
+    if (virtio->status) {
+
+	return handle_pkt_tx(&(virtio->vm->cores[0]), virtio, quote);
+    }
+
+    return 0;
 }
 
 static int register_dev(struct virtio_dev_state * virtio,
@@ -838,7 +840,6 @@ static int register_dev(struct virtio_dev_state * virtio,
     return 0;
 }
 
-
 #define RATE_UPPER_THRESHOLD 10  /* 10000 pkts per second, around 100Mbits */
 #define RATE_LOWER_THRESHOLD 1
 #define PROFILE_PERIOD 10000 /*us*/
@@ -924,11 +925,12 @@ static int connect_fn(struct v3_vm_info * info,
     net_state->net_ops = ops;
     net_state->backend_data = private_data;
     net_state->virtio_dev = virtio;
-    net_state->tx_notify = 0;
-    net_state->rx_notify = 0;
+
+    net_state->tx_notify = 1;
+    net_state->rx_notify = 1;
 
     net_state->timer = v3_add_timer(&(info->cores[0]),
-				    &timer_ops,net_state);
+				    &timer_ops,net_state);
 
     ops->recv = virtio_rx;
     ops->poll = virtio_poll;
@@ -938,8 +940,6 @@ static int connect_fn(struct v3_vm_info * info,
     ops->config.fnt_mac = V3_Malloc(ETH_ALEN);
     memcpy(ops->config.fnt_mac, virtio->mac, ETH_ALEN);
 
-    net_state->status = 1;
-
     return 0;
 }
 
diff --git a/palacios/src/vnet/vnet_core.c b/palacios/src/vnet/vnet_core.c
index 4669029..807708e 100644
--- a/palacios/src/vnet/vnet_core.c
+++ b/palacios/src/vnet/vnet_core.c
@@ -53,7 +53,7 @@ struct vnet_dev {
 
     void * private_data;
 
-    struct list_head node;
+    struct list_head node;
 } __attribute__((packed));
 
 
@@ -303,7 +303,7 @@ void v3_vnet_del_route(uint32_t route_idx){
     flags = vnet_lock_irqsave(vnet_state.lock);
     list_for_each_entry(route,
 		&(vnet_state.routes), node) {
-	V3_Print("v3_vnet_del_route, route idx: %d\n", route->idx);
+	Vnet_Print(0, "v3_vnet_del_route, route idx: %d\n", route->idx);
 	if(route->idx == route_idx){
 	    list_del(&(route->node));
 	    Vnet_Free(route);
@@ -549,7 +549,7 @@ int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
 
 
 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
-		    struct v3_vnet_dev_ops *ops, int quote, int poll_state,
+		    struct v3_vnet_dev_ops * ops, int quote, int poll_state,
 		    void * priv_data){
     struct vnet_dev * new_dev = NULL;
     unsigned long flags;
@@ -563,6 +563,7 @@ int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
 
     memcpy(new_dev->mac_addr, mac, 6);
     new_dev->dev_ops.input = ops->input;
+    new_dev->dev_ops.poll = ops->poll;
     new_dev->private_data = priv_data;
     new_dev->vm = vm;
     new_dev->dev_id = 0;
@@ -575,6 +576,12 @@ int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
 	list_add(&(new_dev->node), &(vnet_state.devs));
 	new_dev->dev_id = ++ vnet_state.dev_idx;
 	vnet_state.num_devs ++;
+
+	if(new_dev->poll) {
+	    v3_enqueue(vnet_state.poll_devs, (addr_t)new_dev);
+	}
+    } else {
+	PrintError("VNET/P: Device with the same MAC is already there\n");
     }
 
     vnet_unlock_irqrestore(vnet_state.lock, flags);
@@ -708,25 +715,23 @@ void v3_vnet_del_bridge(uint8_t type) {
  * that runs on multiple cores
  * or it could be running on a dedicated side core
  */
-static int vnet_tx_flush(void *args){
+static int vnet_tx_flush(void * args){
     struct vnet_dev * dev = NULL;
     int ret;
 
     Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");
 
-    /* we need thread sleep/wakeup in Palacios */
     while(!vnet_thread_should_stop()){
 	dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs);
 	if(dev != NULL){
 	    if(dev->poll && dev->dev_ops.poll != NULL){
 		ret = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
-
+
 		if (ret < 0){
-		    PrintDebug("VNET/P: poll from device %p error!\n", dev);
+		    Vnet_Print(0, "VNET/P: poll from device %p error!\n", dev);
 		}
-
-		v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
 	    }
+	    v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
 	}else { /* no device needs to be polled */
 	    /* sleep here? */
 	    Vnet_Yield();
@@ -736,7 +741,6 @@ static int vnet_tx_flush(void *args){
     return 0;
 }
 
-
 int v3_init_vnet() {
     memset(&vnet_state, 0, sizeof(vnet_state));
 
@@ -758,7 +762,7 @@ int v3_init_vnet() {
 
     vnet_state.poll_devs = v3_create_queue();
 
-    vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "vnetd");
+    vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "vnetd-1");
 
     Vnet_Debug("VNET/P is initiated\n");