uint8_t mergeable_rx_bufs;
struct v3_timer * timer;
- struct vnet_thread * poll_thread;
-
struct nic_statistics stats;
struct v3_dev_net_ops * net_ops;
virtio_state->stats.rx_interrupts ++;
}
- V3_Print("Virtio Intr Line %d\n", virtio_state->pci_dev->config_header.intr_line);
-
if(txed > 0) {
V3_Net_Print(2, "Virtio Handle TX: txed pkts: %d, left %d\n", txed, left);
}
pfn, page_addr);
if(virtio->tx_notify == 0){
disable_cb(&virtio->tx_vq);
- vnet_thread_wakeup(virtio->poll_thread);
}
+ virtio->status = 1;
break;
case 2:
virtio_setup_queue(core, virtio,
/* Poll callback registered as ops->poll for the VNET device layer.
 * Drains up to `quote` pending packets from the guest's virtio TX queue,
 * always driving the transmit path through guest core 0.
 * NOTE(review): this hunk adds a readiness guard — polling now runs only
 * after the device has been activated (virtio->status is set to 1 during
 * queue setup, added elsewhere in this patch); previously handle_pkt_tx
 * could be invoked against a device whose queues were not yet configured.
 * Returns handle_pkt_tx's result when the device is up (presumably the
 * count of packets transmitted, negative on error — confirm against
 * handle_pkt_tx), or 0 when the device is not yet ready. */
static int virtio_poll(int quote, void * data){
struct virtio_net_state * virtio = (struct virtio_net_state *)data;
- return handle_pkt_tx(&(virtio->vm->cores[0]), virtio, quote);
+ if(virtio->status) {
+ return handle_pkt_tx(&(virtio->vm->cores[0]), virtio, quote);
+ }
+
+ return 0;
}
static int register_dev(struct virtio_dev_state * virtio,
V3_Print("Virtio NIC: Switch TX to VMM driven mode\n");
disable_cb(&(net_state->tx_vq));
net_state->tx_notify = 0;
- vnet_thread_wakeup(net_state->poll_thread);
}
if(tx_rate < RATE_LOWER_THRESHOLD && net_state->tx_notify == 0){
net_state->net_ops = ops;
net_state->backend_data = private_data;
net_state->virtio_dev = virtio;
- net_state->tx_notify = 0;
- net_state->rx_notify = 0;
+ net_state->tx_notify = 1;
+ net_state->rx_notify = 1;
net_state->timer = v3_add_timer(&(info->cores[0]),
- &timer_ops,net_state);
+ &timer_ops,net_state);
ops->recv = virtio_rx;
ops->poll = virtio_poll;
ops->config.fnt_mac = V3_Malloc(ETH_ALEN);
memcpy(ops->config.fnt_mac, virtio->mac, ETH_ALEN);
- net_state->status = 1;
-
return 0;
}
memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
- PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
+ Vnet_Print(0, "VNET/P Core: Failed to insert new route entry to the cache\n");
return -1;
}
flags = vnet_lock_irqsave(vnet_state.lock);
list_for_each_entry(route, &(vnet_state.routes), node) {
- V3_Print("v3_vnet_del_route, route idx: %d\n", route->idx);
+ Vnet_Print(1, "v3_vnet_del_route, route idx: %d\n", route->idx);
if(route->idx == route_idx){
list_del(&(route->node));
Vnet_Free(route);
int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
- struct v3_vnet_dev_ops *ops, int quote, int poll_state,
+ struct v3_vnet_dev_ops * ops, int quote, int poll_state,
void * priv_data){
struct vnet_dev * new_dev = NULL;
unsigned long flags;
list_add(&(new_dev->node), &(vnet_state.devs));
new_dev->dev_id = ++ vnet_state.dev_idx;
vnet_state.num_devs ++;
+
+ if(new_dev->poll) {
+ v3_enqueue(vnet_state.poll_devs, (addr_t)new_dev);
+ }
}
vnet_unlock_irqrestore(vnet_state.lock, flags);
/* if the device was found previously, the id should still be 0 */
if (new_dev->dev_id == 0) {
- Vnet_Print(0, "VNET/P Core: Device Already exists\n");
+ Vnet_Print(0, "VNET/P Core: Device with the same MAC Already exists\n");
return -1;
}
vnet_unlock_irqrestore(vnet_state.lock, flags);
if (bridge_free == 0) {
- PrintError("VNET/P Core: Bridge already set\n");
+ Vnet_Print(0, "VNET/P Core: Bridge already set\n");
return -1;
}
tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));
if (tmp_bridge == NULL) {
- PrintError("Malloc Fails\n");
+ Vnet_Print(0, "Malloc Fails\n");
vnet_state.bridge = NULL;
return -1;
}
* that runs on multiple cores
* or it could be running on a dedicated side core
*/
-static int vnet_tx_flush(void *args){
+static int vnet_tx_flush(void * args){
struct vnet_dev * dev = NULL;
int ret;
Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");
- /* we need thread sleep/wakeup in Palacios */
while(!vnet_thread_should_stop()){
dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs);
if(dev != NULL){
ret = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
if (ret < 0){
- PrintDebug("VNET/P: poll from device %p error!\n", dev);
+ Vnet_Print(1, "VNET/P: poll from device %p error!\n", dev);
}
-
- v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
}
+ v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
}else { /* no device needs to be polled */
/* sleep here? */
Vnet_Yield();
vnet_state.num_routes = 0;
if (vnet_lock_init(&(vnet_state.lock)) == -1){
- PrintError("VNET/P: Fails to initiate lock\n");
+ Vnet_Print(0, "VNET/P: Fails to initiate lock\n");
}
vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
if (vnet_state.route_cache == NULL) {
- PrintError("VNET/P: Fails to initiate route cache\n");
+ Vnet_Print(0, "VNET/P: Fails to initiate route cache\n");
return -1;
}