Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Fixed ioapic bug that caused interrupt routing entries to be stored incorrectly and...
[palacios.git] / palacios / src / devices / lnx_virtio_vnet.c
index bfbc56c..4569bc7 100644 (file)
@@ -8,10 +8,12 @@
  * http://www.v3vee.org
  *
  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2008, Lei Xia <lxia@cs.northwestern.edu>
  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
  * All rights reserved.
  *
  * Author: Jack Lange <jarusl@cs.northwestern.edu>
+ *            Lei Xia <lxia@cs.northwestern.edu>
  *
  * This is free software.  You are permitted to use,
  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
@@ -32,7 +34,8 @@
 #endif
 
 
-#define QUEUE_SIZE 128
+#define QUEUE_SIZE 4096
+#define CMD_QUEUE_SIZE 128
 #define NUM_QUEUES 3
 
 struct vnet_config {
@@ -59,6 +62,15 @@ struct virtio_vnet_state {
 
     int io_range_size;
     v3_lock_t lock;
+
+    uint32_t pkt_sent;
+    uint32_t pkt_recv;
+    uint32_t pkt_drop;
+    uint32_t tx_exit;
+    uint32_t rx_exit;
+    uint32_t total_exit;
+
+    int ready;
 };
 
 #define VNET_GET_ROUTES 10
@@ -75,7 +87,8 @@ struct vnet_ctrl_hdr {
     uint32_t num_cmds;
 } __attribute__((packed));
 
-struct vnet_virtio_pkt {
+
+struct vnet_bridge_pkt {
     uint32_t link_id;
     uint32_t pkt_size;
     uint8_t pkt[ETHERNET_PACKET_LEN];
@@ -84,14 +97,14 @@ struct vnet_virtio_pkt {
 
 static int virtio_reset(struct virtio_vnet_state * vnet_state) {
 
-    memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * 2);
+    memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);
 
     vnet_state->cur_queue = &(vnet_state->queue[0]);
 
     vnet_state->virtio_cfg.status = 0;
     vnet_state->virtio_cfg.pci_isr = 0;
 
-    vnet_state->queue[0].queue_size = QUEUE_SIZE;
+    vnet_state->queue[0].queue_size = CMD_QUEUE_SIZE;
     vnet_state->queue[1].queue_size = QUEUE_SIZE;
     vnet_state->queue[2].queue_size = QUEUE_SIZE;
 
@@ -136,7 +149,8 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
        uint8_t status = 0;
 
 
-       PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
+       PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", 
+                  desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
 
        if (desc_cnt < 3) {
            PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
@@ -145,7 +159,7 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
        
        hdr_desc = &(q->desc[desc_idx]);
 
-       if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
+       if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate VirtioVNET header address\n");
            return -1;
        }
@@ -160,7 +174,7 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
                
                buf_desc = &(q->desc[desc_idx]);
 
-               if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
+               if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
                    PrintError("Could not translate route address\n");
                    return -1;
                }
@@ -187,7 +201,7 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
 
        status_desc = &(q->desc[desc_idx]);
 
-       if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
+       if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("VirtioVNET Error could not translate status address\n");
            return -1;
        }
@@ -215,48 +229,67 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
 }
 
 
-static int vnet_pkt_input_cb(struct v3_vm_info * vm,  struct v3_vnet_pkt * pkt,  void * private_data){
+static int vnet_pkt_input_cb(struct v3_vm_info * vm,  struct v3_vnet_pkt vnet_pkts[], uint16_t pkt_num, void * private_data){
     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
     struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
     int ret_val = -1;
     unsigned long flags;
+    uint16_t sent;
+    struct v3_vnet_pkt * pkt = NULL;
+
+    if (pkt_num <= 0) {
+       return 0;
+    }
 
     flags = v3_lock_irqsave(vnet_state->lock);
        
     if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
-       goto exit;
+       v3_unlock_irqrestore(vnet_state->lock, flags);
+       return ret_val;
     }
 
+    PrintDebug("VNET Bridge: RX: running on cpu: %d, num of pkts: %d\n", V3_Get_CPU(), pkt_num);
 
-    if (q->cur_avail_idx != q->avail->index) {
-       uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
-       struct vring_desc * pkt_desc = NULL;
-       struct vnet_virtio_pkt * virtio_pkt = NULL;
+    for (sent = 0; sent < pkt_num; sent++) {
+       pkt = &vnet_pkts[sent];
+       vnet_state->pkt_recv++;
 
+       if (q->cur_avail_idx != q->avail->index) {
+           uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
+           struct vring_desc * pkt_desc = NULL;
+           struct vnet_bridge_pkt * virtio_pkt = NULL;
 
-       pkt_desc = &(q->desc[pkt_idx]);
-       PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);
+           pkt_desc = &(q->desc[pkt_idx]);
+           PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);
 
-       if (guest_pa_to_host_va(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
-           PrintError("Could not translate buffer address\n");
-           return -1;
-       }
+           if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
+               PrintError("Could not translate buffer address\n");
+               v3_unlock_irqrestore(vnet_state->lock, flags);
+               return ret_val;
+           }
 
-       PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);
+           PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);
 
-       // Fill in dst packet buffer
-       virtio_pkt->link_id = pkt->dst_id;
-       virtio_pkt->pkt_size = pkt->size;
-       memcpy(virtio_pkt->pkt, pkt->data, pkt->size);
+           // Fill in dst packet buffer
+           virtio_pkt->link_id = pkt->dst_id;
+           virtio_pkt->pkt_size = pkt->size;
+           memcpy(virtio_pkt->pkt, pkt->data, pkt->size);
        
-       q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
-       q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_virtio_pkt); // This should be the total length of data sent to guest (header+pkt_data)
+           q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
+           q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); 
+
+           q->used->index++;
+           q->cur_avail_idx++;
+       } else {
+           vnet_state->pkt_drop++;
+           v3_vnet_disable_bridge();
+       }
+    }
 
-       q->used->index++;
-       q->cur_avail_idx++;
-    } else {
-       PrintError("Packet buffer overflow in the guest: cur_avai_idx %d, idx: %d\n", q->cur_avail_idx, q->avail->index);
+    if (sent == 0) {
+       v3_unlock_irqrestore(vnet_state->lock, flags);
+       return ret_val;
     }
 
     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
@@ -267,68 +300,127 @@ static int vnet_pkt_input_cb(struct v3_vm_info * vm,  struct v3_vnet_pkt * pkt,
 
     ret_val = 0;
 
-exit:
+       
+#ifdef CONFIG_VNET_PROFILE
+    if (vnet_state->pkt_recv % 200000 == 0)
+       PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
+                  vnet_state->pkt_sent,
+                  vnet_state->pkt_recv,
+                  vnet_state->pkt_drop, 
+                  vnet_state->total_exit,
+                  vnet_state->tx_exit,
+                  vnet_state->rx_exit);
+#endif
+
     v3_unlock_irqrestore(vnet_state->lock, flags);
+
     return ret_val;
+
 }
 
+static void vnet_pkt_input_xcall(void * data) {
+    struct v3_vnet_bridge_input_args * args = (struct v3_vnet_bridge_input_args *)data;
+       
+    vnet_pkt_input_cb(args->vm, args->vnet_pkts, args->pkt_num, args->private_data);
+}
 
-static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) 
-{
+static int handle_pkt_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
     struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
-    struct v3_vnet_pkt pkt;
+    unsigned long flags = 0;
+    int recvd = 0;
+    int cpu = V3_Get_CPU();
+
+    flags = v3_lock_irqsave(vnet_state->lock);
+
+    if (q->ring_avail_addr == 0) {
+       v3_unlock_irqrestore(vnet_state->lock,flags);
+       return 0;
+    }
 
     while (q->cur_avail_idx != q->avail->index) {
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
-       struct vnet_virtio_pkt * virtio_pkt = NULL;
+       struct vnet_bridge_pkt * virtio_pkt = NULL;
 
        pkt_desc = &(q->desc[desc_idx]);
 
        PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length);
-
-       //PrintDebug("q: %p, %p, %p, %d\n", q, vnet_state->queue, &vnet_state->queue[XMIT_QUEUE], XMIT_QUEUE);
-       //PrintDebug("q->used: %p\n", q->used);
-       //PrintDebug("q->used->ring %p\n", q->used->ring);
-
-       if (guest_pa_to_host_va(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
+       
+       if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            return -1;
        }
 
-       PrintDebug("VNET Bridge: TX: pkt size: %d, dst link: %d\n", virtio_pkt->pkt_size, virtio_pkt->link_id);
-
-       pkt.size = virtio_pkt->pkt_size;
-       pkt.src_id = virtio_pkt->link_id;
-       pkt.src_type = LINK_EDGE;
-       memcpy(pkt.header, virtio_pkt->pkt, ETHERNET_HEADER_LEN);
-       pkt.data = virtio_pkt->pkt;
-
-       v3_vnet_send_pkt(&pkt, NULL);
-
-       q = (struct virtio_queue *)((addr_t)vnet_state->queue + XMIT_QUEUE*sizeof(struct virtio_queue));
-
-       PrintDebug("After q: %p, , %p, %p, %d\n", q, vnet_state->queue, &(vnet_state->queue[XMIT_QUEUE]), XMIT_QUEUE);
-       //PrintDebug("After q->used: %p\n", q->used);
-       //PrintDebug("After q->used->ring %p\n", q->used->ring);
+       PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);
        
+       v3_vnet_rx(virtio_pkt->pkt, virtio_pkt->pkt_size, virtio_pkt->link_id, LINK_EDGE);
+
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
        q->used->index++;
 
+       vnet_state->pkt_sent++;
+       recvd++;
+
        q->cur_avail_idx++;
     }
 
-    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
-       v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
-       vnet_state->virtio_cfg.pci_isr = 0x1;
+    if (recvd == 0) {
+       v3_unlock_irqrestore(vnet_state->lock,flags);
+       return 0;
     }
 
+    //PrintError("In polling get %d\n", recvd);
+       
+    //if on the dom0 core, interrupt the domU core to poll pkts
+    //otherwise, call the polling directly
+
+
+    if (vnet_state->vm->cores[0].cpu_id == cpu) {
+       cpu = (cpu == 0) ? 1 : 0;
+       v3_interrupt_cpu(vnet_state->vm, cpu, V3_VNET_POLLING_VECTOR);
+    } else {
+       v3_vnet_polling();
+    }
+
+    if ((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) {
+       //optimized for guest's, batch the interrupts
+       
+       if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
+           v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
+           vnet_state->virtio_cfg.pci_isr = 0x1;
+       }
+    }
+    
+#ifdef CONFIG_VNET_PROFILE
+    if (vnet_state->pkt_sent % 200000 == 0)
+       PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
+                  vnet_state->pkt_sent,
+                  vnet_state->pkt_recv,
+                  vnet_state->pkt_drop, 
+                  vnet_state->total_exit,
+                  vnet_state->tx_exit,
+                  vnet_state->rx_exit);
+#endif
+
+    v3_unlock_irqrestore(vnet_state->lock,flags);
+
     return 0;
 }
 
-static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
+static int polling_pkt_from_guest(struct v3_vm_info * vm, void *private_data) {
+    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
+       
+    return handle_pkt_kick(&(vm->cores[0]), vnet_state);
+}
+
+static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) {
+    v3_vnet_enable_bridge();
+       
+    return 0;
+}
+
+static int vnet_virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
     int port_idx = port % vnet_state->io_range_size;
 
@@ -336,60 +428,77 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
               port, length, *(uint32_t *)src);
     PrintDebug("VNET Bridge: port idx=%d\n", port_idx);
 
+    vnet_state->total_exit++;
 
     switch (port_idx) {
        case GUEST_FEATURES_PORT:
+
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }    
+
            vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;
 
            break;
-       case VRING_PG_NUM_PORT:
-           if (length == 4) {
-               addr_t pfn = *(uint32_t *)src;
-               addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
-
-               vnet_state->cur_queue->pfn = pfn;
-               
-               vnet_state->cur_queue->ring_desc_addr = page_addr ;
-               vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
-               vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
-                                                sizeof(struct vring_avail)    + \
-                                                (QUEUE_SIZE * sizeof(uint16_t)));
-               
-               // round up to next page boundary.
-               vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
+       case VRING_PG_NUM_PORT: {
 
-               if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
-                   PrintError("Could not translate ring descriptor address\n");
-                   return -1;
-               }
+           addr_t pfn = *(uint32_t *)src;
+           addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
 
-               if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
-                   PrintError("Could not translate ring available address\n");
-                   return -1;
-               }
-
-               if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
-                   PrintError("Could not translate ring used address\n");
-                   return -1;
-               }
-
-               PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
-                          (void *)(vnet_state->cur_queue->ring_desc_addr),
-                          (void *)(vnet_state->cur_queue->ring_avail_addr),
-                          (void *)(vnet_state->cur_queue->ring_used_addr));
-
-               PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
-                          vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);
-
-           } else {
+           if (length != 4) {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
+           
+
+           vnet_state->cur_queue->pfn = pfn;
+               
+           vnet_state->cur_queue->ring_desc_addr = page_addr ;
+           vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
+           vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
+                                                     sizeof(struct vring_avail) + \
+                                                     (QUEUE_SIZE * sizeof(uint16_t)));
+           
+           // round up to next page boundary.
+           vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
+           
+           if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
+               PrintError("Could not translate ring descriptor address\n");
+               return -1;
+           }
+           
+           if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
+               PrintError("Could not translate ring available address\n");
+               return -1;
+           }
+           
+           if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
+               PrintError("Could not translate ring used address\n");
+               return -1;
+           }
+           
+           PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
+                      (void *)(vnet_state->cur_queue->ring_desc_addr),
+                      (void *)(vnet_state->cur_queue->ring_avail_addr),
+                      (void *)(vnet_state->cur_queue->ring_used_addr));
+           
+           PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
+                      vnet_state->cur_queue->desc, 
+                      vnet_state->cur_queue->avail, 
+                      vnet_state->cur_queue->used);
+           
+           if (vnet_state->queue[RECV_QUEUE].avail != NULL){
+               vnet_state->ready = 1;
+           }
+           
+           //No notify when there is pkt tx from guest
+           if (vnet_state->queue[XMIT_QUEUE].used != NULL) {
+               vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
+           }
+           
            break;
+       }
        case VRING_Q_SEL_PORT:
            vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
 
@@ -417,8 +526,14 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
                    PrintError("Could not handle Virtio VNET TX\n");
                    return -1;
                }
+               vnet_state->tx_exit ++;
+               //PrintError("Notify on TX\n");
            } else if (queue_idx == 2) {
-               PrintDebug("VNET Bridge: receive kick on RX Queue\n");
+               if (handle_rx_kick(core, vnet_state) == -1){
+                   PrintError("Could not handle Virtio RX buffer refills Kick\n");
+                   return -1;
+               }
+               vnet_state->rx_exit ++;
            } else {
                PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
                return -1;
@@ -448,15 +563,11 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
 }
 
 
-static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
+static int vnet_virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
 
     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
     int port_idx = port % vnet_state->io_range_size;
 
-/*
-    PrintDebug("VirtioVNET: VIRTIO SYMBIOTIC Read  for port %d (index =%d), length=%d\n", 
-              port, port_idx, length);
-*/
     switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
@@ -529,14 +640,13 @@ static struct v3_device_ops dev_ops = {
     .stop = NULL,
 };
 
-
 static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
     struct virtio_vnet_state * vnet_state = NULL;
     struct pci_device * pci_dev = NULL;
-    char * name = v3_cfg_val(cfg, "name");
+    char * dev_id = v3_cfg_val(cfg, "ID");
 
-    PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);
+    PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", dev_id);
 
     if (pci_bus == NULL) {
        PrintError("VNET Bridge device require a PCI Bus");
@@ -548,10 +658,10 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
        
     vnet_state->vm = vm;
 
-    struct vm_device * dev = v3_allocate_device(name, &dev_ops, vnet_state);
+    struct vm_device * dev = v3_allocate_device(dev_id, &dev_ops, vnet_state);
 
     if (v3_attach_device(vm, dev) == -1) {
-       PrintError("Could not attach device %s\n", name);
+       PrintError("Could not attach device %s\n", dev_id);
        return -1;
     }
 
@@ -584,8 +694,8 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
        bars[0].num_ports = vnet_state->io_range_size;
-       bars[0].io_read = virtio_io_read;
-       bars[0].io_write = virtio_io_write;
+       bars[0].io_read = vnet_virtio_io_read;
+       bars[0].io_write = vnet_virtio_io_write;
        bars[0].private_data = vnet_state;
 
        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
@@ -615,7 +725,8 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
     virtio_reset(vnet_state);
 
     V3_Print("Registering Virtio device as vnet bridge\n");
-    v3_vnet_add_bridge(vm, vnet_pkt_input_cb, (void *)vnet_state);
+    v3_vnet_add_bridge(vm, vnet_pkt_input_cb, vnet_pkt_input_xcall, polling_pkt_from_guest, 0, 500000, (void *)vnet_state);
+
 
     return 0;
 }