Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
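For example, to work from a release branch instead (the branch name Release-1.2 below is only illustrative; list the real branch names with "git branch -r" and substitute one of those):

  cd palacios
  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2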


vnet device updates
Jack Lange [Fri, 12 Mar 2010 00:39:02 +0000 (18:39 -0600)]
palacios/src/devices/lnx_virtio_vnet.c

index 0ce9cd6..fe305c1 100644
@@ -42,11 +42,11 @@ struct vnet_config {
 
 
 #define CTRL_QUEUE 0
-#define RECV_QUEUE 2
 #define XMIT_QUEUE 1
+#define RECV_QUEUE 2
 
 struct virtio_vnet_state {
-    struct v3_vm_info *vm;
+    struct v3_vm_info * vm;
     struct vnet_config vnet_cfg;
     struct virtio_config virtio_cfg;
 
@@ -75,21 +75,28 @@ struct vnet_ctrl_hdr {
     uint32_t num_cmds;
 } __attribute__((packed));
 
-static int virtio_reset(struct virtio_vnet_state * vnet_brg) {
 
-    memset(vnet_brg->queue, 0, sizeof(struct virtio_queue) * 2);
+struct vnet_virtio_pkt {
+    uint32_t link_id;
+    uint32_t pkt_size;
+    uint8_t pkt[1500];
+};
+
+static int virtio_reset(struct virtio_vnet_state * vnet_state) {
+
+    memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * 2);
 
-    vnet_brg->cur_queue = &(vnet_brg->queue[0]);
+    vnet_state->cur_queue = &(vnet_state->queue[0]);
 
-    vnet_brg->virtio_cfg.status = 0;
-    vnet_brg->virtio_cfg.pci_isr = 0;
+    vnet_state->virtio_cfg.status = 0;
+    vnet_state->virtio_cfg.pci_isr = 0;
 
-    vnet_brg->queue[0].queue_size = QUEUE_SIZE;
-    vnet_brg->queue[1].queue_size = QUEUE_SIZE;
-    vnet_brg->queue[2].queue_size = QUEUE_SIZE;
+    vnet_state->queue[0].queue_size = QUEUE_SIZE;
+    vnet_state->queue[1].queue_size = QUEUE_SIZE;
+    vnet_state->queue[2].queue_size = QUEUE_SIZE;
 
-    memset(&(vnet_brg->vnet_cfg), 0, sizeof(struct vnet_config));
-    v3_lock_init(&(vnet_brg->lock));
+    memset(&(vnet_state->vnet_cfg), 0, sizeof(struct vnet_config));
+    v3_lock_init(&(vnet_state->lock));
 
     return 0;
 }
@@ -116,7 +123,7 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
     
     PrintDebug("VNET Bridge: Handling command  queue\n");
 
-    while (q->cur_avail_idx < q->avail->index) {
+    while (q->cur_avail_idx != q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
@@ -207,179 +214,106 @@ static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state *
     return 0;
 }
 
-static int copy_data_to_desc(struct guest_info *core, 
-                                       struct vring_desc * desc, 
-                                       uchar_t * buf, 
-                                       uint_t buf_len) 
-{
-    uint32_t len;
-    uint8_t * desc_buf = NULL;
-
-    if (guest_pa_to_host_va(core, desc->addr_gpa, (addr_t *)&(desc_buf)) == -1) {
-       PrintError("Could not translate buffer address\n");
-       return -1;
-    }
-    len = (desc->length < buf_len)?desc->length:buf_len;
-    memcpy(desc_buf, buf, len);
 
-    return len;
-}
-
-
-static int vnet_brg_rx(struct v3_vnet_pkt *pkt, void * private_data) {
-    struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
-    struct virtio_queue * q = &(vnet_brg->queue[RECV_QUEUE]);
-    char *buf = (char *)pkt;
-    int ret_val;
-    int pkt_head_len = ((addr_t)pkt->data) - ((addr_t)pkt); 
-    uint32_t data_len = sizeof(struct v3_vnet_pkt); //TODO: should not transfer the whole struct, only the data size 
+static int vnet_pkt_input_cb(struct v3_vm_info * vm,  struct v3_vnet_pkt * pkt,  void * private_data){
+    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
+    struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
+    int ret_val = -1;
     unsigned long flags;
 
-    flags = v3_lock_irqsave(vnet_brg->lock);
+    flags = v3_lock_irqsave(vnet_state->lock);
        
     PrintDebug("VNET Bridge: RX: pkt sent to guest size: %d\n, pkt_header_len: %d\n", data_len, pkt_head_len);
-    v3_hexdump(buf, 100, NULL, 0);
 
     if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
-       ret_val = -1;
        goto exit;
     }
 
-    if (q->last_avail_idx > q->avail->index)
-       q->idx_overflow = true;
-    q->last_avail_idx = q->avail->index;
 
-    if (q->cur_avail_idx < q->avail->index || (q->idx_overflow && q->cur_avail_idx < q->avail->index+65536)){
-       uint16_t buf_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
-       uint32_t len = 0;
-       uint32_t offset = 0;
-       struct vring_desc * buf_desc = NULL;
+    if (q->cur_avail_idx != q->avail->index) {
+       uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
+       struct vring_desc * pkt_desc = NULL;
+       struct vnet_virtio_pkt * virtio_pkt = NULL;
 
-       buf_desc = &(q->desc[buf_idx]);
-       PrintDebug("VNET Bridge RX: buffer desc len: %d\n", buf_desc->length);
 
-       len = copy_data_to_desc(&(vnet_brg->vm->cores[0]), buf_desc, buf + offset, data_len - offset);
-       if (len == -1) {
-           ret_val = -1;
-           goto exit;
-       }
-       offset += len;
-       buf_desc->length = len;
-       
-       if (offset >= data_len) {
-           buf_desc->flags &= ~VIRTIO_NEXT_FLAG;
-       }else {
-           PrintDebug("VNET Bridge: RX: Pkt not fit into one desc buf\n");
+       pkt_desc = &(q->desc[pkt_idx]);
+       PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);
+
+       if (guest_pa_to_host_va(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
+           PrintError("Could not translate buffer address\n");
+           return -1;
        }
 
+       // Fill in dst packet buffer
+       virtio_pkt->link_id = pkt->dst_id;
+       virtio_pkt->pkt_size = pkt->size;
+       memcpy(virtio_pkt->pkt, pkt->data, pkt->size);
+
+       
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
-       q->used->ring[q->used->index % q->queue_size].length = data_len; // This should be the total length of data sent to guest (header+pkt_data)
-       q->used->index++;
+       q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_virtio_pkt); // This should be the total length of data sent to guest (header+pkt_data)
 
-       int last_idx = q->cur_avail_idx;
+       q->used->index++;
        q->cur_avail_idx++;
-       if (q->cur_avail_idx < last_idx)
-           q->idx_overflow = false;
     } else {
-       PrintDebug("VNET Bridge: Handle RX: Fails to send, no available buffer: current_idx:%d, availIdx: %d\n", q->cur_avail_idx, q->avail->index);
+       PrintError("Packet buffer overflow in the guest\n");
     }
 
     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
-       v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
-       vnet_brg->virtio_cfg.pci_isr = 0x1;
-       PrintDebug("Raising IRQ %d\n",  vnet_brg->pci_dev->config_header.intr_line);
+       v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
+       vnet_state->virtio_cfg.pci_isr = 0x1;
+       PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
     }
 
+
     ret_val = 0;
 
 exit:
-
-    v3_unlock_irqrestore(vnet_brg->lock, flags);
+    v3_unlock_irqrestore(vnet_state->lock, flags);
  
     return ret_val;
 }
 
-static int vnet_send(struct v3_vnet_pkt *pkt, int pkt_len, void * private_data){
-    struct guest_info *core  = (struct guest_info *)private_data; 
-
-#ifdef CONFIG_DEBUG_VNET_BRG
-    {
-       PrintDebug("VNET Bridge: send pkt size: %d\n", pkt->size);
-       v3_hexdump(pkt->data,pkt->size, NULL, 0);
-    }
-#endif
-
-#ifdef CONFIG_VNET_PROFILE
-    uint64_t start, end;
-    rdtscll(start);
-    core->vnet_times.time_copy_from_guest = start - core->vnet_times.virtio_handle_start;
-#endif
-
-    pkt->src_type = LINK_EDGE;
-
-    return v3_vnet_send_pkt(pkt, (void *)core);
-}
-
-static int pkt_tx(struct guest_info *core, struct virtio_vnet_state * vnet_brg, struct vring_desc * buf_desc) 
-{
-    uint8_t * buf = NULL;
-    uint32_t len = buf_desc->length;
-    struct v3_vnet_pkt *pkt;
 
-    if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
-       PrintError("Could not translate buffer address\n");
-       return -1;
-    }
-    pkt = (struct v3_vnet_pkt *)buf;
-    if (vnet_send(pkt, len, (void *)core) == -1) {
-       return -1;
-    }
-
-    return 0;
-}
-
-static int handle_pkt_tx(struct guest_info *core, struct virtio_vnet_state * vnet_brg) 
+static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) 
 {
-    struct virtio_queue * q = &(vnet_brg->queue[XMIT_QUEUE]);
-
-    if (q->avail->index < q->last_avail_idx) {
-       q->idx_overflow = true;
-    }
-
-    q->last_avail_idx = q->avail->index;
+    struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
 
     PrintDebug("VNET Bridge Device: Handle TX\n");
 
-    while (q->cur_avail_idx < q->avail->index || 
-          (q->idx_overflow && q->cur_avail_idx < (q->avail->index + 65536))) {
-       struct vring_desc * buf_desc = NULL;
+    while (q->cur_avail_idx != q->avail->index) {
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
+       struct vring_desc * pkt_desc = NULL;
+       struct vnet_virtio_pkt * virtio_pkt = NULL;
 
-       buf_desc = &(q->desc[desc_idx]);
+       pkt_desc = &(q->desc[desc_idx]);
 
-       PrintDebug("VNET Bridge: Handle TX buf_len: %d\n", buf_desc->length);
+       PrintDebug("VNET Bridge: Handle TX buf_len: %d\n", pkt_desc->length);
 
-       if (pkt_tx(core, vnet_brg, buf_desc) == -1) {
-               PrintError("Error handling nic operation\n");
-               return -1;
+       if (guest_pa_to_host_va(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
+           PrintError("Could not translate buffer address\n");
+           return -1;
        }
-        
+
+       //TODO:  SETUP VNET PACKET data structure
+
+       /*
+         if (v3_vnet_send_pkt(pkt, (void *)core) == -1) {
+           PrintError("Error sending packet to vnet\n");
+           return -1;
+       }       
+       */
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
-       q->used->ring[q->used->index % q->queue_size].length = buf_desc->length; // What do we set this to????
+       q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
        q->used->index++;
 
-       int last_idx = q->cur_avail_idx;
-       q->cur_avail_idx ++;
-       if (q->cur_avail_idx < last_idx) {
-           q->idx_overflow = false;
-       }
+       q->cur_avail_idx++;
     }
 
     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
-       v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
-       vnet_brg->virtio_cfg.pci_isr = 0x1;
+       v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
+       vnet_state->virtio_cfg.pci_isr = 0x1;
     }
 
 #ifdef CONFIG_VNET_PROFILE
@@ -393,8 +327,8 @@ static int handle_pkt_tx(struct guest_info *core, struct virtio_vnet_state * vne
 }
 
 static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
-    struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
-    int port_idx = port % vnet_brg->io_range_size;
+    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
+    int port_idx = port % vnet_state->io_range_size;
 
 
     PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
@@ -408,7 +342,7 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
                PrintError("Illegal write length for guest features\n");
                return -1;
            }    
-           vnet_brg->virtio_cfg.guest_features = *(uint32_t *)src;
+           vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;
 
            break;
        case VRING_PG_NUM_PORT:
@@ -416,39 +350,39 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
 
-               vnet_brg->cur_queue->pfn = pfn;
+               vnet_state->cur_queue->pfn = pfn;
                
-               vnet_brg->cur_queue->ring_desc_addr = page_addr ;
-               vnet_brg->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
-               vnet_brg->cur_queue->ring_used_addr = ( vnet_brg->cur_queue->ring_avail_addr + \
+               vnet_state->cur_queue->ring_desc_addr = page_addr ;
+               vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
+               vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
                                                 sizeof(struct vring_avail)    + \
                                                 (QUEUE_SIZE * sizeof(uint16_t)));
                
                // round up to next page boundary.
-               vnet_brg->cur_queue->ring_used_addr = (vnet_brg->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
+               vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
 
-               if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_desc_addr, (addr_t *)&(vnet_brg->cur_queue->desc)) == -1) {
+               if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }
 
-               if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_avail_addr, (addr_t *)&(vnet_brg->cur_queue->avail)) == -1) {
+               if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }
 
-               if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_used_addr, (addr_t *)&(vnet_brg->cur_queue->used)) == -1) {
+               if (guest_pa_to_host_va(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }
 
                PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
-                          (void *)(vnet_brg->cur_queue->ring_desc_addr),
-                          (void *)(vnet_brg->cur_queue->ring_avail_addr),
-                          (void *)(vnet_brg->cur_queue->ring_used_addr));
+                          (void *)(vnet_state->cur_queue->ring_desc_addr),
+                          (void *)(vnet_state->cur_queue->ring_avail_addr),
+                          (void *)(vnet_state->cur_queue->ring_used_addr));
 
                PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
-                          vnet_brg->cur_queue->desc, vnet_brg->cur_queue->avail, vnet_brg->cur_queue->used);
+                          vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);
 
            } else {
                PrintError("Illegal write length for page frame number\n");
@@ -456,15 +390,15 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
            }
            break;
        case VRING_Q_SEL_PORT:
-           vnet_brg->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
+           vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
 
-           if (vnet_brg->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
+           if (vnet_state->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
                PrintError("VNET Bridge device has no qeueues. Selected %d\n", 
-                          vnet_brg->virtio_cfg.vring_queue_selector);
+                          vnet_state->virtio_cfg.vring_queue_selector);
                return -1;
            }
            
-           vnet_brg->cur_queue = &(vnet_brg->queue[vnet_brg->virtio_cfg.vring_queue_selector]);
+           vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);
 
            break;
        case VRING_Q_NOTIFY_PORT: {
@@ -473,12 +407,12 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
            PrintDebug("VNET Bridge: Handling Kick\n");
 
            if (queue_idx == 0) {
-               if (handle_cmd_kick(core, vnet_brg) == -1) {
+               if (handle_cmd_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio VNET Control command\n");
                    return -1;
                }
            } else if (queue_idx == 1) {
-               if (handle_pkt_tx(core, vnet_brg) == -1){
+               if (handle_pkt_kick(core, vnet_state) == -1){
                    PrintError("Could not handle Virtio VNET TX\n");
                    return -1;
                }
@@ -492,17 +426,17 @@ static int virtio_io_write(struct guest_info * core, uint16_t port, void * src,
            break;
        }
        case VIRTIO_STATUS_PORT:
-           vnet_brg->virtio_cfg.status = *(uint8_t *)src;
+           vnet_state->virtio_cfg.status = *(uint8_t *)src;
 
-           if (vnet_brg->virtio_cfg.status == 0) {
+           if (vnet_state->virtio_cfg.status == 0) {
                PrintDebug("VNET Bridge: Resetting device\n");
-               virtio_reset(vnet_brg);
+               virtio_reset(vnet_state);
            }
 
            break;
 
        case VIRTIO_ISR_PORT:
-           vnet_brg->virtio_cfg.pci_isr = *(uint8_t *)src;
+           vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
@@ -585,12 +519,7 @@ static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, u
     return length;
 }
 
-static int vnet_brg_input(struct v3_vm_info * vm, 
-                               struct v3_vnet_pkt * pkt, 
-                               void * private_data){
 
-    return vnet_brg_rx(pkt, private_data);
-}
 
 static struct v3_device_ops dev_ops = {
     .free = NULL,
@@ -602,7 +531,7 @@ static struct v3_device_ops dev_ops = {
 
 static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
-    struct virtio_vnet_state * vbrg_state = NULL;
+    struct virtio_vnet_state * vnet_state = NULL;
     struct pci_device * pci_dev = NULL;
     char * name = v3_cfg_val(cfg, "name");
 
@@ -613,12 +542,12 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
        return -1;
     }
     
-    vbrg_state  = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
-    memset(vbrg_state, 0, sizeof(struct virtio_vnet_state));
+    vnet_state  = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
+    memset(vnet_state, 0, sizeof(struct virtio_vnet_state));
        
-    vbrg_state->vm = vm;
+    vnet_state->vm = vm;
 
-    struct vm_device * dev = v3_allocate_device(name, &dev_ops, vbrg_state);
+    struct vm_device * dev = v3_allocate_device(name, &dev_ops, vnet_state);
 
     if (v3_attach_device(vm, dev) == -1) {
        PrintError("Could not attach device %s\n", name);
@@ -634,17 +563,17 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
        int i;
 
        // This gets the number of ports, rounded up to a power of 2
-       vbrg_state->io_range_size = 1; // must be a power of 2
+       vnet_state->io_range_size = 1; // must be a power of 2
 
        while (tmp_ports > 0) {
            tmp_ports >>= 1;
-           vbrg_state->io_range_size <<= 1;
+           vnet_state->io_range_size <<= 1;
        }
        
        // this is to account for any low order bits being set in num_ports
        // if there are none, then num_ports was already a power of 2 so we shift right to reset it
-       if ((num_ports & ((vbrg_state->io_range_size >> 1) - 1)) == 0) {
-           vbrg_state->io_range_size >>= 1;
+       if ((num_ports & ((vnet_state->io_range_size >> 1) - 1)) == 0) {
+           vnet_state->io_range_size >>= 1;
        }
 
        for (i = 0; i < 6; i++) {
@@ -653,15 +582,15 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
 
        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
-       bars[0].num_ports = vbrg_state->io_range_size;
+       bars[0].num_ports = vnet_state->io_range_size;
        bars[0].io_read = virtio_io_read;
        bars[0].io_write = virtio_io_write;
-       bars[0].private_data = vbrg_state;
+       bars[0].private_data = vnet_state;
 
        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
                                         0, PCI_AUTO_DEV_NUM, 0,
                                         "LNX_VIRTIO_VNET", bars,
-                                        NULL, NULL, NULL, vbrg_state);
+                                        NULL, NULL, NULL, vnet_state);
 
        if (!pci_dev) {
            PrintError("Could not register PCI Device\n");
@@ -678,34 +607,13 @@ static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
        pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
 
 
-       vbrg_state->pci_dev = pci_dev;
-       vbrg_state->pci_bus = pci_bus;
+       vnet_state->pci_dev = pci_dev;
+       vnet_state->pci_bus = pci_bus;
     }
 
-    virtio_reset(vbrg_state);
+    virtio_reset(vnet_state);
 
-    v3_vnet_add_bridge(vm, vnet_brg_input, (void *)vbrg_state);
-
-//for temporary hack
-#if 1  
-    {
-       uchar_t dstmac[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
-       uchar_t zeromac[6] = {0,0,0,0,0,0};
-
-       struct v3_vnet_route route;
-       route.dst_id = 0;
-       route.dst_type = LINK_EDGE;
-       route.src_id = -1;
-       route.src_type = LINK_ANY;
-
-       memcpy(route.dst_mac, dstmac, 6);
-       route.dst_mac_qual = MAC_NONE;
-       memcpy(route.src_mac, zeromac, 6);
-       route.src_mac_qual = MAC_ANY;
-          
-       v3_vnet_add_route(route);
-    }
-#endif
+    v3_vnet_add_bridge(vm, vnet_pkt_input_cb, (void *)vnet_state);
 
     return 0;
 }