Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
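For example, a release branch can be tracked in the same manner; the branch name below is only illustrative, so run git branch -r first to see which remote branches actually exist:

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2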


Fix to virtio vnet device
[palacios.git] / palacios / src / devices / lnx_virtio_vnet.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, Lei Xia <lxia@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *         Lei Xia <lxia@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest_mem.h>
#include <devices/lnx_virtio_pci.h>
#include <palacios/vmm_vnet.h>
#include <palacios/vmm_sprintf.h>
#include <devices/pci.h>


#ifndef CONFIG_DEBUG_LINUX_VIRTIO_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


#define QUEUE_SIZE 4096
#define CMD_QUEUE_SIZE 128
#define NUM_QUEUES 3

struct vnet_config {
    uint32_t num_devs;
    uint32_t num_routes;
} __attribute__((packed));


#define CTRL_QUEUE 0
#define XMIT_QUEUE 1
#define RECV_QUEUE 2

struct virtio_vnet_state {
    struct v3_vm_info * vm;
    struct vnet_config vnet_cfg;
    struct virtio_config virtio_cfg;

    struct vm_device * pci_bus;
    struct pci_device * pci_dev;

    struct virtio_queue queue[NUM_QUEUES];

    struct virtio_queue * cur_queue;

    int io_range_size;
    v3_lock_t lock;

    ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
    int ready;
};

#define VNET_GET_ROUTES 10
#define VNET_ADD_ROUTE 11
#define VNET_DEL_ROUTE 12

#define VNET_GET_LINKS 20
#define VNET_ADD_LINK 21
#define VNET_DEL_LINK 22

// structure of the vnet command header
struct vnet_ctrl_hdr {
    uint8_t cmd_type;
    uint32_t num_cmds;
} __attribute__((packed));


struct vnet_bridge_pkt {
    uint32_t link_id;
    uint32_t pkt_size;
    uint8_t pkt[ETHERNET_PACKET_LEN];
} __attribute__((packed));


static int virtio_reset(struct virtio_vnet_state * vnet_state) {

    memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);

    vnet_state->cur_queue = &(vnet_state->queue[0]);

    vnet_state->virtio_cfg.status = 0;
    vnet_state->virtio_cfg.pci_isr = 0;

    vnet_state->queue[0].queue_size = CMD_QUEUE_SIZE;
    vnet_state->queue[1].queue_size = QUEUE_SIZE;
    vnet_state->queue[2].queue_size = QUEUE_SIZE;

    memset(&(vnet_state->vnet_cfg), 0, sizeof(struct vnet_config));
    v3_lock_init(&(vnet_state->lock));

    return 0;
}



static int get_desc_count(struct virtio_queue * q, int index) {
    struct vring_desc * tmp_desc = &(q->desc[index]);
    int cnt = 1;

    while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
        tmp_desc = &(q->desc[tmp_desc->next]);
        cnt++;
    }

    return cnt;
}


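/* 
 * Drain the control queue (queue 0): each available chain is a command header,
 * one or more command buffers (e.g. routes to add), and a trailing status byte
 * that is written back to the guest.
 */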
static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
    struct virtio_queue * q = &(vnet_state->queue[0]);

    PrintDebug("VNET Bridge: Handling command queue\n");

    while (q->cur_avail_idx != q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        uint16_t desc_cnt = get_desc_count(q, desc_idx);
        struct vnet_ctrl_hdr * hdr = NULL;
        int i;
        int xfer_len = 0;
        uint8_t * status_ptr = NULL;
        uint8_t status = 0;


        PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % q->queue_size, desc_idx);

        if (desc_cnt < 3) {
            PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
            return -1;
        }

        hdr_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate VirtioVNET header address\n");
            return -1;
        }

        desc_idx = hdr_desc->next;

        if (hdr->cmd_type == VNET_ADD_ROUTE) {

            for (i = 0; i < hdr->num_cmds; i++) {
                uint8_t tmp_status = 0;
                struct v3_vnet_route * route = NULL;

                buf_desc = &(q->desc[desc_idx]);

                if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
                    PrintError("Could not translate route address\n");
                    return -1;
                }

                // add route
                PrintDebug("VNET Bridge: Adding VNET Route\n");

                tmp_status = v3_vnet_add_route(*route);

                PrintDebug("VNET Route Added\n");

                if (tmp_status != 0) {
                    PrintError("Error adding VNET ROUTE\n");
                    status = tmp_status;
                }

                xfer_len += buf_desc->length;
                desc_idx = buf_desc->next;
            }

        }



        status_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("VirtioVNET Error: could not translate status address\n");
            return -1;
        }

        xfer_len += status_desc->length;
        *status_ptr = status;

        PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = xfer_len; // set to total inbound xfer length

        q->used->index++;
        q->cur_avail_idx++;
    }


    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n", vnet_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 1;
    }


    return 0;
}

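/* 
 * Bridge input callback: called by the VNET core to deliver a packet to the
 * guest by copying it into the next available buffer of the receive queue.
 */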
static int vnet_pkt_input_cb(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
    int ret_val = -1;
    unsigned long flags;

    flags = v3_lock_irqsave(vnet_state->lock);

    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        goto exit;
    }

    if (q->cur_avail_idx != q->avail->index) {
        uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
        struct vnet_bridge_pkt * virtio_pkt = NULL;

        pkt_desc = &(q->desc[pkt_idx]);
        PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);

        if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            goto exit;
        }

        PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);

        // Fill in dst packet buffer
        virtio_pkt->link_id = pkt->dst_id;
        virtio_pkt->pkt_size = pkt->size;
        memcpy(virtio_pkt->pkt, pkt->data, pkt->size);

        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt);

        q->used->index++;
        q->cur_avail_idx++;
    } else {
        vnet_state->pkt_drop++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
        PrintDebug("Raising IRQ %d\n", vnet_state->pci_dev->config_header.intr_line);
    }

    ret_val = 0;

exit:

    v3_unlock_irqrestore(vnet_state->lock, flags);

    return ret_val;
}

static int vnet_pkt_input_xcall(void * data) {
    struct v3_vnet_bridge_xcall_args * args = (struct v3_vnet_bridge_xcall_args *)data;
    int i = 0;

    for (i = 0; i < args->pkt_num; i++) {
        vnet_pkt_input_cb(args->vm, &(args->vnet_pkts[i]), args->private_data);
    }

    return 0;
}

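/* 
 * Drain the transmit queue: each available buffer holds a vnet_bridge_pkt,
 * which is wrapped in a v3_vnet_pkt and handed to the VNET core for routing.
 */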
static int handle_pkt_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
    struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
    int recvd = 0;

    if (q->ring_avail_addr == 0) {
        return -1;
    }

    while (q->cur_avail_idx != q->avail->index) {
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
        struct vnet_bridge_pkt * virtio_pkt = NULL;

        pkt_desc = &(q->desc[desc_idx]);

        PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length);

        if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            return -1;
        }

        //PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);

        struct v3_vnet_pkt pkt;
        pkt.size = virtio_pkt->pkt_size;
        pkt.src_type = LINK_EDGE;
        pkt.src_id = 0;
        memcpy(pkt.header, virtio_pkt->pkt, ETHERNET_HEADER_LEN);
        pkt.data = virtio_pkt->pkt;

        v3_vnet_send_pkt(&pkt, NULL);

        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
        q->used->index++;

        vnet_state->pkt_sent++;
        recvd++;

        q->cur_avail_idx++;
    }

    if (recvd == 0) {
        return 0;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
    }

    return 0;
}

static void vnet_virtio_poll(struct v3_vm_info * vm, void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;

    if (vm == vnet_state->vm) {
        handle_pkt_kick(&(vm->cores[0]), vnet_state);
    }
}

static int handle_rx_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
    //v3_vnet_enable_bridge();

    return 0;
}

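/* 
 * Port I/O write handler for the device's virtio PCI BAR: handles feature
 * negotiation, vring setup, queue selection, kicks, and status/ISR updates.
 */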
static int vnet_virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

    PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
               port, length, *(uint32_t *)src);

    vnet_state->total_exit++;

    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }    
            vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;

            break;
        case VRING_PG_NUM_PORT:
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);

                vnet_state->cur_queue->pfn = pfn;

                vnet_state->cur_queue->ring_desc_addr = page_addr;
                vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_avail_addr + \
                                                         sizeof(struct vring_avail) + \
                                                         (QUEUE_SIZE * sizeof(uint16_t)));

                // round up to next page boundary.
                vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(vnet_state->cur_queue->ring_desc_addr),
                           (void *)(vnet_state->cur_queue->ring_avail_addr),
                           (void *)(vnet_state->cur_queue->ring_used_addr));

                PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
                           vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);

                if (vnet_state->queue[RECV_QUEUE].avail != NULL) {
                    vnet_state->ready = 1;
                    vnet_state->queue[RECV_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
                }

                // No notification on guest TX; Palacios polls the transmit queue instead
                if (vnet_state->queue[XMIT_QUEUE].used != NULL) {
                    vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
                }
            } else {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            if (vnet_state->virtio_cfg.vring_queue_selector >= NUM_QUEUES) {
                PrintError("VNET Bridge device has no queue %d (only %d queues)\n", 
                           vnet_state->virtio_cfg.vring_queue_selector, NUM_QUEUES);
                return -1;
            }

            vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);

            break;
        case VRING_Q_NOTIFY_PORT: {
            uint16_t queue_idx = *(uint16_t *)src;

            PrintDebug("VNET Bridge: Handling Kick\n");

            if (queue_idx == 0) {
                if (handle_cmd_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio VNET Control command\n");
                    return -1;
                }
            } else if (queue_idx == 1) {
                if (handle_pkt_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio VNET TX\n");
                    return -1;
                }
                PrintError("Notify on TX\n");
            } else if (queue_idx == 2) {
                if (handle_rx_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio RX buffer refill kick\n");
                    return -1;
                }
                vnet_state->rx_exit++;
            } else {
                PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
                return -1;
            }

            break;
        }
        case VIRTIO_STATUS_PORT:
            vnet_state->virtio_cfg.status = *(uint8_t *)src;

            if (vnet_state->virtio_cfg.status == 0) {
                PrintDebug("VNET Bridge: Resetting device\n");
                virtio_reset(vnet_state);
            }

            break;

        case VIRTIO_ISR_PORT:
            vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
    }

    return length;
}


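/* 
 * Port I/O read handler for the device's virtio PCI BAR: returns the virtio
 * registers, and maps reads beyond them onto the vnet_config structure.
 */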
static int vnet_virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;

            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->cur_queue->pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }

            *(uint16_t *)dst = vnet_state->cur_queue->queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = vnet_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
            vnet_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
            break;

        default:
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);

            } else {
                PrintError("Unhandled Virtio register read (port_idx=%d)\n", port_idx);
                return -1;
            }

            break;
    }

    return length;
}



static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};

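/* 
 * Device initialization: allocate and attach the device, register it on the
 * PCI bus as a virtio device, and register it with the VNET core as a bridge.
 */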
static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
    struct virtio_vnet_state * vnet_state = NULL;
    struct pci_device * pci_dev = NULL;
    char * name = v3_cfg_val(cfg, "name");

    PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);

    if (pci_bus == NULL) {
        PrintError("VNET Bridge device requires a PCI bus\n");
        return -1;
    }

    vnet_state = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
    memset(vnet_state, 0, sizeof(struct virtio_vnet_state));

    vnet_state->vm = vm;

    struct vm_device * dev = v3_allocate_device(name, &dev_ops, vnet_state);

    if (v3_attach_device(vm, dev) == -1) {
        PrintError("Could not attach device %s\n", name);
        return -1;
    }


    // PCI initialization
    {
        struct v3_pci_bar bars[6];
        int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
        int tmp_ports = num_ports;
        int i;

        // This gets the number of ports, rounded up to a power of 2
        vnet_state->io_range_size = 1; // must be a power of 2

        while (tmp_ports > 0) {
            tmp_ports >>= 1;
            vnet_state->io_range_size <<= 1;
        }

        // this is to account for any low order bits being set in num_ports
        // if there are none, then num_ports was already a power of 2 so we shift right to reset it
        if ((num_ports & ((vnet_state->io_range_size >> 1) - 1)) == 0) {
            vnet_state->io_range_size >>= 1;
        }

        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
        bars[0].num_ports = vnet_state->io_range_size;
        bars[0].io_read = vnet_virtio_io_read;
        bars[0].io_write = vnet_virtio_io_write;
        bars[0].private_data = vnet_state;

        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
                                         0, 5 /*PCI_AUTO_DEV_NUM*/, 0,
                                         "LNX_VIRTIO_VNET", bars,
                                         NULL, NULL, NULL, vnet_state);

        if (!pci_dev) {
            PrintError("Could not register PCI Device\n");
            return -1;
        }

        pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
        pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
        pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
        pci_dev->config_header.class = PCI_CLASS_MEMORY;
        pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
        pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
        pci_dev->config_header.intr_pin = 1;
        pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)


        vnet_state->pci_dev = pci_dev;
        vnet_state->pci_bus = pci_bus;
    }

    virtio_reset(vnet_state);

    struct v3_vnet_bridge_ops brg_ops;
    brg_ops.input = vnet_pkt_input_cb;
    brg_ops.polling_pkt = vnet_virtio_poll;
    brg_ops.xcall_input = vnet_pkt_input_xcall;

    V3_Print("Registering Virtio device as vnet bridge\n");
    v3_vnet_add_bridge(vm, &brg_ops, (void *)vnet_state);

    return 0;
}


device_register("LNX_VIRTIO_VNET", dev_init)