Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


deallocation of devices
[palacios.git] / palacios / src / devices / lnx_virtio_vnet.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11  * Copyright (c) 2008, Lei Xia <lxia@cs.northwestern.edu>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Jack Lange <jarusl@cs.northwestern.edu>
16  *             Lei Xia <lxia@cs.northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_dev_mgr.h>
24 #include <palacios/vm_guest_mem.h>
25 #include <devices/lnx_virtio_pci.h>
26 #include <palacios/vmm_vnet.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <devices/pci.h>
29
30
// Compile PrintDebug down to nothing unless virtio-vnet debugging is
// enabled in the build configuration.
#ifndef CONFIG_DEBUG_LINUX_VIRTIO_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
35
36
#define QUEUE_SIZE 4096      // entries in each data (xmit/recv) virtqueue
#define CMD_QUEUE_SIZE 128   // entries in the control virtqueue (queue 0)
#define NUM_QUEUES 3         // control + xmit + recv
40
// Device-specific config space exposed to the guest after the standard
// virtio registers (read via the default case of vnet_virtio_io_read).
struct vnet_config {
    uint32_t num_devs;      // presumably the count of vnet links -- written nowhere in this file
    uint32_t num_routes;    // presumably the count of routes -- written nowhere in this file
} __attribute__((packed));
45
46
// Indices into virtio_vnet_state.queue[]
#define CTRL_QUEUE 0
#define XMIT_QUEUE 1
#define RECV_QUEUE 2
50
// Per-instance state of the virtio VNET bridge device.
struct virtio_vnet_state {
    struct v3_vm_info * vm;            // owning VM
    struct vnet_config vnet_cfg;       // device-specific config space
    struct virtio_config virtio_cfg;   // standard virtio register block

    struct vm_device * pci_bus;        // PCI bus the device is registered on
    struct pci_device * pci_dev;       // our PCI function (for IRQ raise/lower)
        
    struct virtio_queue queue[NUM_QUEUES];   // ctrl / xmit / recv rings

    struct virtio_queue * cur_queue;   // queue selected via VRING_Q_SEL_PORT

    int io_range_size;                 // size of the IO BAR (power of two)
    v3_lock_t lock;                    // serializes the RX path (vnet_pkt_input_cb)

    // Statistics; several are only incremented, never read, in this file.
    ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
    int ready;                         // set once the RECV queue ring is configured
};
69
// Control-queue command opcodes (vnet_ctrl_hdr.cmd_type).
// Only VNET_ADD_ROUTE is actually handled in handle_cmd_kick.
#define VNET_GET_ROUTES 10
#define VNET_ADD_ROUTE 11
#define VNET_DEL_ROUTE 12

#define VNET_GET_LINKS 20
#define VNET_ADD_LINK 21
#define VNET_DEL_LINK 22
77
// Header carried in the first descriptor of a control-queue chain.
struct vnet_ctrl_hdr {
    uint8_t cmd_type;     // one of the VNET_* opcodes above
    uint32_t num_cmds;    // number of payload descriptors that follow
} __attribute__((packed));
83
84
// Wire format of a packet buffer on the xmit/recv rings.
struct vnet_bridge_pkt {
    uint32_t link_id;                   // VNET link id (destination id on RX)
    uint32_t pkt_size;                  // number of valid bytes in pkt[]
    uint8_t pkt[ETHERNET_PACKET_LEN];   // packet payload (fixed-size buffer)
}__attribute__((packed));
90
91
/*
 * Reset the device to its power-on state: clear all three virtqueues,
 * select the control queue, and zero the virtio status/ISR registers
 * and the vnet config space.  Called from dev_init and whenever the
 * guest writes 0 to VIRTIO_STATUS_PORT.  Returns 0 (cannot fail).
 */
static int virtio_reset(struct virtio_vnet_state * vnet_state) {

    memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);

    vnet_state->cur_queue = &(vnet_state->queue[0]);

    vnet_state->virtio_cfg.status = 0;
    vnet_state->virtio_cfg.pci_isr = 0;

    // Queue 0 is the small control queue; 1/2 are the large data queues.
    vnet_state->queue[0].queue_size = CMD_QUEUE_SIZE;
    vnet_state->queue[1].queue_size = QUEUE_SIZE;
    vnet_state->queue[2].queue_size = QUEUE_SIZE;

    memset(&(vnet_state->vnet_cfg), 0, sizeof(struct vnet_config));
    // NOTE(review): the lock is re-initialized on every reset, including
    // guest-triggered resets -- confirm this cannot race with
    // vnet_pkt_input_cb, which may hold the lock from another context.
    v3_lock_init(&(vnet_state->lock));

    return 0;
}
110
111
112
113 static int get_desc_count(struct virtio_queue * q, int index) {
114     struct vring_desc * tmp_desc = &(q->desc[index]);
115     int cnt = 1;
116     
117     while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
118         tmp_desc = &(q->desc[tmp_desc->next]);
119         cnt++;
120     }
121
122     return cnt;
123 }
124
125
126
127
128 static int handle_cmd_kick(struct guest_info * core, 
129                            struct virtio_vnet_state * vnet_state) {
130     struct virtio_queue * q = &(vnet_state->queue[0]);
131     
132     PrintDebug("VNET Bridge: Handling command  queue\n");
133
134     while (q->cur_avail_idx != q->avail->index) {
135         struct vring_desc * hdr_desc = NULL;
136         struct vring_desc * buf_desc = NULL;
137         struct vring_desc * status_desc = NULL;
138         uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
139         uint16_t desc_cnt = get_desc_count(q, desc_idx);
140         struct vnet_ctrl_hdr * hdr = NULL;
141         int i;
142         int xfer_len = 0;
143         uint8_t * status_ptr = NULL;
144         uint8_t status = 0;
145
146
147         PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
148
149         if (desc_cnt < 3) {
150             PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
151             return -1;
152         }
153         
154         hdr_desc = &(q->desc[desc_idx]);
155
156         if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
157             PrintError("Could not translate VirtioVNET header address\n");
158             return -1;
159         }
160
161         desc_idx = hdr_desc->next;
162         
163         if (hdr->cmd_type == VNET_ADD_ROUTE) {
164             
165             for (i = 0; i < hdr->num_cmds; i++) {
166                 uint8_t tmp_status = 0;
167                 struct v3_vnet_route * route = NULL;
168                 
169                 buf_desc = &(q->desc[desc_idx]);
170
171                 if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
172                     PrintError("Could not translate route address\n");
173                     return -1;
174                 }
175
176                 // add route
177                 PrintDebug("VNET Bridge: Adding VNET Route\n");
178
179                 tmp_status = v3_vnet_add_route(*route);
180
181                 PrintDebug("VNET Route Added\n");
182
183                 if (tmp_status != 0) {
184                     PrintError("Error adding VNET ROUTE\n");
185                     status = tmp_status;
186                 }
187
188                 xfer_len += buf_desc->length;
189                 desc_idx = buf_desc->next;
190             }
191
192         } 
193
194
195
196         status_desc = &(q->desc[desc_idx]);
197
198         if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
199             PrintError("VirtioVNET Error could not translate status address\n");
200             return -1;
201         }
202
203         xfer_len += status_desc->length;
204         *status_ptr = status;
205
206         PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
207         q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
208         q->used->ring[q->used->index % QUEUE_SIZE].length = xfer_len; // set to total inbound xfer length
209
210         q->used->index++;
211         q->cur_avail_idx++;
212     }
213
214
215     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
216         PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
217         v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
218         vnet_state->virtio_cfg.pci_isr = 1;
219     }
220
221
222     return 0;
223 }
224
225
226 static int vnet_pkt_input_cb(struct v3_vm_info * vm,  
227                              struct v3_vnet_pkt * pkt, 
228                              void * private_data){
229     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
230     struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
231     int ret_val = -1;
232     unsigned long flags;
233
234     flags = v3_lock_irqsave(vnet_state->lock);
235         
236     if (q->ring_avail_addr == 0) {
237         PrintError("Queue is not set\n");
238         goto exit;
239     }
240
241     if (q->cur_avail_idx != q->avail->index) {
242         uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
243         struct vring_desc * pkt_desc = NULL;
244         struct vnet_bridge_pkt * virtio_pkt = NULL;
245
246         pkt_desc = &(q->desc[pkt_idx]);
247         PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);
248
249         if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
250             PrintError("Could not translate buffer address\n");
251             goto exit;
252         }
253
254         PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);
255
256         // Fill in dst packet buffer
257         virtio_pkt->link_id = pkt->dst_id;
258         virtio_pkt->pkt_size = pkt->size;
259         memcpy(virtio_pkt->pkt, pkt->data, pkt->size);
260         
261         q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
262         q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); 
263
264         q->used->index++;
265         q->cur_avail_idx++;
266     } else {
267         vnet_state->pkt_drop ++;
268     }
269
270     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
271         v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
272         vnet_state->virtio_cfg.pci_isr = 0x1;
273         PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
274     }
275
276     ret_val = 0;
277         
278 exit:
279
280     v3_unlock_irqrestore(vnet_state->lock, flags);
281  
282     return ret_val;
283 }
284
285 static int handle_pkt_kick(struct guest_info * core, 
286                            struct virtio_vnet_state * vnet_state) 
287 {
288     struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
289     int recvd = 0;
290         
291     if (q->ring_avail_addr == 0) {
292         return -1;
293     }
294
295     while (q->cur_avail_idx != q->avail->index) {
296         uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
297         struct vring_desc * pkt_desc = NULL;
298         struct vnet_bridge_pkt * virtio_pkt = NULL;
299
300         pkt_desc = &(q->desc[desc_idx]);
301
302         PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length);
303         
304         if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
305             PrintError("Could not translate buffer address\n");
306             return -1;
307         }
308
309         //PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);
310
311         struct v3_vnet_pkt pkt;
312         pkt.size = virtio_pkt->pkt_size;
313         pkt.src_type = LINK_EDGE;
314         pkt.src_id = 0;
315         memcpy(pkt.header, virtio_pkt->pkt, ETHERNET_HEADER_LEN);
316         pkt.data = virtio_pkt->pkt;
317
318         v3_vnet_send_pkt(&pkt, NULL);
319         
320         q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
321         q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
322         q->used->index++;
323
324         vnet_state->pkt_sent ++;
325         recvd ++;
326
327         q->cur_avail_idx++;
328     }
329
330     if(recvd == 0){
331         return 0;
332     }
333
334     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
335             v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
336             vnet_state->virtio_cfg.pci_isr = 0x1;
337     }
338         
339     return 0;
340 }
341
342 static void vnet_virtio_poll(struct v3_vm_info * vm, void * private_data){
343     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
344
345     if(vm == vnet_state->vm){   
346         handle_pkt_kick(&(vm->cores[0]), vnet_state);
347     }
348 }
349
/*
 * Kick handler for the receive queue: the guest has posted fresh RX
 * buffers.  Currently a no-op (bridge re-enable is stubbed out below);
 * always succeeds.
 */
static int handle_rx_kick(struct guest_info *core, 
                          struct virtio_vnet_state * vnet_state) 
{
    //v3_vnet_enable_bridge();

    return 0;
}
357
358 static int vnet_virtio_io_write(struct guest_info * core, 
359                                 uint16_t port, void * src, 
360                                 uint_t length, void * private_data) {
361     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
362     int port_idx = port % vnet_state->io_range_size;
363
364     PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
365                port, length, *(uint32_t *)src);
366
367     vnet_state->total_exit ++;
368     switch (port_idx) {
369         case GUEST_FEATURES_PORT:
370             if (length != 4) {
371                 PrintError("Illegal write length for guest features\n");
372                 return -1;
373             }    
374             vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;
375
376             break;
377         case VRING_PG_NUM_PORT:
378             if (length == 4) {
379                 addr_t pfn = *(uint32_t *)src;
380                 addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
381
382                 vnet_state->cur_queue->pfn = pfn;
383                 
384                 vnet_state->cur_queue->ring_desc_addr = page_addr ;
385                 vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
386                 vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
387                                                  sizeof(struct vring_avail)    + \
388                                                  (QUEUE_SIZE * sizeof(uint16_t)));
389                 
390                 // round up to next page boundary.
391                 vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
392
393                 if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
394                     PrintError("Could not translate ring descriptor address\n");
395                     return -1;
396                 }
397
398                 if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
399                     PrintError("Could not translate ring available address\n");
400                     return -1;
401                 }
402
403                 if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
404                     PrintError("Could not translate ring used address\n");
405                     return -1;
406                 }
407
408                 PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
409                            (void *)(vnet_state->cur_queue->ring_desc_addr),
410                            (void *)(vnet_state->cur_queue->ring_avail_addr),
411                            (void *)(vnet_state->cur_queue->ring_used_addr));
412
413                 PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
414                            vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);
415
416                 if(vnet_state->queue[RECV_QUEUE].avail != NULL){
417                     vnet_state->ready = 1;
418                     vnet_state->queue[RECV_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
419                 }
420
421                 //No notify when there is pkt tx from guest
422                 //palacios will do the polling
423                 if(vnet_state->queue[XMIT_QUEUE].used != NULL){
424                     vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
425                 }
426             } else {
427                 PrintError("Illegal write length for page frame number\n");
428                 return -1;
429             }
430             break;
431         case VRING_Q_SEL_PORT:
432             vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
433
434             if (vnet_state->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
435                 PrintError("VNET Bridge device has no qeueues. Selected %d\n", 
436                            vnet_state->virtio_cfg.vring_queue_selector);
437                 return -1;
438             }
439             
440             vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);
441
442             break;
443         case VRING_Q_NOTIFY_PORT: {
444             uint16_t queue_idx = *(uint16_t *)src;
445
446             PrintDebug("VNET Bridge: Handling Kick\n");
447
448             if (queue_idx == 0) {
449                 if (handle_cmd_kick(core, vnet_state) == -1) {
450                     PrintError("Could not handle Virtio VNET Control command\n");
451                     return -1;
452                 }
453             } else if (queue_idx == 1) {
454                 if (handle_pkt_kick(core, vnet_state) == -1){
455                     PrintError("Could not handle Virtio VNET TX\n");
456                     return -1;
457                 }
458                 PrintError("Notify on TX\n");
459             } else if (queue_idx == 2) {
460                 if (handle_rx_kick(core, vnet_state) == -1){
461                     PrintError("Could not handle Virtio RX buffer refills Kick\n");
462                     return -1;
463                 }
464                 vnet_state->rx_exit ++;
465             } else {
466                 PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
467                 return -1;
468             }
469
470             break;
471         }
472         case VIRTIO_STATUS_PORT:
473             vnet_state->virtio_cfg.status = *(uint8_t *)src;
474
475             if (vnet_state->virtio_cfg.status == 0) {
476                 PrintDebug("VNET Bridge: Resetting device\n");
477                 virtio_reset(vnet_state);
478             }
479
480             break;
481
482         case VIRTIO_ISR_PORT:
483             vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
484             break;
485         default:
486             return -1;
487             break;
488     }
489
490     return length;
491 }
492
493
494 static int vnet_virtio_io_read(struct guest_info * core, 
495                                uint16_t port, void * dst, 
496                                uint_t length, void * private_data) {
497
498     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
499     int port_idx = port % vnet_state->io_range_size;
500
501     switch (port_idx) {
502         case HOST_FEATURES_PORT:
503             if (length != 4) {
504                 PrintError("Illegal read length for host features\n");
505                 return -1;
506             }
507
508             *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;
509         
510             break;
511         case VRING_PG_NUM_PORT:
512             if (length != 4) {
513                 PrintError("Illegal read length for page frame number\n");
514                 return -1;
515             }
516
517             *(uint32_t *)dst = vnet_state->cur_queue->pfn;
518
519             break;
520         case VRING_SIZE_PORT:
521             if (length != 2) {
522                 PrintError("Illegal read length for vring size\n");
523                 return -1;
524             }
525                 
526             *(uint16_t *)dst = vnet_state->cur_queue->queue_size;
527
528             break;
529
530         case VIRTIO_STATUS_PORT:
531             if (length != 1) {
532                 PrintError("Illegal read length for status\n");
533                 return -1;
534             }
535
536             *(uint8_t *)dst = vnet_state->virtio_cfg.status;
537             break;
538
539         case VIRTIO_ISR_PORT:
540             *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
541             vnet_state->virtio_cfg.pci_isr = 0;
542             v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
543             break;
544
545         default:
546             if ( (port_idx >= sizeof(struct virtio_config)) && 
547                  (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
548                 int cfg_offset = port_idx - sizeof(struct virtio_config);
549                 uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);
550
551                 memcpy(dst, cfg_ptr + cfg_offset, length);
552                 
553             } else {
554                 PrintError("Read of Unhandled Virtio Read\n");
555                 return -1;
556             }
557           
558             break;
559     }
560
561     return length;
562 }
563
564
/*
 * Device destructor invoked by the device manager: release the device
 * state.  Always succeeds.
 */
static int virtio_free(struct virtio_vnet_state * vnet_state) {

    // unregister from PCI

    V3_Free(vnet_state);

    return 0;
}
572
573
// Device-manager hooks: only deallocation is supported.
static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))virtio_free,
};
577
578 static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
579     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
580     struct virtio_vnet_state * vnet_state = NULL;
581     struct pci_device * pci_dev = NULL;
582     char * name = v3_cfg_val(cfg, "name");
583
584     PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);
585
586     if (pci_bus == NULL) {
587         PrintError("VNET Bridge device require a PCI Bus");
588         return -1;
589     }
590     
591     vnet_state  = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
592     memset(vnet_state, 0, sizeof(struct virtio_vnet_state));
593         
594     vnet_state->vm = vm;
595
596     struct vm_device * dev = v3_add_device(vm, name, &dev_ops, vnet_state);
597
598     if (dev == NULL) {
599         PrintError("Could not attach device %s\n", name);
600         V3_Free(vnet_state);
601         return -1;
602     }
603
604
605     // PCI initialization
606     {
607         struct v3_pci_bar bars[6];
608         int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
609         int tmp_ports = num_ports;
610         int i;
611
612         // This gets the number of ports, rounded up to a power of 2
613         vnet_state->io_range_size = 1; // must be a power of 2
614
615         while (tmp_ports > 0) {
616             tmp_ports >>= 1;
617             vnet_state->io_range_size <<= 1;
618         }
619         
620         // this is to account for any low order bits being set in num_ports
621         // if there are none, then num_ports was already a power of 2 so we shift right to reset it
622         if ((num_ports & ((vnet_state->io_range_size >> 1) - 1)) == 0) {
623             vnet_state->io_range_size >>= 1;
624         }
625
626         for (i = 0; i < 6; i++) {
627             bars[i].type = PCI_BAR_NONE;
628         }
629
630         bars[0].type = PCI_BAR_IO;
631         bars[0].default_base_port = -1;
632         bars[0].num_ports = vnet_state->io_range_size;
633         bars[0].io_read = vnet_virtio_io_read;
634         bars[0].io_write = vnet_virtio_io_write;
635         bars[0].private_data = vnet_state;
636
637         pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
638                                          0, 5 /*PCI_AUTO_DEV_NUM*/, 0,
639                                          "LNX_VIRTIO_VNET", bars,
640                                          NULL, NULL, NULL, vnet_state);
641
642         if (!pci_dev) {
643             PrintError("Could not register PCI Device\n");
644             v3_remove_device(dev);
645             return -1;
646         }
647         
648         pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
649         pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
650         pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
651         pci_dev->config_header.class = PCI_CLASS_MEMORY;
652         pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
653         pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
654         pci_dev->config_header.intr_pin = 1;
655         pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
656
657
658         vnet_state->pci_dev = pci_dev;
659         vnet_state->pci_bus = pci_bus;
660     }
661
662     virtio_reset(vnet_state);
663
664     struct v3_vnet_bridge_ops brg_ops;
665     brg_ops.input = vnet_pkt_input_cb;
666     brg_ops.poll = vnet_virtio_poll;
667
668     V3_Print("Registering Virtio device as vnet bridge\n");
669
670     v3_vnet_add_bridge(vm, &brg_ops, CTL_VM_BRIDGE, (void *)vnet_state);
671
672     return 0;
673 }
674
675
676 device_register("LNX_VIRTIO_VNET", dev_init)