Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way, as shown below.
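For example, to list the remote branches and then track one of the release branches (the branch name below is only illustrative; use one of the names reported by "git branch -r"):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2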


VNET code cleanup and rearrangement
palacios/src/devices/lnx_virtio_vnet.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, Lei Xia <lxia@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *         Lei Xia <lxia@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest_mem.h>
#include <devices/lnx_virtio_pci.h>
#include <palacios/vmm_vnet.h>
#include <palacios/vmm_sprintf.h>
#include <devices/pci.h>


#ifndef CONFIG_DEBUG_LINUX_VIRTIO_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


#define QUEUE_SIZE 4096
#define CMD_QUEUE_SIZE 128
#define NUM_QUEUES 3

struct vnet_config {
    uint32_t num_devs;
    uint32_t num_routes;
} __attribute__((packed));


#define CTRL_QUEUE 0
#define XMIT_QUEUE 1
#define RECV_QUEUE 2

struct virtio_vnet_state {
    struct v3_vm_info * vm;
    struct vnet_config vnet_cfg;
    struct virtio_config virtio_cfg;

    struct vm_device * pci_bus;
    struct pci_device * pci_dev;

    struct virtio_queue queue[NUM_QUEUES];

    struct virtio_queue * cur_queue;

    int io_range_size;
    v3_lock_t lock;

    ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
    int ready;
};

#define VNET_GET_ROUTES 10
#define VNET_ADD_ROUTE 11
#define VNET_DEL_ROUTE 12

#define VNET_GET_LINKS 20
#define VNET_ADD_LINK 21
#define VNET_DEL_LINK 22

// structure of the vnet command header
struct vnet_ctrl_hdr {
    uint8_t cmd_type;
    uint32_t num_cmds;
} __attribute__((packed));


struct vnet_bridge_pkt {
    uint32_t link_id;
    uint32_t pkt_size;
    uint8_t pkt[ETHERNET_PACKET_LEN];
} __attribute__((packed));


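/* Reset the virtio device state: clear the virtqueues, status, and ISR,
 * and restore the default queue sizes. */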
static int virtio_reset(struct virtio_vnet_state * vnet_state) {

    memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);

    vnet_state->cur_queue = &(vnet_state->queue[0]);

    vnet_state->virtio_cfg.status = 0;
    vnet_state->virtio_cfg.pci_isr = 0;

    vnet_state->queue[0].queue_size = CMD_QUEUE_SIZE;
    vnet_state->queue[1].queue_size = QUEUE_SIZE;
    vnet_state->queue[2].queue_size = QUEUE_SIZE;

    memset(&(vnet_state->vnet_cfg), 0, sizeof(struct vnet_config));
    v3_lock_init(&(vnet_state->lock));

    return 0;
}


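/* Count the descriptors in the chain starting at 'index' by following the NEXT flags. */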
static int get_desc_count(struct virtio_queue * q, int index) {
    struct vring_desc * tmp_desc = &(q->desc[index]);
    int cnt = 1;

    while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
        tmp_desc = &(q->desc[tmp_desc->next]);
        cnt++;
    }

    return cnt;
}


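/* Process a kick on the control queue (queue 0). Each request is a vnet_ctrl_hdr
 * descriptor, followed by one or more command buffers (e.g. routes to add), and a
 * final status byte that is written back to the guest. */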
static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
    struct virtio_queue * q = &(vnet_state->queue[0]);

    PrintDebug("VNET Bridge: Handling command queue\n");

    while (q->cur_avail_idx != q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        uint16_t desc_cnt = get_desc_count(q, desc_idx);
        struct vnet_ctrl_hdr * hdr = NULL;
        int i;
        int xfer_len = 0;
        uint8_t * status_ptr = NULL;
        uint8_t status = 0;


        PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", 
                   desc_cnt, q->cur_avail_idx % q->queue_size, desc_idx);

        if (desc_cnt < 3) {
            PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
            return -1;
        }

        hdr_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate VirtioVNET header address\n");
            return -1;
        }

        desc_idx = hdr_desc->next;

        if (hdr->cmd_type == VNET_ADD_ROUTE) {

            for (i = 0; i < hdr->num_cmds; i++) {
                uint8_t tmp_status = 0;
                struct v3_vnet_route * route = NULL;

                buf_desc = &(q->desc[desc_idx]);

                if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
                    PrintError("Could not translate route address\n");
                    return -1;
                }

                // add route
                PrintDebug("VNET Bridge: Adding VNET Route\n");

                tmp_status = v3_vnet_add_route(*route);

                PrintDebug("VNET Route Added\n");

                if (tmp_status != 0) {
                    PrintError("Error adding VNET ROUTE\n");
                    status = tmp_status;
                }

                xfer_len += buf_desc->length;
                desc_idx = buf_desc->next;
            }

        }


        status_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("VirtioVNET Error could not translate status address\n");
            return -1;
        }

        xfer_len += status_desc->length;
        *status_ptr = status;

        PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = xfer_len; // set to total inbound xfer length

        q->used->index++;
        q->cur_avail_idx++;
    }


    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n", vnet_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 1;
    }


    return 0;
}


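/* Bridge input callback: copy a packet arriving from the VNET core into the next
 * available receive-queue buffer and notify the guest via the PCI interrupt. */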
static int vnet_pkt_input_cb(struct v3_vm_info * vm,  
                             struct v3_vnet_pkt * pkt, 
                             void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
    int ret_val = -1;
    unsigned long flags;

    flags = v3_lock_irqsave(vnet_state->lock);

    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        goto exit;
    }

    if (q->cur_avail_idx != q->avail->index) {
        uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
        struct vnet_bridge_pkt * virtio_pkt = NULL;

        pkt_desc = &(q->desc[pkt_idx]);
        PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);

        if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            goto exit;
        }

        PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);

        // Fill in dst packet buffer
        virtio_pkt->link_id = pkt->dst_id;
        virtio_pkt->pkt_size = pkt->size;
        memcpy(virtio_pkt->pkt, pkt->data, pkt->size);

        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); 

        q->used->index++;
        q->cur_avail_idx++;
    } else {
        vnet_state->pkt_drop++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
        PrintDebug("Raising IRQ %d\n", vnet_state->pci_dev->config_header.intr_line);
    }

    ret_val = 0;

exit:

    v3_unlock_irqrestore(vnet_state->lock, flags);

    return ret_val;
}

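/* Drain the transmit queue: hand each guest packet buffer to the VNET core via
 * v3_vnet_send_pkt() and recycle the descriptor in the used ring. */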
static int handle_pkt_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) 
{
    struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
    int recvd = 0;

    if (q->ring_avail_addr == 0) {
        return -1;
    }

    while (q->cur_avail_idx != q->avail->index) {
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
        struct vnet_bridge_pkt * virtio_pkt = NULL;

        pkt_desc = &(q->desc[desc_idx]);

        PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length);

        if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            return -1;
        }

        //PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);

        struct v3_vnet_pkt pkt;
        pkt.size = virtio_pkt->pkt_size;
        pkt.src_type = LINK_EDGE;
        pkt.src_id = 0;
        memcpy(pkt.header, virtio_pkt->pkt, ETHERNET_HEADER_LEN);
        pkt.data = virtio_pkt->pkt;

        v3_vnet_send_pkt(&pkt, NULL);

        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
        q->used->index++;

        vnet_state->pkt_sent++;
        recvd++;

        q->cur_avail_idx++;
    }

    if (recvd == 0) {
        return 0;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
    }

    return 0;
}

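/* Poll callback registered with the VNET core: pull any pending transmit packets
 * out of the guest's transmit queue. */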
static void vnet_virtio_poll(struct v3_vm_info * vm, void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;

    if (vm == vnet_state->vm) {
        handle_pkt_kick(&(vm->cores[0]), vnet_state);
    }
}

static int handle_rx_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) 
{
    //v3_vnet_enable_bridge();

    return 0;
}

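/* Handle guest writes to the virtio I/O ports: feature negotiation, ring setup,
 * queue selection, queue kicks, and device status/ISR updates. */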
static int vnet_virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

    PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
               port, length, *(uint32_t *)src);

    vnet_state->total_exit++;

    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }    
            vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;

            break;
        case VRING_PG_NUM_PORT:
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);

                vnet_state->cur_queue->pfn = pfn;

                vnet_state->cur_queue->ring_desc_addr = page_addr;
                vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_avail_addr + \
                                                         sizeof(struct vring_avail) + \
                                                         (QUEUE_SIZE * sizeof(uint16_t)));

                // round up to next page boundary.
                vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(vnet_state->cur_queue->ring_desc_addr),
                           (void *)(vnet_state->cur_queue->ring_avail_addr),
                           (void *)(vnet_state->cur_queue->ring_used_addr));

                PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
                           vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);

                if (vnet_state->queue[RECV_QUEUE].avail != NULL) {
                    vnet_state->ready = 1;
                    vnet_state->queue[RECV_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
                }

                // No notify when there is pkt tx from guest
                // palacios will do the polling
                if (vnet_state->queue[XMIT_QUEUE].used != NULL) {
                    vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
                }
            } else {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            if (vnet_state->virtio_cfg.vring_queue_selector >= NUM_QUEUES) {
                PrintError("VNET Bridge device has no queue %d\n", 
                           vnet_state->virtio_cfg.vring_queue_selector);
                return -1;
            }

            vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);

            break;
        case VRING_Q_NOTIFY_PORT: {
            uint16_t queue_idx = *(uint16_t *)src;

            PrintDebug("VNET Bridge: Handling Kick\n");

            if (queue_idx == 0) {
                if (handle_cmd_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio VNET Control command\n");
                    return -1;
                }
            } else if (queue_idx == 1) {
                if (handle_pkt_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio VNET TX\n");
                    return -1;
                }
                PrintError("Notify on TX\n");
            } else if (queue_idx == 2) {
                if (handle_rx_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio RX buffer refill Kick\n");
                    return -1;
                }
                vnet_state->rx_exit++;
            } else {
                PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
                return -1;
            }

            break;
        }
        case VIRTIO_STATUS_PORT:
            vnet_state->virtio_cfg.status = *(uint8_t *)src;

            if (vnet_state->virtio_cfg.status == 0) {
                PrintDebug("VNET Bridge: Resetting device\n");
                virtio_reset(vnet_state);
            }

            break;

        case VIRTIO_ISR_PORT:
            vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
    }

    return length;
}

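/* Handle guest reads of the virtio I/O ports, including the device-specific
 * vnet_config region that follows the standard virtio registers. */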
static int vnet_virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {

    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;

            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->cur_queue->pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }

            *(uint16_t *)dst = vnet_state->cur_queue->queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = vnet_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
            vnet_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
            break;

        default:
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);

            } else {
                PrintError("Read of unhandled Virtio register\n");
                return -1;
            }

            break;
    }

    return length;
}



static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};

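/* Create the device: attach it to the VM, register it on the PCI bus with an I/O
 * BAR covering the virtio + vnet config registers, reset it, and register it with
 * the VNET core as the VM's bridge. */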
static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
    struct virtio_vnet_state * vnet_state = NULL;
    struct pci_device * pci_dev = NULL;
    char * name = v3_cfg_val(cfg, "name");

    PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);

    if (pci_bus == NULL) {
        PrintError("VNET Bridge device requires a PCI Bus\n");
        return -1;
    }

    vnet_state = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
    memset(vnet_state, 0, sizeof(struct virtio_vnet_state));

    vnet_state->vm = vm;

    struct vm_device * dev = v3_allocate_device(name, &dev_ops, vnet_state);

    if (v3_attach_device(vm, dev) == -1) {
        PrintError("Could not attach device %s\n", name);
        return -1;
    }


    // PCI initialization
    {
        struct v3_pci_bar bars[6];
        int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
        int tmp_ports = num_ports;
        int i;

        // This gets the number of ports, rounded up to a power of 2
        vnet_state->io_range_size = 1; // must be a power of 2

        while (tmp_ports > 0) {
            tmp_ports >>= 1;
            vnet_state->io_range_size <<= 1;
        }

        // this is to account for any low order bits being set in num_ports
        // if there are none, then num_ports was already a power of 2 so we shift right to reset it
        if ((num_ports & ((vnet_state->io_range_size >> 1) - 1)) == 0) {
            vnet_state->io_range_size >>= 1;
        }

        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
        bars[0].num_ports = vnet_state->io_range_size;
        bars[0].io_read = vnet_virtio_io_read;
        bars[0].io_write = vnet_virtio_io_write;
        bars[0].private_data = vnet_state;

        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
                                         0, 5 /*PCI_AUTO_DEV_NUM*/, 0,
                                         "LNX_VIRTIO_VNET", bars,
                                         NULL, NULL, NULL, vnet_state);

        if (!pci_dev) {
            PrintError("Could not register PCI Device\n");
            return -1;
        }

        pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
        pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
        pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
        pci_dev->config_header.class = PCI_CLASS_MEMORY;
        pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
        pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
        pci_dev->config_header.intr_pin = 1;
        pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)


        vnet_state->pci_dev = pci_dev;
        vnet_state->pci_bus = pci_bus;
    }

    virtio_reset(vnet_state);

    struct v3_vnet_bridge_ops brg_ops;
    brg_ops.input = vnet_pkt_input_cb;
    brg_ops.poll = vnet_virtio_poll;

    V3_Print("Registering Virtio device as vnet bridge\n");
    v3_vnet_add_bridge(vm, &brg_ops, CTL_VM_BRIDGE, (void *)vnet_state);

    return 0;
}


device_register("LNX_VIRTIO_VNET", dev_init)