Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way; a sketch follows below.
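For example, to list the remote branches and track one of the release branches (the branch name Release-1.3 below is only illustrative; use whatever "git branch -r" reports):

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3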


minor fix to virtio VNET device to make it compilable
[palacios.git] / palacios / src / devices / lnx_virtio_vnet.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, Lei Xia <lxia@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *         Lei Xia <lxia@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest_mem.h>
#include <devices/lnx_virtio_pci.h>
#include <vnet/vnet.h>
#include <palacios/vmm_sprintf.h>
#include <devices/pci.h>


#ifndef V3_CONFIG_DEBUG_LINUX_VIRTIO_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


#define QUEUE_SIZE 4096
#define CMD_QUEUE_SIZE 128
#define NUM_QUEUES 3

struct vnet_config {
    uint32_t num_devs;
    uint32_t num_routes;
} __attribute__((packed));


#define CTRL_QUEUE 0
#define XMIT_QUEUE 1
#define RECV_QUEUE 2

struct virtio_vnet_state {
    struct v3_vm_info * vm;
    struct vnet_config vnet_cfg;
    struct virtio_config virtio_cfg;

    struct vm_device * pci_bus;
    struct pci_device * pci_dev;

    struct virtio_queue queue[NUM_QUEUES];

    struct virtio_queue * cur_queue;

    int io_range_size;
    v3_lock_t lock;

    ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
    int ready;
};

#define VNET_GET_ROUTES 10
#define VNET_ADD_ROUTE 11
#define VNET_DEL_ROUTE 12

#define VNET_GET_LINKS 20
#define VNET_ADD_LINK 21
#define VNET_DEL_LINK 22

struct vnet_ctrl_hdr {
    uint8_t cmd_type;
    uint32_t num_cmds;
} __attribute__((packed));


struct vnet_bridge_pkt {
    uint32_t link_id;
    uint32_t pkt_size;
    uint8_t pkt[ETHERNET_PACKET_LEN];
} __attribute__((packed));

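/* Reset the device to its initial state: clear all three virtqueues,
 * clear the virtio status/ISR registers, restore the default queue sizes,
 * and zero the VNET config space. */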
static int virtio_reset(struct virtio_vnet_state * vnet_state) {

    memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);

    vnet_state->cur_queue = &(vnet_state->queue[0]);

    vnet_state->virtio_cfg.status = 0;
    vnet_state->virtio_cfg.pci_isr = 0;

    vnet_state->queue[0].queue_size = CMD_QUEUE_SIZE;
    vnet_state->queue[1].queue_size = QUEUE_SIZE;
    vnet_state->queue[2].queue_size = QUEUE_SIZE;

    memset(&(vnet_state->vnet_cfg), 0, sizeof(struct vnet_config));
    v3_lock_init(&(vnet_state->lock));

    return 0;
}

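/* Walk a descriptor chain starting at index and return the number of
 * descriptors linked together via the NEXT flag. */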
static int get_desc_count(struct virtio_queue * q, int index) {
    struct vring_desc * tmp_desc = &(q->desc[index]);
    int cnt = 1;

    while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
        tmp_desc = &(q->desc[tmp_desc->next]);
        cnt++;
    }

    return cnt;
}

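/* Process the control queue (queue 0). Each request is a chain of at least
 * three descriptors: a vnet_ctrl_hdr, one or more command buffers (currently
 * only VNET_ADD_ROUTE is handled), and a final status byte that is written
 * back to the guest. */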
static int handle_cmd_kick(struct guest_info * core, 
                           struct virtio_vnet_state * vnet_state) {
    struct virtio_queue * q = &(vnet_state->queue[0]);

    PrintDebug("VNET Bridge: Handling command queue\n");

    while (q->cur_avail_idx != q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        uint16_t desc_cnt = get_desc_count(q, desc_idx);
        struct vnet_ctrl_hdr * hdr = NULL;
        int i;
        int xfer_len = 0;
        uint8_t * status_ptr = NULL;
        uint8_t status = 0;

        PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", 
                   desc_cnt, q->cur_avail_idx % q->queue_size, desc_idx);

        if (desc_cnt < 3) {
            PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
            return -1;
        }

        hdr_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate VirtioVNET header address\n");
            return -1;
        }

        desc_idx = hdr_desc->next;

        if (hdr->cmd_type == VNET_ADD_ROUTE) {
            for (i = 0; i < hdr->num_cmds; i++) {
                uint8_t tmp_status = 0;
                struct v3_vnet_route * route = NULL;

                buf_desc = &(q->desc[desc_idx]);

                if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
                    PrintError("Could not translate route address\n");
                    return -1;
                }

                tmp_status = v3_vnet_add_route(*route);
                if (tmp_status != 0) {
                    PrintError("Error adding VNET ROUTE\n");
                    status = tmp_status;
                }

                PrintDebug("VNET Route Added\n");

                xfer_len += buf_desc->length;
                desc_idx = buf_desc->next;
            }
        }

        status_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("VirtioVNET Error could not translate status address\n");
            return -1;
        }

        xfer_len += status_desc->length;
        *status_ptr = status;

        PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);

        // The command queue is only CMD_QUEUE_SIZE entries long, so index the
        // rings modulo the actual queue size rather than the default QUEUE_SIZE.
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = xfer_len; // set to total inbound xfer length

        q->used->index++;
        q->cur_avail_idx++;
    }


    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n", vnet_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 1;
    }


    return 0;
}

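/* Bridge input callback, invoked by the VNET core when a packet is destined
 * for the guest. Copies the packet into the next available buffer on the
 * receive queue and raises the device interrupt unless the guest has
 * suppressed notifications. Returns -1 if the receive ring is not configured
 * or the buffer cannot be translated; otherwise returns 0 (dropping the
 * packet if no buffer is available). */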
static int vnet_pkt_input_cb(struct v3_vm_info * vm,  
                             struct v3_vnet_pkt * pkt, 
                             void * private_data){
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
    int ret_val = -1;
    unsigned long flags;

    flags = v3_lock_irqsave(vnet_state->lock);

    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        goto exit;
    }

    if (q->cur_avail_idx != q->avail->index) {
        uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
        struct vnet_bridge_pkt * virtio_pkt = NULL;

        pkt_desc = &(q->desc[pkt_idx]);

        if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            goto exit;
        }

        PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);

        // Fill in dst packet buffer
        virtio_pkt->link_id = pkt->dst_id;
        virtio_pkt->pkt_size = pkt->size;
        memcpy(virtio_pkt->pkt, pkt->data, pkt->size);

        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); 

        q->used->index++;
        q->cur_avail_idx++;
    } else {
        vnet_state->pkt_drop++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
        PrintDebug("Raising IRQ %d\n", vnet_state->pci_dev->config_header.intr_line);
    }

    ret_val = 0;

exit:

    v3_unlock_irqrestore(vnet_state->lock, flags);

    return ret_val;
}

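/* Drain the transmit queue: for each buffer the guest has posted, build a
 * v3_vnet_pkt that points into the guest buffer and hand it to the VNET core
 * via v3_vnet_send_pkt(), then return the buffer on the used ring. Called
 * both from the poll callback and from the TX queue kick handler. */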
static int do_tx_pkts(struct guest_info * core, 
                      struct virtio_vnet_state * vnet_state) 
{
    struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
    int recvd = 0;

    if (q->ring_avail_addr == 0) {
        return -1;
    }

    while (q->cur_avail_idx != q->avail->index) {
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
        struct vnet_bridge_pkt * virtio_pkt = NULL;

        pkt_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            return -1;
        }

        //PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);

        struct v3_vnet_pkt pkt;
        pkt.size = virtio_pkt->pkt_size;
        pkt.src_type = LINK_EDGE;
        pkt.src_id = 0;
        memcpy(pkt.header, virtio_pkt->pkt, ETHERNET_HEADER_LEN);
        pkt.data = virtio_pkt->pkt;

        v3_vnet_send_pkt(&pkt, NULL);

        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
        q->used->index++;

        vnet_state->pkt_sent++;
        recvd++;

        q->cur_avail_idx++;
    }

    if (recvd == 0) {
        return 0;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
    }

    return 0;
}

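/* Poll callback registered with the VNET core; transmits any pending guest
 * packets on behalf of core 0 of the owning VM. */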
static void vnet_virtio_poll(struct v3_vm_info * vm, void * private_data){
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;

    if (vm == vnet_state->vm) {
        do_tx_pkts(&(vm->cores[0]), vnet_state);
    }
}

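/* Kick handler for the receive queue. Nothing needs to be done when the guest
 * posts new receive buffers, so this is currently a no-op. */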
static int handle_rx_queue_kick(struct guest_info * core, 
                                struct virtio_vnet_state * vnet_state) 
{
    return 0;
}

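/* Handle writes to the device's virtio I/O ports: feature negotiation, vring
 * setup (translating the guest-supplied page frame into host pointers for the
 * descriptor, available, and used rings), queue selection, queue kicks, and
 * status/ISR updates. Returns the number of bytes written, or -1 on error. */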
static int vnet_virtio_io_write(struct guest_info * core, 
                                uint16_t port, void * src, 
                                uint_t length, void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

    PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
               port, length, *(uint32_t *)src);

    vnet_state->total_exit++;

    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }
            vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;

            break;
        case VRING_PG_NUM_PORT:
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);

                vnet_state->cur_queue->pfn = pfn;

                vnet_state->cur_queue->ring_desc_addr = page_addr;
                vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_avail_addr + \
                                                         sizeof(struct vring_avail) + \
                                                         (QUEUE_SIZE * sizeof(uint16_t)));

                // round up to next page boundary.
                vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(vnet_state->cur_queue->ring_desc_addr),
                           (void *)(vnet_state->cur_queue->ring_avail_addr),
                           (void *)(vnet_state->cur_queue->ring_used_addr));

                PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
                           vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);

                if (vnet_state->queue[RECV_QUEUE].avail != NULL) {
                    vnet_state->ready = 1;
                    vnet_state->queue[RECV_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
                }

                // No notify when there is pkt tx from guest
                // palacios will do the polling
                if (vnet_state->queue[XMIT_QUEUE].used != NULL) {
                    vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
                }
            } else {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            // Valid queue indices are 0 .. NUM_QUEUES - 1
            if (vnet_state->virtio_cfg.vring_queue_selector >= NUM_QUEUES) {
                PrintError("VNET Bridge device only has %d queues. Selected %d\n", 
                           NUM_QUEUES, vnet_state->virtio_cfg.vring_queue_selector);
                return -1;
            }

            vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);

            break;
        case VRING_Q_NOTIFY_PORT: {
            uint16_t queue_idx = *(uint16_t *)src;

            PrintDebug("VNET Bridge: Handling Kick\n");

            if (queue_idx == 0) {
                if (handle_cmd_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio VNET Control command\n");
                    return -1;
                }
            } else if (queue_idx == 1) {
                if (do_tx_pkts(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio VNET TX\n");
                    return -1;
                }
                PrintError("Notify on TX\n");
            } else if (queue_idx == 2) {
                if (handle_rx_queue_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio RX buffer refills Kick\n");
                    return -1;
                }
                vnet_state->rx_exit++;
            } else {
                PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
                return -1;
            }

            break;
        }
        case VIRTIO_STATUS_PORT:
            vnet_state->virtio_cfg.status = *(uint8_t *)src;

            if (vnet_state->virtio_cfg.status == 0) {
                PrintDebug("VNET Bridge: Resetting device\n");
                virtio_reset(vnet_state);
            }

            break;

        case VIRTIO_ISR_PORT:
            vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
            break;
    }

    return length;
}

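/* Handle reads from the device's virtio I/O ports: host features, current
 * queue PFN and size, status, and ISR (reading the ISR clears it and lowers
 * the interrupt). Reads beyond the standard virtio registers fall through to
 * the device-specific vnet_config space. */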
static int vnet_virtio_io_read(struct guest_info * core, 
                               uint16_t port, void * dst, 
                               uint_t length, void * private_data) {

    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;

            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->cur_queue->pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }

            *(uint16_t *)dst = vnet_state->cur_queue->queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = vnet_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
            vnet_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
            break;

        default:
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);

            } else {
                PrintError("Unhandled VirtioVNET read from port %d\n", port);
                return -1;
            }

            break;
    }

    return length;
}


static int virtio_free(struct virtio_vnet_state * vnet_state) {

    // unregister from PCI

    V3_Free(vnet_state);
    return 0;
}


static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))virtio_free,
};

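/* Device initialization: locate the PCI bus, allocate the device state,
 * register the virtio-vnet PCI device (one I/O BAR sized to cover the virtio
 * plus vnet config space), reset the virtio state, and register the device
 * with the VNET core as the guest-facing bridge. */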
static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
    struct virtio_vnet_state * vnet_state = NULL;
    struct pci_device * pci_dev = NULL;
    char * name = v3_cfg_val(cfg, "name");

    PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);

    if (pci_bus == NULL) {
        PrintError("VNET Bridge device requires a PCI bus\n");
        return -1;
    }

    vnet_state = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));

    if (vnet_state == NULL) {
        PrintError("Could not allocate VNET Bridge state\n");
        return -1;
    }

    memset(vnet_state, 0, sizeof(struct virtio_vnet_state));

    vnet_state->vm = vm;

    struct vm_device * dev = v3_add_device(vm, name, &dev_ops, vnet_state);

    if (dev == NULL) {
        PrintError("Could not attach device %s\n", name);
        V3_Free(vnet_state);
        return -1;
    }


    // PCI initialization
    {
        struct v3_pci_bar bars[6];
        int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
        int tmp_ports = num_ports;
        int i;

        // This gets the number of ports, rounded up to a power of 2
        vnet_state->io_range_size = 1; // must be a power of 2

        while (tmp_ports > 0) {
            tmp_ports >>= 1;
            vnet_state->io_range_size <<= 1;
        }

        // this is to account for any low order bits being set in num_ports
        // if there are none, then num_ports was already a power of 2 so we shift right to reset it
        if ((num_ports & ((vnet_state->io_range_size >> 1) - 1)) == 0) {
            vnet_state->io_range_size >>= 1;
        }

        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
        bars[0].num_ports = vnet_state->io_range_size;
        bars[0].io_read = vnet_virtio_io_read;
        bars[0].io_write = vnet_virtio_io_write;
        bars[0].private_data = vnet_state;

        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
                                         0, 5 /*PCI_AUTO_DEV_NUM*/, 0,
                                         "LNX_VIRTIO_VNET", bars,
                                         NULL, NULL, NULL, vnet_state);

        if (!pci_dev) {
            PrintError("Could not register PCI Device\n");
            v3_remove_device(dev);
            return -1;
        }

        pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
        pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
        pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
        pci_dev->config_header.class = PCI_CLASS_MEMORY;
        pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
        pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
        pci_dev->config_header.intr_pin = 1;
        pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)


        vnet_state->pci_dev = pci_dev;
        vnet_state->pci_bus = pci_bus;
    }

    virtio_reset(vnet_state);

    struct v3_vnet_bridge_ops brg_ops;
    brg_ops.input = vnet_pkt_input_cb;
    brg_ops.poll = vnet_virtio_poll;

    V3_Print("Registering Virtio device as vnet bridge\n");

    v3_vnet_add_bridge(vm, &brg_ops, CTL_VM_BRIDGE, (void *)vnet_state);

    return 0;
}


device_register("LNX_VIRTIO_VNET", dev_init)