Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.
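For example, to track a release branch, the command looks like the following (the branch name here is illustrative; run "git branch -r" to list the branches that actually exist):

  git checkout --track -b Release-1.2 origin/Release-1.2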


palacios/src/devices/lnx_virtio_vnet.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, Lei Xia <lxia@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *             Lei Xia <lxia@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest_mem.h>
#include <devices/lnx_virtio_pci.h>
#include <palacios/vmm_vnet.h>
#include <palacios/vmm_sprintf.h>
#include <devices/pci.h>


#ifndef CONFIG_DEBUG_LINUX_VIRTIO_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


#define QUEUE_SIZE 4096
#define CMD_QUEUE_SIZE 128
#define NUM_QUEUES 3

struct vnet_config {
    uint32_t num_devs;
    uint32_t num_routes;
} __attribute__((packed));


#define CTRL_QUEUE 0
#define XMIT_QUEUE 1
#define RECV_QUEUE 2

struct virtio_vnet_state {
    struct v3_vm_info * vm;
    struct vnet_config vnet_cfg;
    struct virtio_config virtio_cfg;

    struct vm_device * pci_bus;
    struct pci_device * pci_dev;

    struct virtio_queue queue[NUM_QUEUES];

    struct virtio_queue * cur_queue;

    int io_range_size;
    v3_lock_t lock;

    ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
    int ready;
};

#define VNET_GET_ROUTES 10
#define VNET_ADD_ROUTE 11
#define VNET_DEL_ROUTE 12

#define VNET_GET_LINKS 20
#define VNET_ADD_LINK 21
#define VNET_DEL_LINK 22

// structure of the vnet command header
struct vnet_ctrl_hdr {
    uint8_t cmd_type;
    uint32_t num_cmds;
} __attribute__((packed));


struct vnet_bridge_pkt {
    uint32_t link_id;
    uint32_t pkt_size;
    uint8_t pkt[ETHERNET_PACKET_LEN];
} __attribute__((packed));

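// Reset the device to its initial state: clear all three rings, point the
// queue selector back at the control queue, clear the status and ISR
// registers, and restore the default queue sizes and vnet configuration.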
static int virtio_reset(struct virtio_vnet_state * vnet_state) {

    memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);

    vnet_state->cur_queue = &(vnet_state->queue[0]);

    vnet_state->virtio_cfg.status = 0;
    vnet_state->virtio_cfg.pci_isr = 0;

    vnet_state->queue[0].queue_size = CMD_QUEUE_SIZE;
    vnet_state->queue[1].queue_size = QUEUE_SIZE;
    vnet_state->queue[2].queue_size = QUEUE_SIZE;

    memset(&(vnet_state->vnet_cfg), 0, sizeof(struct vnet_config));
    v3_lock_init(&(vnet_state->lock));

    return 0;
}

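// Count the descriptors in a chain by following the NEXT flag from the
// descriptor at 'index' until the chain ends.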
static int get_desc_count(struct virtio_queue * q, int index) {
    struct vring_desc * tmp_desc = &(q->desc[index]);
    int cnt = 1;
    
    while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
        tmp_desc = &(q->desc[tmp_desc->next]);
        cnt++;
    }

    return cnt;
}

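// Process the control queue. Each request is a descriptor chain of at least
// three descriptors: a vnet_ctrl_hdr, one buffer per command (currently only
// VNET_ADD_ROUTE is handled), and a final one-byte status buffer that is
// written back to the guest.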
static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
    struct virtio_queue * q = &(vnet_state->queue[0]);
    
    PrintDebug("VNET Bridge: Handling command queue\n");

    while (q->cur_avail_idx != q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        uint16_t desc_cnt = get_desc_count(q, desc_idx);
        struct vnet_ctrl_hdr * hdr = NULL;
        int i;
        int xfer_len = 0;
        uint8_t * status_ptr = NULL;
        uint8_t status = 0;


        PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % q->queue_size, desc_idx);

        if (desc_cnt < 3) {
            PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
            return -1;
        }
        
        hdr_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate VirtioVNET header address\n");
            return -1;
        }

        desc_idx = hdr_desc->next;
        
        if (hdr->cmd_type == VNET_ADD_ROUTE) {
            
            for (i = 0; i < hdr->num_cmds; i++) {
                uint8_t tmp_status = 0;
                struct v3_vnet_route * route = NULL;
                
                buf_desc = &(q->desc[desc_idx]);

                if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
                    PrintError("Could not translate route address\n");
                    return -1;
                }

                // add route
                PrintDebug("VNET Bridge: Adding VNET Route\n");

                tmp_status = v3_vnet_add_route(*route);

                PrintDebug("VNET Route Added\n");

                if (tmp_status != 0) {
                    PrintError("Error adding VNET ROUTE\n");
                    status = tmp_status;
                }

                xfer_len += buf_desc->length;
                desc_idx = buf_desc->next;
            }

        }



        status_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("VirtioVNET Error: could not translate status address\n");
            return -1;
        }

        xfer_len += status_desc->length;
        *status_ptr = status;

        PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = xfer_len; // set to total inbound xfer length

        q->used->index++;
        q->cur_avail_idx++;
    }


    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 1;
    }


    return 0;
}

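// Bridge input callback: called by the VNET core to deliver a packet to the
// guest. Copies the packet into the next available RX-queue buffer, fills in
// the used ring, and raises the PCI interrupt unless the guest has disabled
// notifications. The packet is dropped if no RX buffer is available.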
static int vnet_pkt_input_cb(struct v3_vm_info * vm,  struct v3_vnet_pkt *pkt, void * private_data){
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
    int ret_val = -1;
    unsigned long flags;
    uint16_t sent;

    flags = v3_lock_irqsave(vnet_state->lock);
        
    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        goto exit;
    }

    if (q->cur_avail_idx != q->avail->index) {
        uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
        struct vnet_bridge_pkt * virtio_pkt = NULL;

        pkt_desc = &(q->desc[pkt_idx]);
        PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);

        if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            goto exit;
        }

        PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);

        // Fill in dst packet buffer
        virtio_pkt->link_id = pkt->dst_id;
        virtio_pkt->pkt_size = pkt->size;
        memcpy(virtio_pkt->pkt, pkt->data, pkt->size);
        
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt);

        q->used->index++;
        q->cur_avail_idx++;
    } else {
        vnet_state->pkt_drop ++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
        PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
    }

    ret_val = 0;
        
exit:

    v3_unlock_irqrestore(vnet_state->lock, flags);
 
    return ret_val;
}

static int vnet_pkt_input_xcall(void *data){
    struct v3_vnet_bridge_xcall_args *args = (struct v3_vnet_bridge_xcall_args *)data;
    int i = 0;

    for(i = 0; i < args->pkt_num; i++) {
        vnet_pkt_input_cb(args->vm, args->vnet_pkts[i], args->private_data);
    }
        
    return 0;
}

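// Drain the guest's transmit queue: translate each buffer, wrap it in a
// v3_vnet_pkt, and hand it to the VNET core via v3_vnet_send_pkt() before
// completing the descriptor in the used ring.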
static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) 
{
    struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
    int recvd = 0;
        
    if (q->ring_avail_addr == 0) {
        return -1;
    }

    while (q->cur_avail_idx != q->avail->index) {
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * pkt_desc = NULL;
        struct vnet_bridge_pkt * virtio_pkt = NULL;

        pkt_desc = &(q->desc[desc_idx]);

        PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length);
        
        if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
            PrintError("Could not translate buffer address\n");
            return -1;
        }

        //PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);

        struct v3_vnet_pkt pkt;
        pkt.size = virtio_pkt->pkt_size;
        pkt.src_type = LINK_EDGE;
        pkt.src_id = 0;
        memcpy(pkt.header, virtio_pkt->pkt, ETHERNET_HEADER_LEN);
        pkt.data = virtio_pkt->pkt;

        v3_vnet_send_pkt(&pkt, NULL);
        
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
        q->used->index++;

        vnet_state->pkt_sent ++;
        recvd ++;

        q->cur_avail_idx++;
    }

    if(recvd == 0){
        return 0;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
    }
        
    return 0;
}

static void vnet_virtio_poll(struct v3_vm_info * vm, void *private_data){
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;

    if(vm == vnet_state->vm){   
        handle_pkt_kick(&(vm->cores[0]), vnet_state);
    }
}

static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) 
{
    //v3_vnet_enable_bridge();
        
    return 0;
}

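// I/O write handler for the device's virtio PCI register window: feature
// negotiation, ring setup (the guest writes the page frame number of the
// ring), queue selection, queue-notify kicks, status/reset, and ISR writes.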
static int vnet_virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

    PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
               port, length, *(uint32_t *)src);

    vnet_state->total_exit ++;
    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }
            vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;

            break;
        case VRING_PG_NUM_PORT:
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);

                vnet_state->cur_queue->pfn = pfn;
                
                vnet_state->cur_queue->ring_desc_addr = page_addr;
                vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
                                                 sizeof(struct vring_avail)    + \
                                                 (QUEUE_SIZE * sizeof(uint16_t)));
                
                // round up to next page boundary.
                vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }

                if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(vnet_state->cur_queue->ring_desc_addr),
                           (void *)(vnet_state->cur_queue->ring_avail_addr),
                           (void *)(vnet_state->cur_queue->ring_used_addr));

                PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
                           vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);

                if(vnet_state->queue[RECV_QUEUE].avail != NULL){
                    vnet_state->ready = 1;
                    vnet_state->queue[RECV_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
                }

                //No notify when there is pkt tx from guest
                //palacios will do the polling
                if(vnet_state->queue[XMIT_QUEUE].used != NULL){
                    vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
                }
            } else {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            if (vnet_state->virtio_cfg.vring_queue_selector >= NUM_QUEUES) {
                PrintError("VNET Bridge device only has %d queues. Selected %d\n", 
                           NUM_QUEUES, vnet_state->virtio_cfg.vring_queue_selector);
                return -1;
            }
            
            vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);

            break;
        case VRING_Q_NOTIFY_PORT: {
            uint16_t queue_idx = *(uint16_t *)src;

            PrintDebug("VNET Bridge: Handling Kick\n");

            if (queue_idx == 0) {
                if (handle_cmd_kick(core, vnet_state) == -1) {
                    PrintError("Could not handle Virtio VNET Control command\n");
                    return -1;
                }
            } else if (queue_idx == 1) {
                if (handle_pkt_kick(core, vnet_state) == -1){
                    PrintError("Could not handle Virtio VNET TX\n");
                    return -1;
                }
                PrintError("Notify on TX\n");
            } else if (queue_idx == 2) {
                if (handle_rx_kick(core, vnet_state) == -1){
                    PrintError("Could not handle Virtio RX buffer refill Kick\n");
                    return -1;
                }
                vnet_state->rx_exit ++;
            } else {
                PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
                return -1;
            }

            break;
        }
        case VIRTIO_STATUS_PORT:
            vnet_state->virtio_cfg.status = *(uint8_t *)src;

            if (vnet_state->virtio_cfg.status == 0) {
                PrintDebug("VNET Bridge: Resetting device\n");
                virtio_reset(vnet_state);
            }

            break;

        case VIRTIO_ISR_PORT:
            vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
            break;
    }

    return length;
}

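// I/O read handler: returns host features, the current queue's page frame
// number and size, device status, and the ISR (reading the ISR clears it and
// lowers the interrupt line). Reads past the standard virtio registers fall
// through to the device-specific vnet_config area.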
static int vnet_virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {

    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;

            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->cur_queue->pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }
                
            *(uint16_t *)dst = vnet_state->cur_queue->queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = vnet_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
            vnet_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
            break;

        default:
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);
                
            } else {
                PrintError("Unhandled Virtio VNET read\n");
                return -1;
            }

            break;
    }

    return length;
}

static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};

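// Device initialization: allocate and zero the device state, attach the
// device to the VM, register a PCI device with a single I/O BAR sized to the
// (power-of-two rounded) virtio + vnet config space, reset the virtio state,
// and register this device as the VM's VNET bridge.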
static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
    struct virtio_vnet_state * vnet_state = NULL;
    struct pci_device * pci_dev = NULL;
    char * name = v3_cfg_val(cfg, "name");

    PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);

    if (pci_bus == NULL) {
        PrintError("VNET Bridge device requires a PCI bus\n");
        return -1;
    }
    
    vnet_state  = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
    memset(vnet_state, 0, sizeof(struct virtio_vnet_state));
        
    vnet_state->vm = vm;

    struct vm_device * dev = v3_allocate_device(name, &dev_ops, vnet_state);

    if (v3_attach_device(vm, dev) == -1) {
        PrintError("Could not attach device %s\n", name);
        return -1;
    }


    // PCI initialization
    {
        struct v3_pci_bar bars[6];
        int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
        int tmp_ports = num_ports;
        int i;

        // This gets the number of ports, rounded up to a power of 2
        vnet_state->io_range_size = 1; // must be a power of 2

        while (tmp_ports > 0) {
            tmp_ports >>= 1;
            vnet_state->io_range_size <<= 1;
        }
        
        // this is to account for any low order bits being set in num_ports
        // if there are none, then num_ports was already a power of 2 so we shift right to reset it
        if ((num_ports & ((vnet_state->io_range_size >> 1) - 1)) == 0) {
            vnet_state->io_range_size >>= 1;
        }

        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
        bars[0].num_ports = vnet_state->io_range_size;
        bars[0].io_read = vnet_virtio_io_read;
        bars[0].io_write = vnet_virtio_io_write;
        bars[0].private_data = vnet_state;

        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
                                         0, 5 /*PCI_AUTO_DEV_NUM*/, 0,
                                         "LNX_VIRTIO_VNET", bars,
                                         NULL, NULL, NULL, vnet_state);

        if (!pci_dev) {
            PrintError("Could not register PCI Device\n");
            return -1;
        }
        
        pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
        pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
        pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
        pci_dev->config_header.class = PCI_CLASS_MEMORY;
        pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
        pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
        pci_dev->config_header.intr_pin = 1;
        pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)


        vnet_state->pci_dev = pci_dev;
        vnet_state->pci_bus = pci_bus;
    }

    virtio_reset(vnet_state);

    struct v3_vnet_bridge_ops brg_ops;
    brg_ops.input = vnet_pkt_input_cb;
    brg_ops.polling_pkt = vnet_virtio_poll;
    brg_ops.xcall_input = vnet_pkt_input_xcall;

    V3_Print("Registering Virtio device as vnet bridge\n");
    v3_vnet_add_bridge(vm, &brg_ops, (void *)vnet_state);

    return 0;
}


device_register("LNX_VIRTIO_VNET", dev_init)