Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Update on VNET Bridge
[palacios.git] / palacios / src / devices / lnx_virtio_vnet.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/lnx_virtio_pci.h>
24 #include <palacios/vmm_vnet.h>
25 #include <palacios/vmm_sprintf.h>
26 #include <devices/pci.h>
27
28
29 #ifndef CONFIG_DEBUG_VNET_BRG
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34
35 #define QUEUE_SIZE 128
36 #define NUM_QUEUES 3
37
// Device-specific configuration space exposed to the guest, mapped
// immediately after the standard virtio registers in the I/O BAR
// (served by the default case of vnetbrg_io_read). It is zeroed on
// reset and never written elsewhere in this file.
struct vnet_config {
    uint32_t num_devs;     // presumably the number of VNET links -- TODO confirm against guest driver
    uint32_t num_routes;   // presumably the number of installed routes -- TODO confirm against guest driver
} __attribute__((packed));
42
43
44 #define CTRL_QUEUE 0
45 #define RECV_QUEUE 2
46 #define XMIT_QUEUE 1
47
/*
 * Per-instance state for the virtio VNET bridge device.
 */
struct virtio_vnet_state {
    struct v3_vm_info *vm;             // owning VM; core 0 is used for address translation on RX
    struct vnet_config vnet_cfg;       // device-specific config space (see vnetbrg_io_read)
    struct virtio_config virtio_cfg;   // standard virtio register block

    struct vm_device * pci_bus;        // PCI bus this device is registered on
    struct pci_device * pci_dev;       // our PCI function; used to raise/lower the IRQ
        
    struct virtio_queue queue[NUM_QUEUES];   // CTRL_QUEUE (0), XMIT_QUEUE (1), RECV_QUEUE (2)

    // queue currently addressed by VRING_Q_SEL_PORT / VRING_PG_NUM_PORT writes
    struct virtio_queue * cur_queue;

    int io_range_size;                 // size of the I/O BAR, rounded up to a power of 2
    v3_lock_t lock;                    // serializes the host-side RX path (vnet_brg_rx)
};
63
64 #define VNET_GET_ROUTES 10
65 #define VNET_ADD_ROUTE 11
66 #define VNET_DEL_ROUTE 12
67
68 #define VNET_GET_LINKS 20
69 #define VNET_ADD_LINK 21
70 #define VNET_DEL_LINK 22
71
// Header carried in the first descriptor of every control-queue command
// chain (see handle_cmd_kick).
struct vnet_ctrl_hdr {
    uint8_t cmd_type;    // one of the VNET_* command codes above
    uint32_t num_cmds;   // number of payload descriptors following the header
} __attribute__((packed));
77
78 static int vnetbrg_reset(struct virtio_vnet_state * vnet_brg) {
79
80     memset(vnet_brg->queue, 0, sizeof(struct virtio_queue) * 2);
81
82     vnet_brg->cur_queue = &(vnet_brg->queue[0]);
83
84     vnet_brg->virtio_cfg.status = 0;
85     vnet_brg->virtio_cfg.pci_isr = 0;
86
87     vnet_brg->queue[0].queue_size = QUEUE_SIZE;
88     vnet_brg->queue[1].queue_size = QUEUE_SIZE;
89     vnet_brg->queue[2].queue_size = QUEUE_SIZE;
90
91     memset(&(vnet_brg->vnet_cfg), 0, sizeof(struct vnet_config));
92     v3_lock_init(&(vnet_brg->lock));
93
94     return 0;
95 }
96
97
98
99 static int get_desc_count(struct virtio_queue * q, int index) {
100     struct vring_desc * tmp_desc = &(q->desc[index]);
101     int cnt = 1;
102     
103     while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
104         tmp_desc = &(q->desc[tmp_desc->next]);
105         cnt++;
106     }
107
108     return cnt;
109 }
110
111
112
113
/*
 * Process the control queue (queue 0) after a guest kick.
 *
 * Each posted chain must contain at least 3 descriptors:
 *   [0]      a struct vnet_ctrl_hdr (command type + payload count),
 *   [1..n-2] payload buffers (v3_vnet_route entries for VNET_ADD_ROUTE),
 *   [n-1]    a one-byte status the host writes back.
 * Only VNET_ADD_ROUTE is handled here; any other command type skips the
 * payload walk and simply receives a status byte of 0.
 *
 * Returns 0 on success, -1 on a malformed chain or failed translation.
 */
static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
    struct virtio_queue * q = &(vnet_state->queue[0]);
    
    PrintDebug("VNET Bridge: Handling command  queue\n");

    // NOTE(review): unlike the TX/RX paths, this loop does not account for
    // 16-bit wrap of the avail index -- confirm that is acceptable here.
    while (q->cur_avail_idx < q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        uint16_t desc_cnt = get_desc_count(q, desc_idx);
        struct vnet_ctrl_hdr * hdr = NULL;
        int i;
        int xfer_len = 0;
        uint8_t * status_ptr = NULL;
        uint8_t status = 0;


        PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);

        // header + at least one payload + status byte
        if (desc_cnt < 3) {
            PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
            return -1;
        }
        
        hdr_desc = &(q->desc[desc_idx]);

        if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate VirtioVNET header address\n");
            return -1;
        }

        desc_idx = hdr_desc->next;
        
        if (hdr->cmd_type == VNET_ADD_ROUTE) {
            
            // Each payload descriptor carries one v3_vnet_route to install.
            // NOTE(review): num_cmds is guest-controlled and is not checked
            // against desc_cnt -- a bad value would walk past the chain.
            for (i = 0; i < hdr->num_cmds; i++) {
                uint8_t tmp_status = 0;
                struct v3_vnet_route * route = NULL;
                
                buf_desc = &(q->desc[desc_idx]);

                if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
                    PrintError("Could not translate route address\n");
                    return -1;
                }

                // add route
                PrintDebug("VNET Bridge: Adding VNET Route\n");

                tmp_status = v3_vnet_add_route(*route);

                PrintDebug("VNET Route Added\n");

                if (tmp_status != 0) {
                    PrintError("Error adding VNET ROUTE\n");
                    // remember the failure; it becomes the reported status byte
                    status = tmp_status;
                }

                xfer_len += buf_desc->length;
                desc_idx = buf_desc->next;
            }

        } 



        // The descriptor after the payload (or directly after the header
        // for unhandled commands) receives the one-byte completion status.
        status_desc = &(q->desc[desc_idx]);

        if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("VirtioVNET Error could not translate status address\n");
            return -1;
        }

        xfer_len += status_desc->length;
        *status_ptr = status;

        PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
        // Publish the completed chain in the used ring.
        q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        q->used->ring[q->used->index % QUEUE_SIZE].length = xfer_len; // set to total inbound xfer length

        q->used->index++;
        q->cur_avail_idx++;
    }

    // Interrupt the guest unless it suppressed notifications.
    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 1;
    }


    return 0;
}
209
210 static int copy_data_to_desc(struct guest_info *core, 
211                                         struct vring_desc * desc, 
212                                         uchar_t * buf, 
213                                         uint_t buf_len) 
214 {
215     uint32_t len;
216     uint8_t * desc_buf = NULL;
217
218     if (guest_pa_to_host_va(core, desc->addr_gpa, (addr_t *)&(desc_buf)) == -1) {
219         PrintError("Could not translate buffer address\n");
220         return -1;
221     }
222     len = (desc->length < buf_len)?desc->length:buf_len;
223     memcpy(desc_buf, buf, len);
224
225     return len;
226 }
227
228
/*
 * Deliver a host-side VNET packet into the guest through the RX queue
 * (queue 2). The whole v3_vnet_pkt struct (header + data) is copied
 * into a single guest descriptor buffer.
 *
 * Runs under the device lock with interrupts saved, since it is invoked
 * from the VNET core rather than the guest's own exit path.
 *
 * Returns 0 on success, -1 if the ring is unconfigured or the copy fails.
 */
static int vnet_brg_rx(struct v3_vnet_pkt *pkt, void * private_data) {
    struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
    struct virtio_queue * q = &(vnet_brg->queue[RECV_QUEUE]);
    char *buf = (char *)pkt;
    int ret_val;
    int pkt_head_len = ((addr_t)pkt->data) - ((addr_t)pkt); 
    uint32_t data_len = sizeof(struct v3_vnet_pkt); //TODO: should not transfer the whole struct, only the data size 
    unsigned long flags;

    flags = v3_lock_irqsave(vnet_brg->lock);
        
    PrintDebug("VNET Bridge: RX: pkt sent to guest size: %d\n, pkt_header_len: %d\n", data_len, pkt_head_len);
    v3_hexdump(buf, 100, NULL, 0);

    // The guest has not configured the RX ring yet.
    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        ret_val = -1;
        goto exit;
    }

    // Track 16-bit wrap of the guest's avail index.
    if (q->last_avail_idx > q->avail->index)
        q->idx_overflow = true;
    q->last_avail_idx = q->avail->index;

    // Proceed only if the guest has posted at least one free RX buffer
    // (accounting for a wrapped avail index).
    if (q->cur_avail_idx < q->avail->index || (q->idx_overflow && q->cur_avail_idx < q->avail->index+65536)){
        uint16_t buf_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        uint32_t len = 0;
        uint32_t offset = 0;
        struct vring_desc * buf_desc = NULL;

        buf_desc = &(q->desc[buf_idx]);
        PrintDebug("VNET Bridge RX: buffer desc len: %d\n", buf_desc->length);

        // Copy as much of the packet as fits into this one descriptor.
        len = copy_data_to_desc(&(vnet_brg->vm->cores[0]), buf_desc, buf + offset, data_len - offset);
        if (len == -1) {
            ret_val = -1;
            goto exit;
        }
        offset += len;
        buf_desc->length = len;
        
        if (offset >= data_len) {
            buf_desc->flags &= ~VIRTIO_NEXT_FLAG;
        }else {
            // NOTE(review): a packet larger than one descriptor is only
            // logged; the remainder is silently dropped -- confirm intent.
            PrintDebug("VNET Bridge: RX: Pkt not fit into one desc buf\n");
        }

        // Publish the filled buffer in the used ring.
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = data_len; // This should be the total length of data sent to guest (header+pkt_data)
        q->used->index++;

        int last_idx = q->cur_avail_idx;
        q->cur_avail_idx++;
        // cur_avail_idx wrapped: the pending overflow has been consumed.
        if (q->cur_avail_idx < last_idx)
            q->idx_overflow = false;
    } else {
        PrintDebug("VNET Bridge: Handle RX: Fails to send, no available buffer: current_idx:%d, availIdx: %d\n", q->cur_avail_idx, q->avail->index);
    }

    // Interrupt the guest unless it suppressed notifications.
    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
        vnet_brg->virtio_cfg.pci_isr = 0x1;
        PrintDebug("Raising IRQ %d\n",  vnet_brg->pci_dev->config_header.intr_line);
    }

    ret_val = 0;

exit:

    v3_unlock_irqrestore(vnet_brg->lock, flags);
 
    return ret_val;
}
302
303 static int vnet_send(struct v3_vnet_pkt *pkt, int pkt_len, void * private_data){
304     struct virtio_vnet_state *vnet_brg  = (struct virtio_vnet_state *)private_data; 
305     struct guest_info *core = &(vnet_brg->vm->cores[0]);
306
307 #ifdef CONFIG_DEBUG_VNET_BRG
308     {
309         PrintDebug("VNET Bridge: send pkt size: %d, src_id: %d, src_type: %d\n", 
310                         pkt->size, pkt->src_id, pkt->src_type);
311         v3_hexdump(pkt->data,pkt->size, NULL, 0);
312     }
313 #endif
314
315 #ifdef CONFIG_VNET_PROFILE
316     uint64_t time;
317     rdtscll(time);
318     core->vnet_times.time_copy_from_guest = time - core->vnet_times.virtio_handle_start;
319 #endif
320
321     pkt->src_type = LINK_EDGE;
322
323     v3_vnet_send_pkt(pkt, (void *)core);
324
325     return 0;
326 //v3_vnet_send_pkt(pkt, (void *)core);
327 }
328
329 static int pkt_tx(struct guest_info *core, struct virtio_vnet_state * vnet_brg, struct vring_desc * buf_desc) 
330 {
331     uint8_t * buf = NULL;
332     uint32_t len = buf_desc->length;
333     struct v3_vnet_pkt *pkt;
334
335     if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
336         PrintError("Could not translate buffer address\n");
337         return -1;
338     }
339  
340     pkt = (struct v3_vnet_pkt *)buf;
341     if (vnet_send(pkt, len, vnet_brg) == -1) {
342         return -1;
343     }
344
345     return 0;
346 }
347
/*
 * Drain the transmit queue (XMIT_QUEUE) after a guest kick, sending each
 * posted buffer into the VNET core via pkt_tx(). Handles 16-bit wrap of
 * the guest's avail index through the idx_overflow flag.
 *
 * Returns 0 on success, -1 on a translation or send failure.
 */
static int handle_pkt_tx(struct guest_info *core, struct virtio_vnet_state * vnet_brg) 
{
    struct virtio_queue * q = &(vnet_brg->queue[XMIT_QUEUE]);

    // Detect 16-bit wrap of the avail index since the last kick.
    if (q->avail->index < q->last_avail_idx) {
        q->idx_overflow = true;
    }

    q->last_avail_idx = q->avail->index;

    PrintDebug("VNET Bridge Device: Handle TX\n");

    // Consume every buffer the guest has made available (including those
    // posted across an index wrap).
    while (q->cur_avail_idx < q->avail->index || 
           (q->idx_overflow && q->cur_avail_idx < (q->avail->index + 65536))) {
        struct vring_desc * buf_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];

        buf_desc = &(q->desc[desc_idx]);

        PrintDebug("VNET Bridge: Handle TX buf_len: %d\n", buf_desc->length);

        if (pkt_tx(core, vnet_brg, buf_desc) == -1) {
                PrintError("Error handling nic operation\n");
                return -1;
        }
         
        // Return the consumed buffer to the guest via the used ring.
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = buf_desc->length; // What do we set this to????
        q->used->index++;

        int last_idx = q->cur_avail_idx;
        q->cur_avail_idx ++;
        // cur_avail_idx wrapped: the pending overflow has been consumed.
        if (q->cur_avail_idx < last_idx) {
            q->idx_overflow = false;
        }
    }

    // Interrupt the guest unless it suppressed notifications.
    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
        vnet_brg->virtio_cfg.pci_isr = 0x1;
    }

#ifdef CONFIG_VNET_PROFILE
    uint64_t time;
    rdtscll(time);
    core->vnet_times.total_handle_time = time - core->vnet_times.virtio_handle_start;
    core->vnet_times.print = true;
#endif

    return 0;
}
399
400 static int vnetbrg_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
401     struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
402     int port_idx = port % vnet_brg->io_range_size;
403
404
405     PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
406                port, length, *(uint32_t *)src);
407     PrintDebug("VNET Bridge: port idx=%d\n", port_idx);
408
409
410     switch (port_idx) {
411         case GUEST_FEATURES_PORT:
412             if (length != 4) {
413                 PrintError("Illegal write length for guest features\n");
414                 return -1;
415             }    
416             vnet_brg->virtio_cfg.guest_features = *(uint32_t *)src;
417
418             break;
419         case VRING_PG_NUM_PORT:
420             if (length == 4) {
421                 addr_t pfn = *(uint32_t *)src;
422                 addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
423
424                 vnet_brg->cur_queue->pfn = pfn;
425                 
426                 vnet_brg->cur_queue->ring_desc_addr = page_addr ;
427                 vnet_brg->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
428                 vnet_brg->cur_queue->ring_used_addr = ( vnet_brg->cur_queue->ring_avail_addr + \
429                                                  sizeof(struct vring_avail)    + \
430                                                  (QUEUE_SIZE * sizeof(uint16_t)));
431                 
432                 // round up to next page boundary.
433                 vnet_brg->cur_queue->ring_used_addr = (vnet_brg->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
434
435                 if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_desc_addr, (addr_t *)&(vnet_brg->cur_queue->desc)) == -1) {
436                     PrintError("Could not translate ring descriptor address\n");
437                     return -1;
438                 }
439
440                 if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_avail_addr, (addr_t *)&(vnet_brg->cur_queue->avail)) == -1) {
441                     PrintError("Could not translate ring available address\n");
442                     return -1;
443                 }
444
445                 if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_used_addr, (addr_t *)&(vnet_brg->cur_queue->used)) == -1) {
446                     PrintError("Could not translate ring used address\n");
447                     return -1;
448                 }
449
450                 PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
451                            (void *)(vnet_brg->cur_queue->ring_desc_addr),
452                            (void *)(vnet_brg->cur_queue->ring_avail_addr),
453                            (void *)(vnet_brg->cur_queue->ring_used_addr));
454
455                 PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
456                            vnet_brg->cur_queue->desc, vnet_brg->cur_queue->avail, vnet_brg->cur_queue->used);
457
458             } else {
459                 PrintError("Illegal write length for page frame number\n");
460                 return -1;
461             }
462             break;
463         case VRING_Q_SEL_PORT:
464             vnet_brg->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
465
466             if (vnet_brg->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
467                 PrintError("VNET Bridge device has no qeueues. Selected %d\n", 
468                            vnet_brg->virtio_cfg.vring_queue_selector);
469                 return -1;
470             }
471             
472             vnet_brg->cur_queue = &(vnet_brg->queue[vnet_brg->virtio_cfg.vring_queue_selector]);
473
474             break;
475         case VRING_Q_NOTIFY_PORT: {
476             uint16_t queue_idx = *(uint16_t *)src;
477
478             PrintDebug("VNET Bridge: Handling Kick\n");
479
480             if (queue_idx == 0) {
481                 if (handle_cmd_kick(core, vnet_brg) == -1) {
482                     PrintError("Could not handle Virtio VNET Control command\n");
483                     return -1;
484                 }
485             } else if (queue_idx == 1) {
486                 if (handle_pkt_tx(core, vnet_brg) == -1){
487                     PrintError("Could not handle Virtio VNET TX\n");
488                     return -1;
489                 }
490             } else if (queue_idx == 2) {
491                 PrintDebug("VNET Bridge: receive kick on RX Queue\n");
492             } else {
493                 PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
494                 return -1;
495             }
496
497             break;
498         }
499         case VIRTIO_STATUS_PORT:
500             vnet_brg->virtio_cfg.status = *(uint8_t *)src;
501
502             if (vnet_brg->virtio_cfg.status == 0) {
503                 PrintDebug("VNET Bridge: Resetting device\n");
504                 vnetbrg_reset(vnet_brg);
505             }
506
507             break;
508
509         case VIRTIO_ISR_PORT:
510             vnet_brg->virtio_cfg.pci_isr = *(uint8_t *)src;
511             break;
512         default:
513             return -1;
514             break;
515     }
516
517     return length;
518 }
519
520
521 static int vnetbrg_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
522
523     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
524     int port_idx = port % vnet_state->io_range_size;
525
526 /*
527     PrintDebug("VirtioVNET: VIRTIO SYMBIOTIC Read  for port %d (index =%d), length=%d\n", 
528                port, port_idx, length);
529 */
530     switch (port_idx) {
531         case HOST_FEATURES_PORT:
532             if (length != 4) {
533                 PrintError("Illegal read length for host features\n");
534                 return -1;
535             }
536
537             *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;
538         
539             break;
540         case VRING_PG_NUM_PORT:
541             if (length != 4) {
542                 PrintError("Illegal read length for page frame number\n");
543                 return -1;
544             }
545
546             *(uint32_t *)dst = vnet_state->cur_queue->pfn;
547
548             break;
549         case VRING_SIZE_PORT:
550             if (length != 2) {
551                 PrintError("Illegal read length for vring size\n");
552                 return -1;
553             }
554                 
555             *(uint16_t *)dst = vnet_state->cur_queue->queue_size;
556
557             break;
558
559         case VIRTIO_STATUS_PORT:
560             if (length != 1) {
561                 PrintError("Illegal read length for status\n");
562                 return -1;
563             }
564
565             *(uint8_t *)dst = vnet_state->virtio_cfg.status;
566             break;
567
568         case VIRTIO_ISR_PORT:
569             *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
570             vnet_state->virtio_cfg.pci_isr = 0;
571             v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
572             break;
573
574         default:
575             if ( (port_idx >= sizeof(struct virtio_config)) && 
576                  (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
577                 int cfg_offset = port_idx - sizeof(struct virtio_config);
578                 uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);
579
580                 memcpy(dst, cfg_ptr + cfg_offset, length);
581                 
582             } else {
583                 PrintError("Read of Unhandled Virtio Read\n");
584                 return -1;
585             }
586           
587             break;
588     }
589
590     return length;
591 }
592
/*
 * Bridge-input callback registered with the VNET core (via
 * v3_vnet_add_bridge in vnet_brg_init): forwards a host-side packet into
 * the guest RX path. The vm argument is unused; the bridge state arrives
 * through private_data.
 */
static int vnet_brg_input(struct v3_vm_info * vm, 
                                struct v3_vnet_pkt * pkt, 
                                void * private_data){

    return vnet_brg_rx(pkt, private_data);
}
599
// Generic device lifecycle hooks. None are implemented for this device
// (in particular there is no teardown/free path).
static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};
606
607
608 static int vnet_brg_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
609     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
610     struct virtio_vnet_state * vbrg_state = NULL;
611     struct pci_device * pci_dev = NULL;
612     char * name = v3_cfg_val(cfg, "name");
613
614     PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);
615
616     if (pci_bus == NULL) {
617         PrintError("VNET Bridge device require a PCI Bus");
618         return -1;
619     }
620     
621     vbrg_state  = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
622     memset(vbrg_state, 0, sizeof(struct virtio_vnet_state));
623         
624     vbrg_state->vm = vm;
625
626     struct vm_device * dev = v3_allocate_device(name, &dev_ops, vbrg_state);
627
628     if (v3_attach_device(vm, dev) == -1) {
629         PrintError("Could not attach device %s\n", name);
630         return -1;
631     }
632
633
634     // PCI initialization
635     {
636         struct v3_pci_bar bars[6];
637         int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
638         int tmp_ports = num_ports;
639         int i;
640
641         // This gets the number of ports, rounded up to a power of 2
642         vbrg_state->io_range_size = 1; // must be a power of 2
643
644         while (tmp_ports > 0) {
645             tmp_ports >>= 1;
646             vbrg_state->io_range_size <<= 1;
647         }
648         
649         // this is to account for any low order bits being set in num_ports
650         // if there are none, then num_ports was already a power of 2 so we shift right to reset it
651         if ((num_ports & ((vbrg_state->io_range_size >> 1) - 1)) == 0) {
652             vbrg_state->io_range_size >>= 1;
653         }
654
655         for (i = 0; i < 6; i++) {
656             bars[i].type = PCI_BAR_NONE;
657         }
658
659         bars[0].type = PCI_BAR_IO;
660         bars[0].default_base_port = -1;
661         bars[0].num_ports = vbrg_state->io_range_size;
662         bars[0].io_read = vnetbrg_io_read;
663         bars[0].io_write = vnetbrg_io_write;
664         bars[0].private_data = vbrg_state;
665
666         pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
667                                          0, PCI_AUTO_DEV_NUM, 0,
668                                          "LNX_VIRTIO_VNET", bars,
669                                          NULL, NULL, NULL, vbrg_state);
670
671         if (!pci_dev) {
672             PrintError("Could not register PCI Device\n");
673             return -1;
674         }
675         
676         pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
677         pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
678         pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
679         pci_dev->config_header.class = PCI_CLASS_MEMORY;
680         pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
681         pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
682         pci_dev->config_header.intr_pin = 1;
683         pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
684
685
686         vbrg_state->pci_dev = pci_dev;
687         vbrg_state->pci_bus = pci_bus;
688     }
689
690     vnetbrg_reset(vbrg_state);
691
692     v3_vnet_add_bridge(vm, vnet_brg_input, (void *)vbrg_state);
693
694     return 0;
695 }
696
697
698 device_register("LNX_VNET_BRG", vnet_brg_init)