Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


reverted naming changes to fix configuration breakage
[palacios.git] / palacios / src / devices / lnx_virtio_vnet.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/lnx_virtio_pci.h>
24 #include <palacios/vmm_vnet.h>
25 #include <palacios/vmm_sprintf.h>
26 #include <devices/pci.h>
27
28
// Compile out debug output entirely unless virtio-vnet debugging was
// enabled at configuration time.
#ifndef CONFIG_LINUX_VIRTIO_VNET_DEBUG
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


#define QUEUE_SIZE 128   // descriptors per virtqueue
#define NUM_QUEUES 3     // control, transmit, and receive queues
37
// Device-specific virtio config space, read by the guest at offsets past
// the standard virtio_config registers (see virtio_io_read's default case).
struct vnet_config {
    uint32_t num_devs;    // presumably count of attached vnet devices -- TODO confirm
    uint32_t num_routes;  // presumably count of installed routes -- TODO confirm
} __attribute__((packed));
42
43
// Virtqueue indices within virtio_vnet_state.queue[]. Note the numbering:
// queue 1 is transmit and queue 2 is receive (not the declaration order).
#define CTRL_QUEUE 0
#define RECV_QUEUE 2
#define XMIT_QUEUE 1
47
// Per-device state for the virtio VNET bridge.
struct virtio_vnet_state {
    struct v3_vm_info *vm;             // owning VM
    struct vnet_config vnet_cfg;       // device-specific config space
    struct virtio_config virtio_cfg;   // standard virtio register block

    struct vm_device * pci_bus;        // PCI bus the device is attached to
    struct pci_device * pci_dev;       // our PCI function (for raising IRQs)
        
    struct virtio_queue queue[NUM_QUEUES];  // ctrl/xmit/recv virtqueues

    // Queue currently addressed by the VRING_Q_SEL_PORT register.
    struct virtio_queue * cur_queue;

    int io_range_size;                 // size of the BAR0 I/O window (power of 2)
    v3_lock_t lock;                    // serializes the RX path (vnet_brg_rx)
};
63
// Control-queue command codes (values of vnet_ctrl_hdr.cmd_type).
#define VNET_GET_ROUTES 10
#define VNET_ADD_ROUTE 11
#define VNET_DEL_ROUTE 12

#define VNET_GET_LINKS 20
#define VNET_ADD_LINK 21
#define VNET_DEL_LINK 22

// structure of the vnet command header
struct vnet_ctrl_hdr {
    uint8_t cmd_type;    // one of the VNET_* command codes above
    uint32_t num_cmds;   // number of payload descriptors that follow the header
} __attribute__((packed));
77
78 static int virtio_reset(struct virtio_vnet_state * vnet_brg) {
79
80     memset(vnet_brg->queue, 0, sizeof(struct virtio_queue) * 2);
81
82     vnet_brg->cur_queue = &(vnet_brg->queue[0]);
83
84     vnet_brg->virtio_cfg.status = 0;
85     vnet_brg->virtio_cfg.pci_isr = 0;
86
87     vnet_brg->queue[0].queue_size = QUEUE_SIZE;
88     vnet_brg->queue[1].queue_size = QUEUE_SIZE;
89     vnet_brg->queue[2].queue_size = QUEUE_SIZE;
90
91     memset(&(vnet_brg->vnet_cfg), 0, sizeof(struct vnet_config));
92     v3_lock_init(&(vnet_brg->lock));
93
94     return 0;
95 }
96
97
98
99 static int get_desc_count(struct virtio_queue * q, int index) {
100     struct vring_desc * tmp_desc = &(q->desc[index]);
101     int cnt = 1;
102     
103     while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
104         tmp_desc = &(q->desc[tmp_desc->next]);
105         cnt++;
106     }
107
108     return cnt;
109 }
110
111
112
113
/*
 * Process the control queue (queue[0]) after a guest kick.
 *
 * Each available descriptor chain is expected to be laid out as:
 *   [0] vnet_ctrl_hdr, [1..n] command payload buffers, [last] status byte.
 * Only VNET_ADD_ROUTE is handled; NOTE(review): for any other cmd_type the
 * payload descriptors are NOT skipped, so the "status" lookup below lands
 * on the first payload descriptor instead of the last one -- confirm
 * whether other command types can reach this path.
 *
 * Returns 0 on success, -1 on a malformed chain or translation failure.
 */
static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
    struct virtio_queue * q = &(vnet_state->queue[0]);
    
    PrintDebug("VNET Bridge: Handling command  queue\n");

    // Drain every entry the guest has published in the avail ring.
    while (q->cur_avail_idx < q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        uint16_t desc_cnt = get_desc_count(q, desc_idx);
        struct vnet_ctrl_hdr * hdr = NULL;
        int i;
        int xfer_len = 0;       // total bytes consumed, reported via used ring
        uint8_t * status_ptr = NULL;
        uint8_t status = 0;     // 0 = success; last nonzero add_route result otherwise


        PrintDebug("Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);

        // A valid command chain needs header + at least one payload + status.
        if (desc_cnt < 3) {
            PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
            return -1;
        }
        
        hdr_desc = &(q->desc[desc_idx]);

        if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate VirtioVNET header address\n");
            return -1;
        }

        // Advance to the first payload descriptor.
        desc_idx = hdr_desc->next;
        
        if (hdr->cmd_type == VNET_ADD_ROUTE) {
            
            // One payload descriptor per route to install.
            for (i = 0; i < hdr->num_cmds; i++) {
                uint8_t tmp_status = 0;
                struct v3_vnet_route * route = NULL;
                
                buf_desc = &(q->desc[desc_idx]);

                if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
                    PrintError("Could not translate route address\n");
                    return -1;
                }

                // add route
                PrintDebug("Adding VNET Route\n");

                tmp_status = v3_vnet_add_route(*route);

                PrintDebug("VNET Route Added\n");

                // Record the failure but keep processing remaining routes.
                if (tmp_status != 0) {
                    PrintError("Error adding VNET ROUTE\n");
                    status = tmp_status;
                }

                xfer_len += buf_desc->length;
                desc_idx = buf_desc->next;
            }

        } 



        // Write the completion status back through the final descriptor.
        status_desc = &(q->desc[desc_idx]);

        if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("VirtioVNET Error could not translate status address\n");
            return -1;
        }

        xfer_len += status_desc->length;
        *status_ptr = status;

        PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
        // Publish the completed chain in the used ring.
        q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        q->used->ring[q->used->index % QUEUE_SIZE].length = xfer_len; // set to total inbound xfer length

        q->used->index++;
        q->cur_avail_idx++;
    }


    // Interrupt the guest unless it suppressed notifications.
    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 1;
    }


    return 0;
}
209
210 static int copy_data_to_desc(struct guest_info *core, 
211                                         struct vring_desc * desc, 
212                                         uchar_t * buf, 
213                                         uint_t buf_len) 
214 {
215     uint32_t len;
216     uint8_t * desc_buf = NULL;
217
218     if (guest_pa_to_host_va(core, desc->addr_gpa, (addr_t *)&(desc_buf)) == -1) {
219         PrintError("Could not translate buffer address\n");
220         return -1;
221     }
222     len = (desc->length < buf_len)?desc->length:buf_len;
223     memcpy(desc_buf, buf, len);
224
225     return len;
226 }
227
228
/*
 * Receive path: deliver a VNET packet to the guest through the RX
 * virtqueue. Called by the VNET core (via vnet_brg_input) with
 * private_data = our virtio_vnet_state. Serialized under vnet_brg->lock.
 *
 * The whole v3_vnet_pkt struct (header + data) is copied into a single
 * guest buffer; see the TODO below about trimming this to the actual size.
 *
 * Returns 0 on success, -1 if the queue is unconfigured or the copy fails.
 */
static int vnet_brg_rx(struct v3_vnet_pkt *pkt, void * private_data) {
    struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
    struct virtio_queue * q = &(vnet_brg->queue[RECV_QUEUE]);
    char *buf = (char *)pkt;
    int ret_val;
    int pkt_head_len = ((addr_t)pkt->data) - ((addr_t)pkt); 
    uint32_t data_len = sizeof(struct v3_vnet_pkt); //TODO: should not transfer the whole struct, only the data size 
    unsigned long flags;

    flags = v3_lock_irqsave(vnet_brg->lock);
        
    PrintDebug("VNET Bridge: RX: pkt sent to guest size: %d\n, pkt_header_len: %d\n", data_len, pkt_head_len);
    v3_hexdump(buf, 100, NULL, 0);

    // Guest hasn't configured the RX ring yet; nothing we can deliver to.
    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        ret_val = -1;
        goto exit;
    }

    // Track 16-bit wraparound of the guest-published avail index so the
    // comparison below still works after q->avail->index wraps.
    if (q->last_avail_idx > q->avail->index)
        q->idx_overflow = true;
    q->last_avail_idx = q->avail->index;

    if (q->cur_avail_idx < q->avail->index || (q->idx_overflow && q->cur_avail_idx < q->avail->index+65536)){
        uint16_t buf_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        uint32_t len = 0;
        uint32_t offset = 0;
        struct vring_desc * buf_desc = NULL;

        buf_desc = &(q->desc[buf_idx]);
        PrintDebug("VNET Bridge RX: buffer desc len: %d\n", buf_desc->length);

        // Copy the packet into the first guest buffer only; multi-descriptor
        // delivery is not implemented (see the "not fit" branch below).
        len = copy_data_to_desc(&(vnet_brg->vm->cores[0]), buf_desc, buf + offset, data_len - offset);
        if (len == -1) {
            ret_val = -1;
            goto exit;
        }
        offset += len;
        buf_desc->length = len;
        
        if (offset >= data_len) {
            buf_desc->flags &= ~VIRTIO_NEXT_FLAG;
        }else {
            PrintDebug("VNET Bridge: RX: Pkt not fit into one desc buf\n");
        }

        // Publish the filled buffer in the used ring.
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = data_len; // This should be the total length of data sent to guest (header+pkt_data)
        q->used->index++;

        // Detect our own index wrapping past 16 bits and clear the flag.
        int last_idx = q->cur_avail_idx;
        q->cur_avail_idx++;
        if (q->cur_avail_idx < last_idx)
            q->idx_overflow = false;
    } else {
        PrintDebug("VNET Bridge: Handle RX: Fails to send, no available buffer: current_idx:%d, availIdx: %d\n", q->cur_avail_idx, q->avail->index);
    }

    // Interrupt the guest unless it suppressed notifications.
    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
        vnet_brg->virtio_cfg.pci_isr = 0x1;
        PrintDebug("Raising IRQ %d\n",  vnet_brg->pci_dev->config_header.intr_line);
    }

    ret_val = 0;

exit:

    v3_unlock_irqrestore(vnet_brg->lock, flags);
 
    return ret_val;
}
302
/*
 * Hand a guest-originated packet to the VNET core.
 * pkt points at a v3_vnet_pkt living in guest memory (translated by the
 * caller); pkt_len is the descriptor length and is currently unused here.
 * private_data is the guest core that kicked the TX queue; it is passed
 * through to v3_vnet_send_pkt as the sender identity.
 */
static int vnet_send(struct v3_vnet_pkt *pkt, int pkt_len, void * private_data){
    struct guest_info *core  = (struct guest_info *)private_data; 

#ifdef CONFIG_DEBUG_VNET_BRG
    {
        PrintDebug("VNET Bridge: send pkt size: %d\n", pkt->size);
        v3_hexdump(pkt->data,pkt->size, NULL, 0);
    }
#endif

#ifdef CONFIG_VNET_PROFILE
    // Record how long the copy-from-guest phase took (profiling only).
    uint64_t start, end;
    rdtscll(start);
    core->vnet_times.time_copy_from_guest = start - core->vnet_times.virtio_handle_start;
#endif

    // Mark the packet as entering from a bridge edge link.
    pkt->src_type = LINK_EDGE;

    return v3_vnet_send_pkt(pkt, (void *)core);
}
323
324 static int pkt_tx(struct guest_info *core, struct virtio_vnet_state * vnet_brg, struct vring_desc * buf_desc) 
325 {
326     uint8_t * buf = NULL;
327     uint32_t len = buf_desc->length;
328     struct v3_vnet_pkt *pkt;
329
330     if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
331         PrintError("Could not translate buffer address\n");
332         return -1;
333     }
334  
335     pkt = (struct v3_vnet_pkt *)buf;
336     if (vnet_send(pkt, len, (void *)core) == -1) {
337         return -1;
338     }
339
340     return 0;
341 }
342
/*
 * Drain the transmit virtqueue after a guest kick: send every published
 * packet via pkt_tx, publish the consumed buffers in the used ring, and
 * raise an interrupt unless the guest suppressed notifications.
 *
 * The idx_overflow bookkeeping compensates for 16-bit wraparound of the
 * guest-published avail index, mirroring the logic in vnet_brg_rx.
 *
 * Returns 0 on success, -1 if any packet fails to send.
 */
static int handle_pkt_tx(struct guest_info *core, struct virtio_vnet_state * vnet_brg) 
{
    struct virtio_queue * q = &(vnet_brg->queue[XMIT_QUEUE]);

    // Guest index wrapped past 16 bits since we last looked.
    if (q->avail->index < q->last_avail_idx) {
        q->idx_overflow = true;
    }

    q->last_avail_idx = q->avail->index;

    PrintDebug("VNET Bridge Device: Handle TX\n");

    while (q->cur_avail_idx < q->avail->index || 
           (q->idx_overflow && q->cur_avail_idx < (q->avail->index + 65536))) {
        struct vring_desc * buf_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];

        buf_desc = &(q->desc[desc_idx]);

        PrintDebug("VNET Bridge: Handle TX buf_len: %d\n", buf_desc->length);

        if (pkt_tx(core, vnet_brg, buf_desc) == -1) {
                PrintError("Error handling nic operation\n");
                return -1;
        }
         
        // Publish the consumed buffer in the used ring.
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = buf_desc->length; // What do we set this to????
        q->used->index++;

        // Detect our own index wrapping and clear the overflow flag.
        int last_idx = q->cur_avail_idx;
        q->cur_avail_idx ++;
        if (q->cur_avail_idx < last_idx) {
            q->idx_overflow = false;
        }
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
        vnet_brg->virtio_cfg.pci_isr = 0x1;
    }

#ifdef CONFIG_VNET_PROFILE
    // Profiling: total time spent handling this kick.
    uint64_t time;
    rdtscll(time);
    core->vnet_times.total_handle_time = time - core->vnet_times.virtio_handle_start;
    core->vnet_times.print = true;
#endif

    return 0;
}
394
395 static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
396     struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
397     int port_idx = port % vnet_brg->io_range_size;
398
399
400     PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
401                port, length, *(uint32_t *)src);
402     PrintDebug("VNET Bridge: port idx=%d\n", port_idx);
403
404
405     switch (port_idx) {
406         case GUEST_FEATURES_PORT:
407             if (length != 4) {
408                 PrintError("Illegal write length for guest features\n");
409                 return -1;
410             }    
411             vnet_brg->virtio_cfg.guest_features = *(uint32_t *)src;
412
413             break;
414         case VRING_PG_NUM_PORT:
415             if (length == 4) {
416                 addr_t pfn = *(uint32_t *)src;
417                 addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
418
419                 vnet_brg->cur_queue->pfn = pfn;
420                 
421                 vnet_brg->cur_queue->ring_desc_addr = page_addr ;
422                 vnet_brg->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
423                 vnet_brg->cur_queue->ring_used_addr = ( vnet_brg->cur_queue->ring_avail_addr + \
424                                                  sizeof(struct vring_avail)    + \
425                                                  (QUEUE_SIZE * sizeof(uint16_t)));
426                 
427                 // round up to next page boundary.
428                 vnet_brg->cur_queue->ring_used_addr = (vnet_brg->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
429
430                 if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_desc_addr, (addr_t *)&(vnet_brg->cur_queue->desc)) == -1) {
431                     PrintError("Could not translate ring descriptor address\n");
432                     return -1;
433                 }
434
435                 if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_avail_addr, (addr_t *)&(vnet_brg->cur_queue->avail)) == -1) {
436                     PrintError("Could not translate ring available address\n");
437                     return -1;
438                 }
439
440                 if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_used_addr, (addr_t *)&(vnet_brg->cur_queue->used)) == -1) {
441                     PrintError("Could not translate ring used address\n");
442                     return -1;
443                 }
444
445                 PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
446                            (void *)(vnet_brg->cur_queue->ring_desc_addr),
447                            (void *)(vnet_brg->cur_queue->ring_avail_addr),
448                            (void *)(vnet_brg->cur_queue->ring_used_addr));
449
450                 PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
451                            vnet_brg->cur_queue->desc, vnet_brg->cur_queue->avail, vnet_brg->cur_queue->used);
452
453             } else {
454                 PrintError("Illegal write length for page frame number\n");
455                 return -1;
456             }
457             break;
458         case VRING_Q_SEL_PORT:
459             vnet_brg->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
460
461             if (vnet_brg->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
462                 PrintError("VNET Bridge device has no qeueues. Selected %d\n", 
463                            vnet_brg->virtio_cfg.vring_queue_selector);
464                 return -1;
465             }
466             
467             vnet_brg->cur_queue = &(vnet_brg->queue[vnet_brg->virtio_cfg.vring_queue_selector]);
468
469             break;
470         case VRING_Q_NOTIFY_PORT: {
471             uint16_t queue_idx = *(uint16_t *)src;
472
473             PrintDebug("VNET Bridge: Handling Kick\n");
474
475             if (queue_idx == 0) {
476                 if (handle_cmd_kick(core, vnet_brg) == -1) {
477                     PrintError("Could not handle Virtio VNET Control command\n");
478                     return -1;
479                 }
480             } else if (queue_idx == 1) {
481                 if (handle_pkt_tx(core, vnet_brg) == -1){
482                     PrintError("Could not handle Virtio VNET TX\n");
483                     return -1;
484                 }
485             } else if (queue_idx == 2) {
486                 PrintDebug("VNET Bridge: receive kick on RX Queue\n");
487             } else {
488                 PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
489                 return -1;
490             }
491
492             break;
493         }
494         case VIRTIO_STATUS_PORT:
495             vnet_brg->virtio_cfg.status = *(uint8_t *)src;
496
497             if (vnet_brg->virtio_cfg.status == 0) {
498                 PrintDebug("VNET Bridge: Resetting device\n");
499                 virtio_reset(vnet_brg);
500             }
501
502             break;
503
504         case VIRTIO_ISR_PORT:
505             vnet_brg->virtio_cfg.pci_isr = *(uint8_t *)src;
506             break;
507         default:
508             return -1;
509             break;
510     }
511
512     return length;
513 }
514
515
/*
 * PIO read handler for the device's virtio register window (BAR0).
 * port_idx is the register offset within the I/O range. Offsets past the
 * standard virtio_config registers map onto the device-specific
 * vnet_config structure. Returns bytes read on success, -1 on an
 * illegal access.
 */
static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {

    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

/*
    PrintDebug("VirtioVNET: VIRTIO SYMBIOTIC Read  for port %d (index =%d), length=%d\n", 
               port, port_idx, length);
*/
    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;
        
            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->cur_queue->pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }
                
            *(uint16_t *)dst = vnet_state->cur_queue->queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = vnet_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            // Reading the ISR is a read-to-clear operation: report the
            // pending value, clear it, and deassert the interrupt line.
            *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
            vnet_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
            break;

        default:
            // Device-specific config space follows the standard registers.
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);
                
            } else {
                PrintError("Read of Unhandled Virtio Read\n");
                return -1;
            }
          
            break;
    }

    return length;
}
587
/*
 * VNET bridge input callback: forward an inbound packet to the guest RX
 * path. The vm parameter is unused; private_data is the
 * virtio_vnet_state registered via v3_vnet_add_bridge.
 */
static int vnet_brg_input(struct v3_vm_info * vm, 
                          struct v3_vnet_pkt * pkt, 
                          void * private_data)
{
    return vnet_brg_rx(pkt, private_data);
}
594
// Device-manager callbacks: this device needs no special handling for
// free/reset/start/stop, so all hooks are left unimplemented.
static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};
601
602
603 static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
604     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
605     struct virtio_vnet_state * vbrg_state = NULL;
606     struct pci_device * pci_dev = NULL;
607     char * name = v3_cfg_val(cfg, "name");
608
609     PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);
610
611     if (pci_bus == NULL) {
612         PrintError("VNET Bridge device require a PCI Bus");
613         return -1;
614     }
615     
616     vbrg_state  = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
617     memset(vbrg_state, 0, sizeof(struct virtio_vnet_state));
618         
619     vbrg_state->vm = vm;
620
621     struct vm_device * dev = v3_allocate_device(name, &dev_ops, vbrg_state);
622
623     if (v3_attach_device(vm, dev) == -1) {
624         PrintError("Could not attach device %s\n", name);
625         return -1;
626     }
627
628
629     // PCI initialization
630     {
631         struct v3_pci_bar bars[6];
632         int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
633         int tmp_ports = num_ports;
634         int i;
635
636         // This gets the number of ports, rounded up to a power of 2
637         vbrg_state->io_range_size = 1; // must be a power of 2
638
639         while (tmp_ports > 0) {
640             tmp_ports >>= 1;
641             vbrg_state->io_range_size <<= 1;
642         }
643         
644         // this is to account for any low order bits being set in num_ports
645         // if there are none, then num_ports was already a power of 2 so we shift right to reset it
646         if ((num_ports & ((vbrg_state->io_range_size >> 1) - 1)) == 0) {
647             vbrg_state->io_range_size >>= 1;
648         }
649
650         for (i = 0; i < 6; i++) {
651             bars[i].type = PCI_BAR_NONE;
652         }
653
654         bars[0].type = PCI_BAR_IO;
655         bars[0].default_base_port = -1;
656         bars[0].num_ports = vbrg_state->io_range_size;
657         bars[0].io_read = virtio_io_read;
658         bars[0].io_write = virtio_io_write;
659         bars[0].private_data = vbrg_state;
660
661         pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
662                                          0, PCI_AUTO_DEV_NUM, 0,
663                                          "LNX_VIRTIO_VNET", bars,
664                                          NULL, NULL, NULL, vbrg_state);
665
666         if (!pci_dev) {
667             PrintError("Could not register PCI Device\n");
668             return -1;
669         }
670         
671         pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
672         pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
673         pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
674         pci_dev->config_header.class = PCI_CLASS_MEMORY;
675         pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
676         pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
677         pci_dev->config_header.intr_pin = 1;
678         pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
679
680
681         vbrg_state->pci_dev = pci_dev;
682         vbrg_state->pci_bus = pci_bus;
683     }
684
685     virtio_reset(vbrg_state);
686
687     v3_vnet_add_bridge(vm, vnet_brg_input, (void *)vbrg_state);
688
689 //for temporary hack
690 #if 1   
691     {
692         uchar_t dstmac[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
693         uchar_t zeromac[6] = {0,0,0,0,0,0};
694
695         struct v3_vnet_route route;
696         route.dst_id = 0;
697         route.dst_type = LINK_EDGE;
698         route.src_id = -1;
699         route.src_type = LINK_ANY;
700
701         memcpy(route.dst_mac, dstmac, 6);
702         route.dst_mac_qual = MAC_NONE;
703         memcpy(route.src_mac, zeromac, 6);
704         route.src_mac_qual = MAC_ANY;
705            
706         v3_vnet_add_route(route);
707     }
708 #endif
709
710     return 0;
711 }
712
713
// Register this device with the Palacios device framework under the
// name used in VM configuration files.
device_register("LNX_VIRTIO_VNET", dev_init)