Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
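For instance, to list the remote branches and track one of them (replace <branch> with the branch you want):

  git branch -r
  git checkout --track -b <branch> origin/<branch>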


Update on the VNET Bridge virtio device

palacios/src/devices/lnx_virtio_vnet.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest_mem.h>
#include <devices/lnx_virtio_pci.h>
#include <palacios/vmm_vnet.h>
#include <palacios/vmm_sprintf.h>
#include <devices/pci.h>


#ifndef CONFIG_DEBUG_VNET_BRG
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


#define QUEUE_SIZE 128
#define NUM_QUEUES 3

struct vnet_config {
    uint32_t num_devs;
    uint32_t num_routes;
} __attribute__((packed));


#define CTRL_QUEUE 0
#define RECV_QUEUE 2
#define XMIT_QUEUE 1

struct virtio_vnet_state {
    struct v3_vm_info * vm;
    struct vnet_config vnet_cfg;
    struct virtio_config virtio_cfg;

    struct vm_device * pci_bus;
    struct pci_device * pci_dev;

    struct virtio_queue queue[NUM_QUEUES];

    struct virtio_queue * cur_queue;

    int io_range_size;
    v3_lock_t lock;
};

#define VNET_GET_ROUTES 10
#define VNET_ADD_ROUTE 11
#define VNET_DEL_ROUTE 12

#define VNET_GET_LINKS 20
#define VNET_ADD_LINK 21
#define VNET_DEL_LINK 22

// structure of the vnet command header
struct vnet_ctrl_hdr {
    uint8_t cmd_type;
    uint32_t num_cmds;
} __attribute__((packed));

#define VIRTIO_NET_S_LINK_UP    1       /* Link is up */
#define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 << 10))

struct virtio_net_hdr {
    uint8_t flags;

#define VIRTIO_NET_HDR_GSO_NONE 0       /* Not a GSO frame */
    uint8_t gso_type;
    uint16_t hdr_len;       /* Ethernet + IP + tcp/udp hdrs */
    uint16_t gso_size;      /* Bytes to append to hdr_len per frame */
    uint16_t csum_start;    /* Position to start checksumming from */
    uint16_t csum_offset;   /* Offset after that to place checksum */
} __attribute__((packed));


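/* Reset the device to its initial state: clear the virtqueues and the
 * device-specific VNET config space, reinitialize the virtio status/ISR
 * registers, and point the current queue back at the control queue. */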
static int virtio_reset(struct virtio_vnet_state * vnet_brg) {

    // clear all NUM_QUEUES queues, not just the first two
    memset(vnet_brg->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);

    vnet_brg->cur_queue = &(vnet_brg->queue[0]);

    vnet_brg->virtio_cfg.status = 0;
    vnet_brg->virtio_cfg.pci_isr = 0;

    vnet_brg->queue[0].queue_size = QUEUE_SIZE;
    vnet_brg->queue[1].queue_size = QUEUE_SIZE;
    vnet_brg->queue[2].queue_size = QUEUE_SIZE;

    memset(&(vnet_brg->vnet_cfg), 0, sizeof(struct vnet_config));
    v3_lock_init(&(vnet_brg->lock));

    return 0;
}


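/* Walk a descriptor chain starting at 'index' and return the number of
 * descriptors linked together via the NEXT flag. */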
static int get_desc_count(struct virtio_queue * q, int index) {
    struct vring_desc * tmp_desc = &(q->desc[index]);
    int cnt = 1;

    while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
        tmp_desc = &(q->desc[tmp_desc->next]);
        cnt++;
    }

    return cnt;
}


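/* Process a kick on the control queue (queue 0). Each request chain is
 * expected to contain a vnet_ctrl_hdr descriptor, one or more payload
 * descriptors (e.g. routes for VNET_ADD_ROUTE), and a final status byte,
 * which is written back before the used ring is updated. */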
static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
    struct virtio_queue * q = &(vnet_state->queue[0]);

    PrintDebug("VNET Bridge: Handling command queue\n");

    while (q->cur_avail_idx < q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        uint16_t desc_cnt = get_desc_count(q, desc_idx);
        struct vnet_ctrl_hdr * hdr = NULL;
        int i;
        int xfer_len = 0;
        uint8_t * status_ptr = NULL;
        uint8_t status = 0;

        PrintDebug("Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);

        if (desc_cnt < 3) {
            PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
            return -1;
        }

        hdr_desc = &(q->desc[desc_idx]);

        if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate VirtioVNET header address\n");
            return -1;
        }

        desc_idx = hdr_desc->next;

        if (hdr->cmd_type == VNET_ADD_ROUTE) {

            for (i = 0; i < hdr->num_cmds; i++) {
                uint8_t tmp_status = 0;
                struct v3_vnet_route * route = NULL;

                buf_desc = &(q->desc[desc_idx]);

                if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
                    PrintError("Could not translate route address\n");
                    return -1;
                }

                // add route
                PrintDebug("Adding VNET Route\n");

                tmp_status = v3_vnet_add_route(*route);

                PrintDebug("VNET Route Added\n");

                if (tmp_status != 0) {
                    PrintError("Error adding VNET ROUTE\n");
                    status = tmp_status;
                }

                xfer_len += buf_desc->length;
                desc_idx = buf_desc->next;
            }

        }

        status_desc = &(q->desc[desc_idx]);

        if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("VirtioVNET: Could not translate status address\n");
            return -1;
        }

        xfer_len += status_desc->length;
        *status_ptr = status;

        PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
        q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        q->used->ring[q->used->index % QUEUE_SIZE].length = xfer_len; // set to total inbound xfer length

        q->used->index++;
        q->cur_avail_idx++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n", vnet_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 1;
    }

    return 0;
}

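/* Copy up to buf_len bytes from 'buf' into the guest buffer described by
 * 'desc', translating the descriptor's guest physical address first.
 * Returns the number of bytes copied, or -1 on translation failure. */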
static int copy_data_to_desc(struct guest_info * core,
                             struct vring_desc * desc,
                             uchar_t * buf,
                             uint_t buf_len)
{
    uint32_t len;
    uint8_t * desc_buf = NULL;

    if (guest_pa_to_host_va(core, desc->addr_gpa, (addr_t *)&(desc_buf)) == -1) {
        PrintError("Could not translate buffer address\n");
        return -1;
    }

    len = (desc->length < buf_len) ? desc->length : buf_len;
    memcpy(desc_buf, buf, len);

    return len;
}


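/* Deliver a packet from the VNET core into the guest: copy it into the next
 * available buffer on the receive queue, update the used ring, and raise the
 * device interrupt unless the guest has disabled notifications. */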
static int vnet_brg_rx(struct v3_vnet_pkt * pkt, void * private_data) {
    struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
    struct virtio_queue * q = &(vnet_brg->queue[RECV_QUEUE]);
    char * buf = (char *)pkt;
    int ret_val;
    int pkt_head_len = ((addr_t)pkt->data) - ((addr_t)pkt);
    uint32_t data_len = sizeof(struct v3_vnet_pkt); // TODO: should not transfer the whole struct, only the data size
    unsigned long flags;

    flags = v3_lock_irqsave(vnet_brg->lock);

    PrintDebug("VNET Bridge: RX: pkt sent to guest size: %d, pkt_header_len: %d\n", data_len, pkt_head_len);
    v3_hexdump(buf, 100, NULL, 0);

    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        ret_val = -1;
        goto exit;
    }

    if (q->last_avail_idx > q->avail->index) {
        q->idx_overflow = true;
    }
    q->last_avail_idx = q->avail->index;

    if (q->cur_avail_idx < q->avail->index ||
        (q->idx_overflow && q->cur_avail_idx < (q->avail->index + 65536))) {
        uint16_t buf_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        uint32_t len = 0;
        uint32_t offset = 0;
        struct vring_desc * buf_desc = NULL;

        buf_desc = &(q->desc[buf_idx]);
        PrintDebug("VNET Bridge RX: buffer desc len: %d\n", buf_desc->length);

        len = copy_data_to_desc(&(vnet_brg->vm->cores[0]), buf_desc, buf + offset, data_len - offset);
        if (len == -1) {
            ret_val = -1;
            goto exit;
        }
        offset += len;
        buf_desc->length = len;

        if (offset >= data_len) {
            buf_desc->flags &= ~VIRTIO_NEXT_FLAG;
        } else {
            PrintDebug("VNET Bridge: RX: Packet does not fit into one descriptor buffer\n");
        }

        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = data_len; // This should be the total length of data sent to guest (header+pkt_data)
        q->used->index++;

        int last_idx = q->cur_avail_idx;
        q->cur_avail_idx++;
        if (q->cur_avail_idx < last_idx) {
            q->idx_overflow = false;
        }
    } else {
        PrintDebug("VNET Bridge: Handle RX: Fails to send, no available buffer: current_idx:%d, availIdx: %d\n", q->cur_avail_idx, q->avail->index);
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
        vnet_brg->virtio_cfg.pci_isr = 0x1;
        PrintDebug("Raising IRQ %d\n", vnet_brg->pci_dev->config_header.intr_line);
    }

    ret_val = 0;

exit:

    v3_unlock_irqrestore(vnet_brg->lock, flags);

    return ret_val;
}

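/* Hand a packet received from the guest off to the VNET core, tagging it as
 * coming from an edge link. */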
static int vnet_send(struct v3_vnet_pkt * pkt, int pkt_len, void * private_data) {
    struct guest_info * core = (struct guest_info *)private_data;

#ifdef CONFIG_DEBUG_VNET_BRG
    {
        PrintDebug("VNET Bridge: send pkt size: %d\n", pkt->size);
        v3_hexdump(pkt->data, pkt->size, NULL, 0);
    }
#endif

#ifdef CONFIG_VNET_PROFILE
    uint64_t start, end;
    rdtscll(start);
    core->vnet_times.time_copy_from_guest = start - core->vnet_times.virtio_handle_start;
#endif

    pkt->src_type = LINK_EDGE;

    return v3_vnet_send_pkt(pkt, (void *)core);
}

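/* Translate a single transmit descriptor into a host-accessible buffer and
 * forward the contained v3_vnet_pkt to the VNET core via vnet_send(). */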
static int pkt_tx(struct guest_info * core, struct virtio_vnet_state * vnet_brg, struct vring_desc * buf_desc)
{
    uint8_t * buf = NULL;
    uint32_t len = buf_desc->length;
    struct v3_vnet_pkt * pkt;

    if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
        PrintError("Could not translate buffer address\n");
        return -1;
    }

    pkt = (struct v3_vnet_pkt *)buf;

    if (vnet_send(pkt, len, (void *)core) == -1) {
        return -1;
    }

    return 0;
}

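/* Process a kick on the transmit queue: forward every pending descriptor to
 * the VNET core, update the used ring, and raise the interrupt if needed. */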
static int handle_pkt_tx(struct guest_info * core, struct virtio_vnet_state * vnet_brg)
{
    struct virtio_queue * q = &(vnet_brg->queue[XMIT_QUEUE]);

    if (q->avail->index < q->last_avail_idx) {
        q->idx_overflow = true;
    }

    q->last_avail_idx = q->avail->index;

    PrintDebug("VNET Bridge Device: Handle TX\n");

    while (q->cur_avail_idx < q->avail->index ||
           (q->idx_overflow && q->cur_avail_idx < (q->avail->index + 65536))) {
        struct vring_desc * buf_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];

        buf_desc = &(q->desc[desc_idx]);

        PrintDebug("VNET Bridge: Handle TX buf_len: %d\n", buf_desc->length);

        if (pkt_tx(core, vnet_brg, buf_desc) == -1) {
            PrintError("Error handling nic operation\n");
            return -1;
        }

        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
        q->used->ring[q->used->index % q->queue_size].length = buf_desc->length; // What do we set this to????
        q->used->index++;

        int last_idx = q->cur_avail_idx;
        q->cur_avail_idx++;
        if (q->cur_avail_idx < last_idx) {
            q->idx_overflow = false;
        }
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_brg->pci_bus, 0, vnet_brg->pci_dev);
        vnet_brg->virtio_cfg.pci_isr = 0x1;
    }

#ifdef CONFIG_VNET_PROFILE
    uint64_t time;
    rdtscll(time);
    core->vnet_times.total_handle_time = time - core->vnet_times.virtio_handle_start;
    core->vnet_times.print = true;
#endif

    return 0;
}

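/* Handle guest writes to the device's virtio I/O ports: feature negotiation,
 * virtqueue setup (queue selection and PFN programming), queue notifications
 * (kicks), and device status/ISR updates. */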
static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
    struct virtio_vnet_state * vnet_brg = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_brg->io_range_size;

    PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n",
               port, length, *(uint32_t *)src);
    PrintDebug("VNET Bridge: port idx=%d\n", port_idx);

    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }
            vnet_brg->virtio_cfg.guest_features = *(uint32_t *)src;

            break;
        case VRING_PG_NUM_PORT:
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);

                vnet_brg->cur_queue->pfn = pfn;

                vnet_brg->cur_queue->ring_desc_addr = page_addr;
                vnet_brg->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                vnet_brg->cur_queue->ring_used_addr = (vnet_brg->cur_queue->ring_avail_addr +
                                                       sizeof(struct vring_avail) +
                                                       (QUEUE_SIZE * sizeof(uint16_t)));

                // round up to next page boundary.
                vnet_brg->cur_queue->ring_used_addr = (vnet_brg->cur_queue->ring_used_addr + 0xfff) & ~0xfff;

                if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_desc_addr, (addr_t *)&(vnet_brg->cur_queue->desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }

                if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_avail_addr, (addr_t *)&(vnet_brg->cur_queue->avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }

                if (guest_pa_to_host_va(core, vnet_brg->cur_queue->ring_used_addr, (addr_t *)&(vnet_brg->cur_queue->used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(vnet_brg->cur_queue->ring_desc_addr),
                           (void *)(vnet_brg->cur_queue->ring_avail_addr),
                           (void *)(vnet_brg->cur_queue->ring_used_addr));

                PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n",
                           vnet_brg->cur_queue->desc, vnet_brg->cur_queue->avail, vnet_brg->cur_queue->used);

            } else {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            vnet_brg->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            if (vnet_brg->virtio_cfg.vring_queue_selector >= NUM_QUEUES) {
                PrintError("VNET Bridge device only has %d queues. Selected %d\n",
                           NUM_QUEUES, vnet_brg->virtio_cfg.vring_queue_selector);
                return -1;
            }

            vnet_brg->cur_queue = &(vnet_brg->queue[vnet_brg->virtio_cfg.vring_queue_selector]);

            break;
        case VRING_Q_NOTIFY_PORT: {
            uint16_t queue_idx = *(uint16_t *)src;

            PrintDebug("VNET Bridge: Handling Kick\n");

            if (queue_idx == 0) {
                if (handle_cmd_kick(core, vnet_brg) == -1) {
                    PrintError("Could not handle Virtio VNET Control command\n");
                    return -1;
                }
            } else if (queue_idx == 1) {
                if (handle_pkt_tx(core, vnet_brg) == -1) {
                    PrintError("Could not handle Virtio VNET TX\n");
                    return -1;
                }
            } else if (queue_idx == 2) {
                PrintDebug("VNET Bridge: received kick on RX Queue\n");
            } else {
                PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
                return -1;
            }

            break;
        }
        case VIRTIO_STATUS_PORT:
            vnet_brg->virtio_cfg.status = *(uint8_t *)src;

            if (vnet_brg->virtio_cfg.status == 0) {
                PrintDebug("VNET Bridge: Resetting device\n");
                virtio_reset(vnet_brg);
            }

            break;

        case VIRTIO_ISR_PORT:
            vnet_brg->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
            break;
    }

    return length;
}


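/* Handle guest reads from the device's virtio I/O ports, including the
 * device-specific VNET config space that follows the standard virtio
 * configuration registers. */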
static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {

    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    int port_idx = port % vnet_state->io_range_size;

/*
    PrintDebug("VNET Bridge: VIRTIO VNET Read for port %d (index=%d), length=%d\n",
               port, port_idx, length);
*/
    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;

            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = vnet_state->cur_queue->pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }

            *(uint16_t *)dst = vnet_state->cur_queue->queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = vnet_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
            vnet_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
            break;

        default:
            if ( (port_idx >= sizeof(struct virtio_config)) &&
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);

            } else {
                PrintError("Unhandled Virtio read\n");
                return -1;
            }

            break;
    }

    return length;
}


static int vnet_brg_input(struct v3_vm_info * vm,
                          struct v3_vnet_pkt * pkt,
                          void * private_data) {

    return vnet_brg_rx(pkt, private_data);
}


static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};


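/* Device initialization: allocate and attach the bridge state, register the
 * virtio PCI device with a single I/O BAR sized to cover the virtio + VNET
 * config space, reset the device, and register it as the VNET bridge. */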
static int vnet_brg_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
    struct virtio_vnet_state * vbrg_state = NULL;
    struct pci_device * pci_dev = NULL;
    char * name = v3_cfg_val(cfg, "name");

    PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);

    if (pci_bus == NULL) {
        PrintError("VNET Bridge device requires a PCI Bus\n");
        return -1;
    }

    vbrg_state = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
    memset(vbrg_state, 0, sizeof(struct virtio_vnet_state));

    vbrg_state->vm = vm;

    struct vm_device * dev = v3_allocate_device(name, &dev_ops, vbrg_state);

    if (v3_attach_device(vm, dev) == -1) {
        PrintError("Could not attach device %s\n", name);
        return -1;
    }


    // PCI initialization
    {
        struct v3_pci_bar bars[6];
        int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
        int tmp_ports = num_ports;
        int i;

        // This gets the number of ports, rounded up to a power of 2
        vbrg_state->io_range_size = 1; // must be a power of 2

        while (tmp_ports > 0) {
            tmp_ports >>= 1;
            vbrg_state->io_range_size <<= 1;
        }

        // If num_ports was already a power of 2, the loop above overshot by
        // one doubling, so shift back down to the exact size.
        if ((num_ports & ((vbrg_state->io_range_size >> 1) - 1)) == 0) {
            vbrg_state->io_range_size >>= 1;
        }

        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
        bars[0].num_ports = vbrg_state->io_range_size;
        bars[0].io_read = virtio_io_read;
        bars[0].io_write = virtio_io_write;
        bars[0].private_data = vbrg_state;

        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE,
                                         0, PCI_AUTO_DEV_NUM, 0,
                                         "LNX_VIRTIO_VNET", bars,
                                         NULL, NULL, NULL, vbrg_state);

        if (!pci_dev) {
            PrintError("Could not register PCI Device\n");
            return -1;
        }

        pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
        pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
        pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
        pci_dev->config_header.class = PCI_CLASS_MEMORY;
        pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
        pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
        pci_dev->config_header.intr_pin = 1;
        pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)

        vbrg_state->pci_dev = pci_dev;
        vbrg_state->pci_bus = pci_bus;
    }

    virtio_reset(vbrg_state);

    v3_vnet_add_bridge(vm, vnet_brg_input, (void *)vbrg_state);

    // Temporary hack: install a default VNET route pointing at this bridge's
    // edge link.
#if 1
    {
        uchar_t dstmac[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        uchar_t zeromac[6] = {0, 0, 0, 0, 0, 0};

        struct v3_vnet_route route;
        route.dst_id = 0;
        route.dst_type = LINK_EDGE;
        route.src_id = -1;
        route.src_type = LINK_ANY;

        memcpy(route.dst_mac, dstmac, 6);
        route.dst_mac_qual = MAC_NONE;
        memcpy(route.src_mac, zeromac, 6);
        route.src_mac_qual = MAC_ANY;

        v3_vnet_add_route(route);
    }
#endif

    return 0;
}


device_register("LNX_VNET_BRG", vnet_brg_init)