Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


format fixes
[palacios.git] / palacios / src / devices / lnx_virtio_vnet.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11  * Copyright (c) 2008, Lei Xia <lxia@cs.northwestern.edu>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Jack Lange <jarusl@cs.northwestern.edu>
16  *            Lei Xia <lxia@cs.northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_dev_mgr.h>
24 #include <palacios/vm_guest_mem.h>
25 #include <devices/lnx_virtio_pci.h>
26 #include <palacios/vmm_vnet.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <devices/pci.h>
29
30
31 #ifndef CONFIG_DEBUG_LINUX_VIRTIO_VNET
32 #undef PrintDebug
33 #define PrintDebug(fmt, args...)
34 #endif
35
36
37 #define QUEUE_SIZE 4096
38 #define CMD_QUEUE_SIZE 128
39 #define NUM_QUEUES 3
40
// Device-specific virtio config space, exposed to the guest through IO
// reads past the standard virtio_config registers (see the default case
// of vnet_virtio_io_read).  Zeroed on reset.
struct vnet_config {
    uint32_t num_devs;    // number of attached vnet links
    uint32_t num_routes;  // number of installed vnet routes
} __attribute__((packed));
45
46
47 #define CTRL_QUEUE 0
48 #define XMIT_QUEUE 1
49 #define RECV_QUEUE 2
50
// Per-device state for the virtio VNET bridge.
struct virtio_vnet_state {
    struct v3_vm_info * vm;           // VM this bridge belongs to
    struct vnet_config vnet_cfg;      // device-specific config space
    struct virtio_config virtio_cfg;  // standard virtio register block

    struct vm_device * pci_bus;       // PCI bus the device registered on
    struct pci_device * pci_dev;      // our PCI function (IRQ raise/lower)
        
    struct virtio_queue queue[NUM_QUEUES];  // [0]=ctrl, [1]=xmit, [2]=recv

    // queue currently addressed by the guest via VRING_Q_SEL_PORT
    struct virtio_queue * cur_queue;

    int io_range_size;  // size of the IO BAR (power of two)
    v3_lock_t lock;     // serializes access to the packet rings

    // statistics, reported when CONFIG_VNET_PROFILE is enabled
    uint32_t pkt_sent;
    uint32_t pkt_recv;
    uint32_t pkt_drop;
    uint32_t tx_exit;
    uint32_t rx_exit;
    uint32_t total_exit;

    int ready;  // set once the guest has programmed the recv queue
};
75
76 #define VNET_GET_ROUTES 10
77 #define VNET_ADD_ROUTE 11
78 #define VNET_DEL_ROUTE 12
79
80 #define VNET_GET_LINKS 20
81 #define VNET_ADD_LINK 21
82 #define VNET_DEL_LINK 22
83
// Header of a control-queue request.  A request chain is:
// [vnet_ctrl_hdr][num_cmds payload buffers][1-byte status] -- see
// handle_cmd_kick.
struct vnet_ctrl_hdr {
    uint8_t cmd_type;   // VNET_ADD_ROUTE / VNET_DEL_ROUTE / ... (above)
    uint32_t num_cmds;  // number of payload descriptors that follow
} __attribute__((packed));
89
90
// Wire format of one packet exchanged with the guest over the xmit/recv
// queues.  pkt_size gives the number of valid bytes in pkt[].
struct vnet_bridge_pkt {
    uint32_t link_id;   // vnet link the packet is from / destined to
    uint32_t pkt_size;  // valid payload length, <= ETHERNET_PACKET_LEN
    uint8_t pkt[ETHERNET_PACKET_LEN];
}__attribute__((packed));
96
97
98 static int virtio_reset(struct virtio_vnet_state * vnet_state) {
99
100     memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);
101
102     vnet_state->cur_queue = &(vnet_state->queue[0]);
103
104     vnet_state->virtio_cfg.status = 0;
105     vnet_state->virtio_cfg.pci_isr = 0;
106
107     vnet_state->queue[0].queue_size = CMD_QUEUE_SIZE;
108     vnet_state->queue[1].queue_size = QUEUE_SIZE;
109     vnet_state->queue[2].queue_size = QUEUE_SIZE;
110
111     memset(&(vnet_state->vnet_cfg), 0, sizeof(struct vnet_config));
112     v3_lock_init(&(vnet_state->lock));
113
114     return 0;
115 }
116
117
118
119 static int get_desc_count(struct virtio_queue * q, int index) {
120     struct vring_desc * tmp_desc = &(q->desc[index]);
121     int cnt = 1;
122     
123     while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
124         tmp_desc = &(q->desc[tmp_desc->next]);
125         cnt++;
126     }
127
128     return cnt;
129 }
130
131
132
133
134 static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
135     struct virtio_queue * q = &(vnet_state->queue[0]);
136     
137     PrintDebug("VNET Bridge: Handling command  queue\n");
138
139     while (q->cur_avail_idx != q->avail->index) {
140         struct vring_desc * hdr_desc = NULL;
141         struct vring_desc * buf_desc = NULL;
142         struct vring_desc * status_desc = NULL;
143         uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
144         uint16_t desc_cnt = get_desc_count(q, desc_idx);
145         struct vnet_ctrl_hdr * hdr = NULL;
146         int i;
147         int xfer_len = 0;
148         uint8_t * status_ptr = NULL;
149         uint8_t status = 0;
150
151
152         PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", 
153                    desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
154
155         if (desc_cnt < 3) {
156             PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
157             return -1;
158         }
159         
160         hdr_desc = &(q->desc[desc_idx]);
161
162         if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
163             PrintError("Could not translate VirtioVNET header address\n");
164             return -1;
165         }
166
167         desc_idx = hdr_desc->next;
168         
169         if (hdr->cmd_type == VNET_ADD_ROUTE) {
170             
171             for (i = 0; i < hdr->num_cmds; i++) {
172                 uint8_t tmp_status = 0;
173                 struct v3_vnet_route * route = NULL;
174                 
175                 buf_desc = &(q->desc[desc_idx]);
176
177                 if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
178                     PrintError("Could not translate route address\n");
179                     return -1;
180                 }
181
182                 // add route
183                 PrintDebug("VNET Bridge: Adding VNET Route\n");
184
185                 tmp_status = v3_vnet_add_route(*route);
186
187                 PrintDebug("VNET Route Added\n");
188
189                 if (tmp_status != 0) {
190                     PrintError("Error adding VNET ROUTE\n");
191                     status = tmp_status;
192                 }
193
194                 xfer_len += buf_desc->length;
195                 desc_idx = buf_desc->next;
196             }
197
198         } 
199
200
201
202         status_desc = &(q->desc[desc_idx]);
203
204         if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
205             PrintError("VirtioVNET Error could not translate status address\n");
206             return -1;
207         }
208
209         xfer_len += status_desc->length;
210         *status_ptr = status;
211
212         PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
213         q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
214         q->used->ring[q->used->index % QUEUE_SIZE].length = xfer_len; // set to total inbound xfer length
215
216         q->used->index++;
217         q->cur_avail_idx++;
218     }
219
220
221     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
222         PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
223         v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
224         vnet_state->virtio_cfg.pci_isr = 1;
225     }
226
227
228     return 0;
229 }
230
231
232 static int vnet_pkt_input_cb(struct v3_vm_info * vm,  struct v3_vnet_pkt vnet_pkts[], uint16_t pkt_num, void * private_data){
233     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
234     struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
235     int ret_val = -1;
236     unsigned long flags;
237     uint16_t sent;
238     struct v3_vnet_pkt * pkt = NULL;
239
240     if (pkt_num <= 0) {
241         return 0;
242     }
243
244     flags = v3_lock_irqsave(vnet_state->lock);
245         
246     if (q->ring_avail_addr == 0) {
247         PrintError("Queue is not set\n");
248         v3_unlock_irqrestore(vnet_state->lock, flags);
249         return ret_val;
250     }
251
252     PrintDebug("VNET Bridge: RX: running on cpu: %d, num of pkts: %d\n", V3_Get_CPU(), pkt_num);
253
254     for (sent = 0; sent < pkt_num; sent++) {
255         pkt = &vnet_pkts[sent];
256         vnet_state->pkt_recv++;
257
258         if (q->cur_avail_idx != q->avail->index) {
259             uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
260             struct vring_desc * pkt_desc = NULL;
261             struct vnet_bridge_pkt * virtio_pkt = NULL;
262
263             pkt_desc = &(q->desc[pkt_idx]);
264             PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);
265
266             if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
267                 PrintError("Could not translate buffer address\n");
268                 v3_unlock_irqrestore(vnet_state->lock, flags);
269                 return ret_val;
270             }
271
272             PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);
273
274             // Fill in dst packet buffer
275             virtio_pkt->link_id = pkt->dst_id;
276             virtio_pkt->pkt_size = pkt->size;
277             memcpy(virtio_pkt->pkt, pkt->data, pkt->size);
278         
279             q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
280             q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); 
281
282             q->used->index++;
283             q->cur_avail_idx++;
284         } else {
285             vnet_state->pkt_drop++;
286             v3_vnet_disable_bridge();
287         }
288     }
289
290     if (sent == 0) {
291         v3_unlock_irqrestore(vnet_state->lock, flags);
292         return ret_val;
293     }
294
295     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
296         v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
297         vnet_state->virtio_cfg.pci_isr = 0x1;
298         PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
299     }
300
301     ret_val = 0;
302
303         
304 #ifdef CONFIG_VNET_PROFILE
305     if (vnet_state->pkt_recv % 200000 == 0)
306         PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
307                    vnet_state->pkt_sent,
308                    vnet_state->pkt_recv,
309                    vnet_state->pkt_drop, 
310                    vnet_state->total_exit,
311                    vnet_state->tx_exit,
312                    vnet_state->rx_exit);
313 #endif
314
315     v3_unlock_irqrestore(vnet_state->lock, flags);
316
317     return ret_val;
318
319 }
320
321 static void vnet_pkt_input_xcall(void * data) {
322     struct v3_vnet_bridge_input_args * args = (struct v3_vnet_bridge_input_args *)data;
323         
324     vnet_pkt_input_cb(args->vm, args->vnet_pkts, args->pkt_num, args->private_data);
325 }
326
327 static int handle_pkt_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
328     struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
329     unsigned long flags = 0;
330     int recvd = 0;
331     int cpu = V3_Get_CPU();
332
333     flags = v3_lock_irqsave(vnet_state->lock);
334
335     if (q->ring_avail_addr == 0) {
336         v3_unlock_irqrestore(vnet_state->lock,flags);
337         return 0;
338     }
339
340     while (q->cur_avail_idx != q->avail->index) {
341         uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
342         struct vring_desc * pkt_desc = NULL;
343         struct vnet_bridge_pkt * virtio_pkt = NULL;
344
345         pkt_desc = &(q->desc[desc_idx]);
346
347         PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length);
348         
349         if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
350             PrintError("Could not translate buffer address\n");
351             return -1;
352         }
353
354         PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);
355         
356         v3_vnet_rx(virtio_pkt->pkt, virtio_pkt->pkt_size, virtio_pkt->link_id, LINK_EDGE);
357
358         q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
359         q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
360         q->used->index++;
361
362         vnet_state->pkt_sent++;
363         recvd++;
364
365         q->cur_avail_idx++;
366     }
367
368     if (recvd == 0) {
369         v3_unlock_irqrestore(vnet_state->lock,flags);
370         return 0;
371     }
372
373     //PrintError("In polling get %d\n", recvd);
374         
375     //if on the dom0 core, interrupt the domU core to poll pkts
376     //otherwise, call the polling directly
377
378
379     if (vnet_state->vm->cores[0].cpu_id == cpu) {
380         cpu = (cpu == 0) ? 1 : 0;
381         v3_interrupt_cpu(vnet_state->vm, cpu, V3_VNET_POLLING_VECTOR);
382     } else {
383         v3_vnet_polling();
384     }
385
386     if ((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) {
387         //optimized for guest's, batch the interrupts
388         
389         if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
390             v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
391             vnet_state->virtio_cfg.pci_isr = 0x1;
392         }
393     }
394     
395 #ifdef CONFIG_VNET_PROFILE
396     if (vnet_state->pkt_sent % 200000 == 0)
397         PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
398                    vnet_state->pkt_sent,
399                    vnet_state->pkt_recv,
400                    vnet_state->pkt_drop, 
401                    vnet_state->total_exit,
402                    vnet_state->tx_exit,
403                    vnet_state->rx_exit);
404 #endif
405
406     v3_unlock_irqrestore(vnet_state->lock,flags);
407
408     return 0;
409 }
410
411 static int polling_pkt_from_guest(struct v3_vm_info * vm, void *private_data) {
412     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
413         
414     return handle_pkt_kick(&(vm->cores[0]), vnet_state);
415 }
416
/*
 * RX-queue kick: the guest has posted fresh receive buffers, so turn
 * the bridge back on (it is disabled in vnet_pkt_input_cb when the ring
 * runs dry).  Always returns 0; both parameters are unused.
 */
static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) {
    (void)core;
    (void)vnet_state;

    v3_vnet_enable_bridge();

    return 0;
}
422
423 static int vnet_virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
424     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
425     int port_idx = port % vnet_state->io_range_size;
426
427     PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
428                port, length, *(uint32_t *)src);
429     PrintDebug("VNET Bridge: port idx=%d\n", port_idx);
430
431     vnet_state->total_exit++;
432
433     switch (port_idx) {
434         case GUEST_FEATURES_PORT:
435
436             if (length != 4) {
437                 PrintError("Illegal write length for guest features\n");
438                 return -1;
439             }    
440
441             vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;
442
443             break;
444         case VRING_PG_NUM_PORT: {
445
446             addr_t pfn = *(uint32_t *)src;
447             addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
448
449             if (length != 4) {
450                 PrintError("Illegal write length for page frame number\n");
451                 return -1;
452             }
453             
454
455             vnet_state->cur_queue->pfn = pfn;
456                 
457             vnet_state->cur_queue->ring_desc_addr = page_addr ;
458             vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
459             vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
460                                                       sizeof(struct vring_avail) + \
461                                                       (QUEUE_SIZE * sizeof(uint16_t)));
462             
463             // round up to next page boundary.
464             vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
465             
466             if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
467                 PrintError("Could not translate ring descriptor address\n");
468                 return -1;
469             }
470             
471             if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
472                 PrintError("Could not translate ring available address\n");
473                 return -1;
474             }
475             
476             if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
477                 PrintError("Could not translate ring used address\n");
478                 return -1;
479             }
480             
481             PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
482                        (void *)(vnet_state->cur_queue->ring_desc_addr),
483                        (void *)(vnet_state->cur_queue->ring_avail_addr),
484                        (void *)(vnet_state->cur_queue->ring_used_addr));
485             
486             PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
487                        vnet_state->cur_queue->desc, 
488                        vnet_state->cur_queue->avail, 
489                        vnet_state->cur_queue->used);
490             
491             if (vnet_state->queue[RECV_QUEUE].avail != NULL){
492                 vnet_state->ready = 1;
493             }
494             
495             //No notify when there is pkt tx from guest
496             if (vnet_state->queue[XMIT_QUEUE].used != NULL) {
497                 vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
498             }
499             
500             break;
501         }
502         case VRING_Q_SEL_PORT:
503             vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
504
505             if (vnet_state->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
506                 PrintError("VNET Bridge device has no qeueues. Selected %d\n", 
507                            vnet_state->virtio_cfg.vring_queue_selector);
508                 return -1;
509             }
510             
511             vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);
512
513             break;
514         case VRING_Q_NOTIFY_PORT: {
515             uint16_t queue_idx = *(uint16_t *)src;
516
517             PrintDebug("VNET Bridge: Handling Kick\n");
518
519             if (queue_idx == 0) {
520                 if (handle_cmd_kick(core, vnet_state) == -1) {
521                     PrintError("Could not handle Virtio VNET Control command\n");
522                     return -1;
523                 }
524             } else if (queue_idx == 1) {
525                 if (handle_pkt_kick(core, vnet_state) == -1){
526                     PrintError("Could not handle Virtio VNET TX\n");
527                     return -1;
528                 }
529                 vnet_state->tx_exit ++;
530                 //PrintError("Notify on TX\n");
531             } else if (queue_idx == 2) {
532                 if (handle_rx_kick(core, vnet_state) == -1){
533                     PrintError("Could not handle Virtio RX buffer refills Kick\n");
534                     return -1;
535                 }
536                 vnet_state->rx_exit ++;
537             } else {
538                 PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
539                 return -1;
540             }
541
542             break;
543         }
544         case VIRTIO_STATUS_PORT:
545             vnet_state->virtio_cfg.status = *(uint8_t *)src;
546
547             if (vnet_state->virtio_cfg.status == 0) {
548                 PrintDebug("VNET Bridge: Resetting device\n");
549                 virtio_reset(vnet_state);
550             }
551
552             break;
553
554         case VIRTIO_ISR_PORT:
555             vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
556             break;
557         default:
558             return -1;
559             break;
560     }
561
562     return length;
563 }
564
565
566 static int vnet_virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
567
568     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
569     int port_idx = port % vnet_state->io_range_size;
570
571     switch (port_idx) {
572         case HOST_FEATURES_PORT:
573             if (length != 4) {
574                 PrintError("Illegal read length for host features\n");
575                 return -1;
576             }
577
578             *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;
579         
580             break;
581         case VRING_PG_NUM_PORT:
582             if (length != 4) {
583                 PrintError("Illegal read length for page frame number\n");
584                 return -1;
585             }
586
587             *(uint32_t *)dst = vnet_state->cur_queue->pfn;
588
589             break;
590         case VRING_SIZE_PORT:
591             if (length != 2) {
592                 PrintError("Illegal read length for vring size\n");
593                 return -1;
594             }
595                 
596             *(uint16_t *)dst = vnet_state->cur_queue->queue_size;
597
598             break;
599
600         case VIRTIO_STATUS_PORT:
601             if (length != 1) {
602                 PrintError("Illegal read length for status\n");
603                 return -1;
604             }
605
606             *(uint8_t *)dst = vnet_state->virtio_cfg.status;
607             break;
608
609         case VIRTIO_ISR_PORT:
610             *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
611             vnet_state->virtio_cfg.pci_isr = 0;
612             v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
613             break;
614
615         default:
616             if ( (port_idx >= sizeof(struct virtio_config)) && 
617                  (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
618                 int cfg_offset = port_idx - sizeof(struct virtio_config);
619                 uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);
620
621                 memcpy(dst, cfg_ptr + cfg_offset, length);
622                 
623             } else {
624                 PrintError("Read of Unhandled Virtio Read\n");
625                 return -1;
626             }
627           
628             break;
629     }
630
631     return length;
632 }
633
634
635
// Generic device-manager hooks.  This device installs no free/reset/
// start/stop handlers; all behavior is driven by PCI IO accesses.
static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};
642
643 static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
644     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
645     struct virtio_vnet_state * vnet_state = NULL;
646     struct pci_device * pci_dev = NULL;
647     char * dev_id = v3_cfg_val(cfg, "ID");
648
649     PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", dev_id);
650
651     if (pci_bus == NULL) {
652         PrintError("VNET Bridge device require a PCI Bus");
653         return -1;
654     }
655     
656     vnet_state  = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
657     memset(vnet_state, 0, sizeof(struct virtio_vnet_state));
658         
659     vnet_state->vm = vm;
660
661     struct vm_device * dev = v3_allocate_device(dev_id, &dev_ops, vnet_state);
662
663     if (v3_attach_device(vm, dev) == -1) {
664         PrintError("Could not attach device %s\n", dev_id);
665         return -1;
666     }
667
668
669     // PCI initialization
670     {
671         struct v3_pci_bar bars[6];
672         int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
673         int tmp_ports = num_ports;
674         int i;
675
676         // This gets the number of ports, rounded up to a power of 2
677         vnet_state->io_range_size = 1; // must be a power of 2
678
679         while (tmp_ports > 0) {
680             tmp_ports >>= 1;
681             vnet_state->io_range_size <<= 1;
682         }
683         
684         // this is to account for any low order bits being set in num_ports
685         // if there are none, then num_ports was already a power of 2 so we shift right to reset it
686         if ((num_ports & ((vnet_state->io_range_size >> 1) - 1)) == 0) {
687             vnet_state->io_range_size >>= 1;
688         }
689
690         for (i = 0; i < 6; i++) {
691             bars[i].type = PCI_BAR_NONE;
692         }
693
694         bars[0].type = PCI_BAR_IO;
695         bars[0].default_base_port = -1;
696         bars[0].num_ports = vnet_state->io_range_size;
697         bars[0].io_read = vnet_virtio_io_read;
698         bars[0].io_write = vnet_virtio_io_write;
699         bars[0].private_data = vnet_state;
700
701         pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
702                                          0, PCI_AUTO_DEV_NUM, 0,
703                                          "LNX_VIRTIO_VNET", bars,
704                                          NULL, NULL, NULL, vnet_state);
705
706         if (!pci_dev) {
707             PrintError("Could not register PCI Device\n");
708             return -1;
709         }
710         
711         pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
712         pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
713         pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
714         pci_dev->config_header.class = PCI_CLASS_MEMORY;
715         pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
716         pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
717         pci_dev->config_header.intr_pin = 1;
718         pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
719
720
721         vnet_state->pci_dev = pci_dev;
722         vnet_state->pci_bus = pci_bus;
723     }
724
725     virtio_reset(vnet_state);
726
727     V3_Print("Registering Virtio device as vnet bridge\n");
728     v3_vnet_add_bridge(vm, vnet_pkt_input_cb, vnet_pkt_input_xcall, polling_pkt_from_guest, 0, 500000, (void *)vnet_state);
729
730
731     return 0;
732 }
733
734
// Register the constructor with the device manager under the name used
// in VM configuration files.
device_register("LNX_VIRTIO_VNET", dev_init)