Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


All updates on the VNET during summer
[palacios.git] / palacios / src / devices / lnx_virtio_vnet.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11  * Copyright (c) 2008, Lei Xia <lxia@cs.northwestern.edu>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Jack Lange <jarusl@cs.northwestern.edu>
16  *             Lei Xia <lxia@cs.northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21
22 #include <palacios/vmm.h>
23 #include <palacios/vmm_dev_mgr.h>
24 #include <palacios/vm_guest_mem.h>
25 #include <devices/lnx_virtio_pci.h>
26 #include <palacios/vmm_vnet.h>
27 #include <palacios/vmm_sprintf.h>
28 #include <devices/pci.h>
29
30
31 #ifndef CONFIG_DEBUG_LINUX_VIRTIO_VNET
32 #undef PrintDebug
33 #define PrintDebug(fmt, args...)
34 #endif
35
36
37 #define QUEUE_SIZE 4096
38 #define CMD_QUEUE_SIZE 128
39 #define NUM_QUEUES 3
40
/* Device-specific virtio config space exposed to the guest immediately after
 * the standard virtio registers (see the default case of vnet_virtio_io_read). */
struct vnet_config {
    uint32_t num_devs;    // number of VNET devices/links
    uint32_t num_routes;  // number of installed VNET routes
} __attribute__((packed));
45
46
47 #define CTRL_QUEUE 0
48 #define XMIT_QUEUE 1
49 #define RECV_QUEUE 2
50
/* Per-instance state for the virtio VNET bridge device. */
struct virtio_vnet_state {
    struct v3_vm_info * vm;             // guest VM this bridge instance serves
    struct vnet_config vnet_cfg;        // device-specific config space contents
    struct virtio_config virtio_cfg;    // standard virtio register block

    struct vm_device * pci_bus;         // PCI bus the device is registered on
    struct pci_device * pci_dev;        // our PCI function (used for IRQ raise/lower)
        
    struct virtio_queue queue[NUM_QUEUES];  // CTRL_QUEUE / XMIT_QUEUE / RECV_QUEUE

    struct virtio_queue * cur_queue;    // queue selected via VRING_Q_SEL_PORT

    int io_range_size;                  // size of the BAR0 I/O port window (power of 2)
    v3_lock_t lock;                     // protects the RX path (vnet_pkt_input_cb)

    // statistics, reported periodically when CONFIG_VNET_PROFILE is set
    ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
    int ready;                          // set once the RECV_QUEUE ring is mapped
};
69
70 #define VNET_GET_ROUTES 10
71 #define VNET_ADD_ROUTE 11
72 #define VNET_DEL_ROUTE 12
73
74 #define VNET_GET_LINKS 20
75 #define VNET_ADD_LINK 21
76 #define VNET_DEL_LINK 22
77
// Header of a guest control-queue command chain: a command code (VNET_* above)
// followed by the number of payload descriptors that carry the command bodies.
struct vnet_ctrl_hdr {
    uint8_t cmd_type;   // one of VNET_{GET,ADD,DEL}_{ROUTE,LINK}
    uint32_t num_cmds;  // number of payload entries following the header
} __attribute__((packed));
83
84
/* Wire format of a packet exchanged with the guest over the XMIT/RECV rings. */
struct vnet_bridge_pkt {
    uint32_t link_id;                 // VNET link the packet belongs to
    uint32_t pkt_size;                // valid bytes in pkt[]
    uint8_t pkt[ETHERNET_PACKET_LEN]; // packet data (fixed-size buffer)
}__attribute__((packed));
90
91
92 static int virtio_reset(struct virtio_vnet_state * vnet_state) {
93
94     memset(vnet_state->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);
95
96     vnet_state->cur_queue = &(vnet_state->queue[0]);
97
98     vnet_state->virtio_cfg.status = 0;
99     vnet_state->virtio_cfg.pci_isr = 0;
100
101     vnet_state->queue[0].queue_size = CMD_QUEUE_SIZE;
102     vnet_state->queue[1].queue_size = QUEUE_SIZE;
103     vnet_state->queue[2].queue_size = QUEUE_SIZE;
104
105     memset(&(vnet_state->vnet_cfg), 0, sizeof(struct vnet_config));
106     v3_lock_init(&(vnet_state->lock));
107
108     return 0;
109 }
110
111
112
113 static int get_desc_count(struct virtio_queue * q, int index) {
114     struct vring_desc * tmp_desc = &(q->desc[index]);
115     int cnt = 1;
116     
117     while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
118         tmp_desc = &(q->desc[tmp_desc->next]);
119         cnt++;
120     }
121
122     return cnt;
123 }
124
125
126
127
128 static int handle_cmd_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
129     struct virtio_queue * q = &(vnet_state->queue[0]);
130     
131     PrintDebug("VNET Bridge: Handling command  queue\n");
132
133     while (q->cur_avail_idx != q->avail->index) {
134         struct vring_desc * hdr_desc = NULL;
135         struct vring_desc * buf_desc = NULL;
136         struct vring_desc * status_desc = NULL;
137         uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
138         uint16_t desc_cnt = get_desc_count(q, desc_idx);
139         struct vnet_ctrl_hdr * hdr = NULL;
140         int i;
141         int xfer_len = 0;
142         uint8_t * status_ptr = NULL;
143         uint8_t status = 0;
144
145
146         PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
147
148         if (desc_cnt < 3) {
149             PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
150             return -1;
151         }
152         
153         hdr_desc = &(q->desc[desc_idx]);
154
155         if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
156             PrintError("Could not translate VirtioVNET header address\n");
157             return -1;
158         }
159
160         desc_idx = hdr_desc->next;
161         
162         if (hdr->cmd_type == VNET_ADD_ROUTE) {
163             
164             for (i = 0; i < hdr->num_cmds; i++) {
165                 uint8_t tmp_status = 0;
166                 struct v3_vnet_route * route = NULL;
167                 
168                 buf_desc = &(q->desc[desc_idx]);
169
170                 if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(route)) == -1) {
171                     PrintError("Could not translate route address\n");
172                     return -1;
173                 }
174
175                 // add route
176                 PrintDebug("VNET Bridge: Adding VNET Route\n");
177
178                 tmp_status = v3_vnet_add_route(*route);
179
180                 PrintDebug("VNET Route Added\n");
181
182                 if (tmp_status != 0) {
183                     PrintError("Error adding VNET ROUTE\n");
184                     status = tmp_status;
185                 }
186
187                 xfer_len += buf_desc->length;
188                 desc_idx = buf_desc->next;
189             }
190
191         } 
192
193
194
195         status_desc = &(q->desc[desc_idx]);
196
197         if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
198             PrintError("VirtioVNET Error could not translate status address\n");
199             return -1;
200         }
201
202         xfer_len += status_desc->length;
203         *status_ptr = status;
204
205         PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
206         q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
207         q->used->ring[q->used->index % QUEUE_SIZE].length = xfer_len; // set to total inbound xfer length
208
209         q->used->index++;
210         q->cur_avail_idx++;
211     }
212
213
214     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
215         PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
216         v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
217         vnet_state->virtio_cfg.pci_isr = 1;
218     }
219
220
221     return 0;
222 }
223
224
/*
 * Bridge input callback: deliver a batch of VNET packets into the guest's
 * RECV_QUEUE.  Runs under vnet_state->lock with interrupts saved, since it can
 * be invoked from the VNET core on another CPU (see vnet_pkt_input_xcall).
 *
 * For each packet, one available guest buffer is consumed and filled with a
 * vnet_bridge_pkt (link id, size, data).  If the avail ring is empty the
 * packet is silently dropped and counted in pkt_drop.
 *
 * Returns 0 once at least one ring slot was processed, -1 if the queue is not
 * yet configured or a buffer address fails to translate.
 */
static int vnet_pkt_input_cb(struct v3_vm_info * vm,  struct v3_vnet_pkt vnet_pkts[], uint16_t pkt_num, void * private_data){
    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
    struct virtio_queue * q = &(vnet_state->queue[RECV_QUEUE]);
    int ret_val = -1;
    unsigned long flags;
    uint16_t sent;
    struct v3_vnet_pkt *pkt;

    // pkt_num is unsigned, so this only filters the empty-batch case
    if(pkt_num <= 0)
        return 0;

    flags = v3_lock_irqsave(vnet_state->lock);
        
    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        goto exit;
    }

    PrintDebug("VNET Bridge: RX: running on cpu: %d, num of pkts: %d\n", V3_Get_CPU(), pkt_num);

    for(sent = 0; sent < pkt_num; sent ++) {
        pkt = &vnet_pkts[sent];
        vnet_state->pkt_recv ++;

        if (q->cur_avail_idx != q->avail->index) {
            uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
            struct vring_desc * pkt_desc = NULL;
            struct vnet_bridge_pkt * virtio_pkt = NULL;

            pkt_desc = &(q->desc[pkt_idx]);
            PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);

            // translation is done against core 0; assumes the ring pages are
            // mapped identically for that core
            if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
                PrintError("Could not translate buffer address\n");
                goto exit;
            }

            PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);

            // Fill in dst packet buffer
            virtio_pkt->link_id = pkt->dst_id;
            virtio_pkt->pkt_size = pkt->size;
            memcpy(virtio_pkt->pkt, pkt->data, pkt->size);
        
            q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
            q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); 

            q->used->index++;
            q->cur_avail_idx++;
        } else {
            // no guest buffer available: drop (counted), do not stall the batch
            vnet_state->pkt_drop ++;
            //v3_vnet_disable_bridge();
        }
    }

    // sent counts loop iterations (including drops), so this only triggers
    // when pkt_num was 0 -- already handled above
    if(sent == 0){
        goto exit;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
        vnet_state->virtio_cfg.pci_isr = 0x1;
        PrintDebug("Raising IRQ %d\n",  vnet_state->pci_dev->config_header.intr_line);
    }

    ret_val = 0;

        
#ifdef CONFIG_VNET_PROFILE
    if (vnet_state->pkt_recv % 20000 == 0)
        PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
                        vnet_state->pkt_sent,
                        vnet_state->pkt_recv,
                        vnet_state->pkt_drop, 
                        vnet_state->total_exit,
                        vnet_state->tx_exit,
                        vnet_state->rx_exit);
#endif

exit:

    v3_unlock_irqrestore(vnet_state->lock, flags);
 
    return ret_val;
}
310
311 static int vnet_pkt_input_xcall(void *data){
312     struct v3_vnet_bridge_xcall_args *args = (struct v3_vnet_bridge_xcall_args *)data;
313         
314     return vnet_pkt_input_cb(args->vm, args->vnet_pkts, args->pkt_num, args->private_data);
315 }
316
317 static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) 
318 {
319     struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
320     int recvd = 0;
321         
322     if (q->ring_avail_addr == 0) {
323         return -1;
324     }
325
326     while (q->cur_avail_idx != q->avail->index) {
327         uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
328         struct vring_desc * pkt_desc = NULL;
329         struct vnet_bridge_pkt * virtio_pkt = NULL;
330
331         pkt_desc = &(q->desc[desc_idx]);
332
333         PrintDebug("VNET Bridge: Handle TX desc buf_len: %d\n", pkt_desc->length);
334         
335         if (v3_gpa_to_hva(core, pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
336             PrintError("Could not translate buffer address\n");
337             return -1;
338         }
339
340         //PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);
341
342         struct v3_vnet_pkt pkt;
343         pkt.size = virtio_pkt->pkt_size;
344         pkt.src_type = LINK_EDGE;
345         pkt.src_id = 0;
346         memcpy(pkt.header, virtio_pkt->pkt, ETHERNET_HEADER_LEN);
347         pkt.data = virtio_pkt->pkt;
348
349         v3_vnet_send_pkt(&pkt, NULL);
350         
351         q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
352         q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
353         q->used->index++;
354
355         vnet_state->pkt_sent ++;
356         recvd ++;
357
358         q->cur_avail_idx++;
359     }
360
361     if(recvd == 0){
362         return 0;
363     }
364
365     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
366             v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
367             vnet_state->virtio_cfg.pci_isr = 0x1;
368     }
369
370     //PrintError("Virtio VNET: polling %d pkts\n", recvd);
371         
372 #ifdef CONFIG_VNET_PROFILE
373     if (vnet_state->pkt_sent % 20000 == 0)
374         PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
375                         vnet_state->pkt_sent,
376                         vnet_state->pkt_recv,
377                         vnet_state->pkt_drop, 
378                         vnet_state->total_exit,
379                         vnet_state->tx_exit,
380                         vnet_state->rx_exit);
381 #endif
382
383     return 0;
384 }
385
386 static void vnet_virtio_poll(struct v3_vm_info * vm, void *private_data){
387     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
388
389     if(vm == vnet_state->vm){   
390         handle_pkt_kick(&(vm->cores[0]), vnet_state);
391     }
392 }
393
/* Kick handler for the RX (guest buffer refill) queue.  Currently a no-op:
 * the commented call suggests it once re-enabled bridge delivery when the
 * guest posted fresh receive buffers.  Always returns 0. */
static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) 
{
    //v3_vnet_enable_bridge();
        
    return 0;
}
400
401 static int vnet_virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
402     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
403     int port_idx = port % vnet_state->io_range_size;
404
405     PrintDebug("VNET Bridge: VIRTIO VNET Write for port %d len=%d, value=%x\n", 
406                port, length, *(uint32_t *)src);
407
408     vnet_state->total_exit ++;
409     switch (port_idx) {
410         case GUEST_FEATURES_PORT:
411             if (length != 4) {
412                 PrintError("Illegal write length for guest features\n");
413                 return -1;
414             }    
415             vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;
416
417             break;
418         case VRING_PG_NUM_PORT:
419             if (length == 4) {
420                 addr_t pfn = *(uint32_t *)src;
421                 addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
422
423                 vnet_state->cur_queue->pfn = pfn;
424                 
425                 vnet_state->cur_queue->ring_desc_addr = page_addr ;
426                 vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
427                 vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
428                                                  sizeof(struct vring_avail)    + \
429                                                  (QUEUE_SIZE * sizeof(uint16_t)));
430                 
431                 // round up to next page boundary.
432                 vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
433
434                 if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
435                     PrintError("Could not translate ring descriptor address\n");
436                     return -1;
437                 }
438
439                 if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
440                     PrintError("Could not translate ring available address\n");
441                     return -1;
442                 }
443
444                 if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
445                     PrintError("Could not translate ring used address\n");
446                     return -1;
447                 }
448
449                 PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
450                            (void *)(vnet_state->cur_queue->ring_desc_addr),
451                            (void *)(vnet_state->cur_queue->ring_avail_addr),
452                            (void *)(vnet_state->cur_queue->ring_used_addr));
453
454                 PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n", 
455                            vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);
456
457                 if(vnet_state->queue[RECV_QUEUE].avail != NULL){
458                     vnet_state->ready = 1;
459                     vnet_state->queue[RECV_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
460                 }
461
462                 //No notify when there is pkt tx from guest
463                 //palacios will do the polling
464                 if(vnet_state->queue[XMIT_QUEUE].used != NULL){
465                     vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
466                 }
467             } else {
468                 PrintError("Illegal write length for page frame number\n");
469                 return -1;
470             }
471             break;
472         case VRING_Q_SEL_PORT:
473             vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
474
475             if (vnet_state->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
476                 PrintError("VNET Bridge device has no qeueues. Selected %d\n", 
477                            vnet_state->virtio_cfg.vring_queue_selector);
478                 return -1;
479             }
480             
481             vnet_state->cur_queue = &(vnet_state->queue[vnet_state->virtio_cfg.vring_queue_selector]);
482
483             break;
484         case VRING_Q_NOTIFY_PORT: {
485             uint16_t queue_idx = *(uint16_t *)src;
486
487             PrintDebug("VNET Bridge: Handling Kick\n");
488
489             if (queue_idx == 0) {
490                 if (handle_cmd_kick(core, vnet_state) == -1) {
491                     PrintError("Could not handle Virtio VNET Control command\n");
492                     return -1;
493                 }
494             } else if (queue_idx == 1) {
495                 if (handle_pkt_kick(core, vnet_state) == -1){
496                     PrintError("Could not handle Virtio VNET TX\n");
497                     return -1;
498                 }
499                 PrintError("Notify on TX\n");
500             } else if (queue_idx == 2) {
501                 if (handle_rx_kick(core, vnet_state) == -1){
502                     PrintError("Could not handle Virtio RX buffer refills Kick\n");
503                     return -1;
504                 }
505                 vnet_state->rx_exit ++;
506             } else {
507                 PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
508                 return -1;
509             }
510
511             break;
512         }
513         case VIRTIO_STATUS_PORT:
514             vnet_state->virtio_cfg.status = *(uint8_t *)src;
515
516             if (vnet_state->virtio_cfg.status == 0) {
517                 PrintDebug("VNET Bridge: Resetting device\n");
518                 virtio_reset(vnet_state);
519             }
520
521             break;
522
523         case VIRTIO_ISR_PORT:
524             vnet_state->virtio_cfg.pci_isr = *(uint8_t *)src;
525             break;
526         default:
527             return -1;
528             break;
529     }
530
531     return length;
532 }
533
534
535 static int vnet_virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
536
537     struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
538     int port_idx = port % vnet_state->io_range_size;
539
540     switch (port_idx) {
541         case HOST_FEATURES_PORT:
542             if (length != 4) {
543                 PrintError("Illegal read length for host features\n");
544                 return -1;
545             }
546
547             *(uint32_t *)dst = vnet_state->virtio_cfg.host_features;
548         
549             break;
550         case VRING_PG_NUM_PORT:
551             if (length != 4) {
552                 PrintError("Illegal read length for page frame number\n");
553                 return -1;
554             }
555
556             *(uint32_t *)dst = vnet_state->cur_queue->pfn;
557
558             break;
559         case VRING_SIZE_PORT:
560             if (length != 2) {
561                 PrintError("Illegal read length for vring size\n");
562                 return -1;
563             }
564                 
565             *(uint16_t *)dst = vnet_state->cur_queue->queue_size;
566
567             break;
568
569         case VIRTIO_STATUS_PORT:
570             if (length != 1) {
571                 PrintError("Illegal read length for status\n");
572                 return -1;
573             }
574
575             *(uint8_t *)dst = vnet_state->virtio_cfg.status;
576             break;
577
578         case VIRTIO_ISR_PORT:
579             *(uint8_t *)dst = vnet_state->virtio_cfg.pci_isr;
580             vnet_state->virtio_cfg.pci_isr = 0;
581             v3_pci_lower_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
582             break;
583
584         default:
585             if ( (port_idx >= sizeof(struct virtio_config)) && 
586                  (port_idx < (sizeof(struct virtio_config) + sizeof(struct vnet_config))) ) {
587                 int cfg_offset = port_idx - sizeof(struct virtio_config);
588                 uint8_t * cfg_ptr = (uint8_t *)&(vnet_state->vnet_cfg);
589
590                 memcpy(dst, cfg_ptr + cfg_offset, length);
591                 
592             } else {
593                 PrintError("Read of Unhandled Virtio Read\n");
594                 return -1;
595             }
596           
597             break;
598     }
599
600     return length;
601 }
602
603
604
// Generic device-manager operations table.  All hooks are intentionally
// unimplemented: device reset is driven through the virtio status register
// (see virtio_reset) rather than the device manager.
static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};
611
612 static int dev_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
613     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
614     struct virtio_vnet_state * vnet_state = NULL;
615     struct pci_device * pci_dev = NULL;
616     char * name = v3_cfg_val(cfg, "name");
617
618     PrintDebug("VNET Bridge: Initializing VNET Bridge Control device: %s\n", name);
619
620     if (pci_bus == NULL) {
621         PrintError("VNET Bridge device require a PCI Bus");
622         return -1;
623     }
624     
625     vnet_state  = (struct virtio_vnet_state *)V3_Malloc(sizeof(struct virtio_vnet_state));
626     memset(vnet_state, 0, sizeof(struct virtio_vnet_state));
627         
628     vnet_state->vm = vm;
629
630     struct vm_device * dev = v3_allocate_device(name, &dev_ops, vnet_state);
631
632     if (v3_attach_device(vm, dev) == -1) {
633         PrintError("Could not attach device %s\n", name);
634         return -1;
635     }
636
637
638     // PCI initialization
639     {
640         struct v3_pci_bar bars[6];
641         int num_ports = sizeof(struct virtio_config) + sizeof(struct vnet_config);
642         int tmp_ports = num_ports;
643         int i;
644
645         // This gets the number of ports, rounded up to a power of 2
646         vnet_state->io_range_size = 1; // must be a power of 2
647
648         while (tmp_ports > 0) {
649             tmp_ports >>= 1;
650             vnet_state->io_range_size <<= 1;
651         }
652         
653         // this is to account for any low order bits being set in num_ports
654         // if there are none, then num_ports was already a power of 2 so we shift right to reset it
655         if ((num_ports & ((vnet_state->io_range_size >> 1) - 1)) == 0) {
656             vnet_state->io_range_size >>= 1;
657         }
658
659         for (i = 0; i < 6; i++) {
660             bars[i].type = PCI_BAR_NONE;
661         }
662
663         bars[0].type = PCI_BAR_IO;
664         bars[0].default_base_port = -1;
665         bars[0].num_ports = vnet_state->io_range_size;
666         bars[0].io_read = vnet_virtio_io_read;
667         bars[0].io_write = vnet_virtio_io_write;
668         bars[0].private_data = vnet_state;
669
670         pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
671                                          0, 5 /*PCI_AUTO_DEV_NUM*/, 0,
672                                          "LNX_VIRTIO_VNET", bars,
673                                          NULL, NULL, NULL, vnet_state);
674
675         if (!pci_dev) {
676             PrintError("Could not register PCI Device\n");
677             return -1;
678         }
679         
680         pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
681         pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
682         pci_dev->config_header.device_id = VIRTIO_VNET_DEV_ID;
683         pci_dev->config_header.class = PCI_CLASS_MEMORY;
684         pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
685         pci_dev->config_header.subsystem_id = VIRTIO_VNET_SUBDEVICE_ID;
686         pci_dev->config_header.intr_pin = 1;
687         pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
688
689
690         vnet_state->pci_dev = pci_dev;
691         vnet_state->pci_bus = pci_bus;
692     }
693
694     virtio_reset(vnet_state);
695
696     struct v3_vnet_bridge_ops brg_ops;
697     brg_ops.input = vnet_pkt_input_cb;
698     brg_ops.polling_pkt = vnet_virtio_poll;
699     brg_ops.xcall_input = vnet_pkt_input_xcall;
700
701     V3_Print("Registering Virtio device as vnet bridge\n");
702     v3_vnet_add_bridge(vm, &brg_ops, (void *)vnet_state);
703
704     return 0;
705 }
706
707
708 device_register("LNX_VIRTIO_VNET", dev_init)