Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


virtio-blk now handles config requests for non-declared config info
[palacios.git] / palacios / src / devices / lnx_virtio_blk.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <devices/lnx_virtio_pci.h>
23 #include <palacios/vm_guest_mem.h>
24
25 #include <devices/pci.h>
26
27
28
29 #ifndef V3_CONFIG_DEBUG_VIRTIO_BLK
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34
35 #define SECTOR_SIZE 512
36
37 #define BLK_CAPACITY_PORT     20
38 #define BLK_MAX_SIZE_PORT     28
39 #define BLK_MAX_SEG_PORT      32
40 #define BLK_CYLINDERS_PORT    36
41 #define BLK_HEADS_PORT        38
42 #define BLK_SECTS_PORT        39
43
44 #define BLK_IN_REQ            0
45 #define BLK_OUT_REQ           1
46 #define BLK_SCSI_CMD          2
47
48 #define BLK_BARRIER_FLAG     0x80000000
49
50 #define BLK_STATUS_OK             0
51 #define BLK_STATUS_ERR            1
52 #define BLK_STATUS_NOT_SUPPORTED  2
53
54
/* Device-specific virtio block configuration space, exposed to the guest
 * through the I/O BAR immediately after the generic virtio registers
 * (see the BLK_*_PORT offsets above, and the default case of
 * virtio_io_read() which services reads from this struct). */
struct blk_config {
    uint64_t capacity;     // disk size in 512-byte sectors (set in connect_fn)
    uint32_t max_size;     // maximum segment size (meaningful with VIRTIO_SIZE_MAX)
    uint32_t max_seg;      // maximum segments per request (meaningful with VIRTIO_SEG_MAX)
    uint16_t cylinders;    // legacy geometry (meaningful with VIRTIO_LEGACY_GEOM)
    uint8_t heads;
    uint8_t sectors;
    /*
    uint32_t blk_size;
    uint8_t phys_block_exp;
    uint8_t alignment_offset;
    uint16_t min_io_size;
    uint32_t opt_io_size;
    */
} __attribute__((packed));
70
71
72
/* Header of a guest block request; occupies the first descriptor of
 * every request chain processed by handle_kick(). */
struct blk_op_hdr {
    uint32_t type;     // BLK_IN_REQ / BLK_OUT_REQ / BLK_SCSI_CMD (BLK_BARRIER_FLAG may be or'ed in)
    uint32_t prior;    // request priority; unused by this implementation
    uint64_t sector;   // starting sector; advanced as data buffers are processed
} __attribute__((packed));
78
79 #define QUEUE_SIZE 128
80
81 /* Host Feature flags */
82 #define VIRTIO_BARRIER       0x01       /* Does host support barriers? */
83 #define VIRTIO_SIZE_MAX      0x02       /* Indicates maximum segment size */
84 #define VIRTIO_SEG_MAX       0x04       /* Indicates maximum # of segments */
85 #define VIRTIO_LEGACY_GEOM   0x10       /* Indicates support of legacy geometry */
86
87
/* Frontend state shared by all virtio-blk instances in one VM:
 * the PCI bus to register on, and the list of attached devices. */
struct virtio_dev_state {
    struct vm_device * pci_bus;
    struct list_head dev_list;    // struct virtio_blk_state entries, linked via dev_link
};
92
/* Per-instance state for one virtio block device (one PCI function). */
struct virtio_blk_state {

    struct pci_device * pci_dev;      // PCI function registered in register_dev()
    struct blk_config block_cfg;      // device-specific config space shown to the guest
    struct virtio_config virtio_cfg;  // generic virtio register block

    
    struct virtio_queue queue;        // the single request virtqueue

    struct v3_dev_blk_ops * ops;      // backend read/write/get_capacity callbacks

    void * backend_data;              // opaque cookie handed back to the backend ops

    int io_range_size;                // I/O BAR size in ports; a power of 2

    struct virtio_dev_state * virtio_dev;  // owning frontend state

    struct list_head dev_link;        // entry in virtio_dev_state.dev_list
};
112
113
114
115
116 static int blk_reset(struct virtio_blk_state * virtio) {
117
118     virtio->queue.ring_desc_addr = 0;
119     virtio->queue.ring_avail_addr = 0;
120     virtio->queue.ring_used_addr = 0;
121     virtio->queue.pfn = 0;
122     virtio->queue.cur_avail_idx = 0;
123
124     virtio->virtio_cfg.status = 0;
125     virtio->virtio_cfg.pci_isr = 0;
126     return 0;
127 }
128
129
130
131
132 static int handle_read_op(struct virtio_blk_state * blk_state, uint8_t * buf, uint64_t * sector, uint64_t len) {
133     int ret = -1;
134
135     PrintDebug(info->vm_info, info, "Reading Disk\n");
136     ret = blk_state->ops->read(buf, (*sector) * SECTOR_SIZE, len, (void *)(blk_state->backend_data));
137     *sector += (len / SECTOR_SIZE);
138
139     return ret;
140 }
141
142
143 static int handle_write_op(struct virtio_blk_state * blk_state, uint8_t * buf, uint64_t * sector, uint64_t len) {
144     int ret = -1;
145
146     PrintDebug(info->vm_info, info, "Writing Disk\n");
147     ret = blk_state->ops->write(buf, (*sector) * SECTOR_SIZE, len, (void *)(blk_state->backend_data));
148     *sector += (len / SECTOR_SIZE);
149
150     return ret;
151 }
152
153
154
155 // multiple block operations need to increment the sector 
156
157 static int handle_block_op(struct guest_info * core, struct virtio_blk_state * blk_state, struct blk_op_hdr * hdr, 
158                            struct vring_desc * buf_desc, uint8_t * status) {
159     uint8_t * buf = NULL;
160
161     PrintDebug(core->vm_info, core, "Handling Block op\n");
162     if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
163         PrintError(core->vm_info, core, "Could not translate buffer address\n");
164         return -1;
165     }
166
167     PrintDebug(core->vm_info, core, "Sector=%p Length=%d\n", (void *)(addr_t)(hdr->sector), buf_desc->length);
168
169     if (hdr->type == BLK_IN_REQ) {
170         if (handle_read_op(blk_state, buf, &(hdr->sector), buf_desc->length) == -1) {
171             *status = BLK_STATUS_ERR;
172             return -1;
173         } else {
174             *status = BLK_STATUS_OK;
175         }
176     } else if (hdr->type == BLK_OUT_REQ) {
177         if (handle_write_op(blk_state, buf, &(hdr->sector), buf_desc->length) == -1) {
178             *status = BLK_STATUS_ERR;
179             return -1;
180         } else {
181             *status = BLK_STATUS_OK;
182         }
183     } else if (hdr->type == BLK_SCSI_CMD) {
184         PrintError(core->vm_info, core, "VIRTIO: SCSI Command Not supported!!!\n");
185         *status = BLK_STATUS_NOT_SUPPORTED;
186         return -1;
187     }
188
189     PrintDebug(core->vm_info, core, "Returning Status: %d\n", *status);
190
191     return 0;
192 }
193
194 static int get_desc_count(struct virtio_queue * q, int index) {
195     struct vring_desc * tmp_desc = &(q->desc[index]);
196     int cnt = 1;
197     
198     while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
199         tmp_desc = &(q->desc[tmp_desc->next]);
200         cnt++;
201     }
202
203     return cnt;
204 }
205
206
207
/* Queue-notify handler: drain the avail ring.
 * Each request is a descriptor chain of at least 3 entries:
 *   [0]      request header (struct blk_op_hdr)
 *   [1..n-2] data buffers
 *   [n-1]    one-byte status written back to the guest
 * Completed chains are posted on the used ring, and the PCI interrupt
 * is raised unless the guest set VIRTIO_NO_IRQ_FLAG.
 * Returns 0 on success, -1 on a malformed chain, a failed guest-address
 * translation, or a backend error. */
static int handle_kick(struct guest_info * core, struct virtio_blk_state * blk_state) {  
    struct virtio_queue * q = &(blk_state->queue);

    PrintDebug(core->vm_info, core, "VIRTIO KICK: cur_index=%d (mod=%d), avail_index=%d\n", 
               q->cur_avail_idx, q->cur_avail_idx % QUEUE_SIZE, q->avail->index);

    // Process every entry the guest has published since our last pass
    while (q->cur_avail_idx != q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        struct blk_op_hdr hdr;
        addr_t hdr_addr = 0;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        int desc_cnt = get_desc_count(q, desc_idx);
        int i = 0;
        uint8_t * status_ptr = NULL;
        uint8_t status = BLK_STATUS_OK;   // aggregated status across all data buffers
        uint32_t req_len = 0;             // total bytes handled; reported in the used ring

        PrintDebug(core->vm_info, core, "Descriptor Count=%d, index=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE);

        if (desc_cnt < 3) {
            PrintError(core->vm_info, core, "Block operations must include at least 3 descriptors\n");
            return -1;
        }

        hdr_desc = &(q->desc[desc_idx]);


        PrintDebug(core->vm_info, core, "Header Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", hdr_desc, 
                   (void *)(hdr_desc->addr_gpa), hdr_desc->length, hdr_desc->flags, hdr_desc->next);    

        if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, &(hdr_addr)) == -1) {
            PrintError(core->vm_info, core, "Could not translate block header address\n");
            return -1;
        }

        // We copy the block op header out because we are going to modify its contents
        // (handle_block_op advances hdr.sector after each data buffer)
        memcpy(&hdr, (void *)hdr_addr, sizeof(struct blk_op_hdr));
        
        PrintDebug(core->vm_info, core, "Blk Op Hdr (ptr=%p) type=%d, sector=%p\n", (void *)hdr_addr, hdr.type, (void *)hdr.sector);

        desc_idx = hdr_desc->next;

        // Middle of the chain: the data buffers (chain minus header and status)
        for (i = 0; i < desc_cnt - 2; i++) {
            uint8_t tmp_status = BLK_STATUS_OK;

            buf_desc = &(q->desc[desc_idx]);

            PrintDebug(core->vm_info, core, "Buffer Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", buf_desc, 
                       (void *)(buf_desc->addr_gpa), buf_desc->length, buf_desc->flags, buf_desc->next);

            if (handle_block_op(core, blk_state, &hdr, buf_desc, &tmp_status) == -1) {
                PrintError(core->vm_info, core, "Error handling block operation\n");
                return -1;
            }

            // Latch any non-OK status but keep processing the remaining buffers
            if (tmp_status != BLK_STATUS_OK) {
                status = tmp_status;
            }

            req_len += buf_desc->length;
            desc_idx = buf_desc->next;
        }

        // Last descriptor: the guest-visible status byte
        status_desc = &(q->desc[desc_idx]);

        PrintDebug(core->vm_info, core, "Status Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", status_desc, 
                   (void *)(status_desc->addr_gpa), status_desc->length, status_desc->flags, status_desc->next);

        if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&(status_ptr)) == -1) {
            PrintError(core->vm_info, core, "Could not translate status address\n");
            return -1;
        }

        req_len += status_desc->length;
        *status_ptr = status;

        // Publish the completed chain on the used ring
        q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        q->used->ring[q->used->index % QUEUE_SIZE].length = req_len; // What do we set this to????

        q->used->index++;
        q->cur_avail_idx++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug(core->vm_info, core, "Raising IRQ %d\n",  blk_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(blk_state->virtio_dev->pci_bus, blk_state->pci_dev, 0);
        blk_state->virtio_cfg.pci_isr = 1;
    }

    return 0;
}
301
/* Guest write to the device's I/O BAR.
 * `port_idx` is the offset within the BAR (the BAR base is a power-of-2
 * aligned multiple of io_range_size, so modulo recovers the offset).
 * Returns `length` on success, -1 on an illegal access. */
static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
    struct virtio_blk_state * blk_state = (struct virtio_blk_state *)private_data;
    int port_idx = port % blk_state->io_range_size;


    PrintDebug(core->vm_info, core, "VIRTIO BLOCK Write for port %d (index=%d) len=%d, value=%x\n", 
               port, port_idx,  length, *(uint32_t *)src);



    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            // Guest acknowledges the feature bits it will use (32-bit only)
            if (length != 4) {
                PrintError(core->vm_info, core, "Illegal write length for guest features\n");
                return -1;
            }
            
            blk_state->virtio_cfg.guest_features = *(uint32_t *)src;
            PrintDebug(core->vm_info, core, "Setting Guest Features to %x\n", blk_state->virtio_cfg.guest_features);

            break;
        case VRING_PG_NUM_PORT:
            // Guest supplies the page frame of the virtqueue; derive the
            // descriptor/avail/used ring addresses from the standard layout
            // and translate them to host virtual addresses up front.
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);


                blk_state->queue.pfn = pfn;
                
                blk_state->queue.ring_desc_addr = page_addr ;
                blk_state->queue.ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                blk_state->queue.ring_used_addr = ( blk_state->queue.ring_avail_addr + \
                                                 sizeof(struct vring_avail)    + \
                                                 (QUEUE_SIZE * sizeof(uint16_t)));
                
                // round up to next page boundary.
                blk_state->queue.ring_used_addr = (blk_state->queue.ring_used_addr + 0xfff) & ~0xfff;

                if (v3_gpa_to_hva(core, blk_state->queue.ring_desc_addr, (addr_t *)&(blk_state->queue.desc)) == -1) {
                    PrintError(core->vm_info, core, "Could not translate ring descriptor address\n");
                    return -1;
                }


                if (v3_gpa_to_hva(core, blk_state->queue.ring_avail_addr, (addr_t *)&(blk_state->queue.avail)) == -1) {
                    PrintError(core->vm_info, core, "Could not translate ring available address\n");
                    return -1;
                }


                if (v3_gpa_to_hva(core, blk_state->queue.ring_used_addr, (addr_t *)&(blk_state->queue.used)) == -1) {
                    PrintError(core->vm_info, core, "Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug(core->vm_info, core, "RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(blk_state->queue.ring_desc_addr),
                           (void *)(blk_state->queue.ring_avail_addr),
                           (void *)(blk_state->queue.ring_used_addr));

                PrintDebug(core->vm_info, core, "RingDesc=%p, Avail=%p, Used=%p\n", 
                           blk_state->queue.desc, blk_state->queue.avail, blk_state->queue.used);

            } else {
                PrintError(core->vm_info, core, "Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            // Only queue 0 exists on this device
            blk_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            if (blk_state->virtio_cfg.vring_queue_selector != 0) {
                PrintError(core->vm_info, core, "Virtio Block device only uses 1 queue, selected %d\n", 
                           blk_state->virtio_cfg.vring_queue_selector);
                return -1;
            }

            break;
        case VRING_Q_NOTIFY_PORT:
            // Kick: the guest has published new requests on the avail ring
            PrintDebug(core->vm_info, core, "Handling Kick\n");
            if (handle_kick(core, blk_state) == -1) {
                PrintError(core->vm_info, core, "Could not handle Block Notification\n");
                return -1;
            }
            break;
        case VIRTIO_STATUS_PORT:
            // Writing 0 to the status register is a device reset request
            blk_state->virtio_cfg.status = *(uint8_t *)src;

            if (blk_state->virtio_cfg.status == 0) {
                PrintDebug(core->vm_info, core, "Resetting device\n");
                blk_reset(blk_state);
            }

            break;

        case VIRTIO_ISR_PORT:
            blk_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            // Writes to any other offset (including blk_config) are rejected
            return -1;
            break;
    }

    return length;
}
407
408
/* Guest read from the device's I/O BAR.
 * Handles the generic virtio registers explicitly; any offset beyond
 * them that still falls inside struct blk_config is serviced directly
 * from block_cfg (this is how config fields with no named case, declared
 * or not, are handled). Anything else reads as 0.
 * Returns `length` on success, -1 on an illegal access. */
static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
    struct virtio_blk_state * blk_state = (struct virtio_blk_state *)private_data;
    int port_idx = port % blk_state->io_range_size;


    PrintDebug(core->vm_info, core, "VIRTIO BLOCK Read  for port %d (index =%d), length=%d\n", 
               port, port_idx, length);


    switch (port_idx) {
        // Sub-word reads are allowed anywhere inside the 4-byte register,
        // as long as they do not run past its end.
        case HOST_FEATURES_PORT:
        case HOST_FEATURES_PORT + 1:
        case HOST_FEATURES_PORT + 2:
        case HOST_FEATURES_PORT + 3:
            if (port_idx + length > HOST_FEATURES_PORT + 4) {
                PrintError(core->vm_info, core, "Illegal read length for host features (len=%d)\n", length);
                return -1;
            }

            memcpy(dst, &(blk_state->virtio_cfg.host_features), length);
            break;
        case VRING_PG_NUM_PORT:
        case VRING_PG_NUM_PORT + 1:
        case VRING_PG_NUM_PORT + 2:
        case VRING_PG_NUM_PORT + 3:
            if (port_idx + length > VRING_PG_NUM_PORT + 4) {
                PrintError(core->vm_info, core, "Illegal read length for vring pg num (len=%d)\n", length);
                return -1;
            }

            memcpy(dst, &(blk_state->queue.pfn), length);
            break;
        case VRING_SIZE_PORT:
        case VRING_SIZE_PORT + 1:
            if (length > 2) {
                PrintError(core->vm_info, core, "Illegal read length for vring size (len=%d)\n", length);
                return -1;
            }
            
            memcpy(dst, &(blk_state->queue.queue_size), length);

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError(core->vm_info, core, "Illegal read length for status (len=%d)\n", length);
                return -1;
            }

            *(uint8_t *)dst = blk_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            // Reading the ISR acknowledges and deasserts the interrupt
            *(uint8_t *)dst = blk_state->virtio_cfg.pci_isr;
            blk_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(blk_state->virtio_dev->pci_bus, blk_state->pci_dev, 0);
            break;

        default:
            // Device-specific config space starts right after virtio_config;
            // serve reads that fall within blk_config from the struct itself.
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct blk_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(blk_state->block_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);
                
            } else {
                // Anything past the config space reads as zero rather than failing

                PrintError(core->vm_info,core,"Read of Unhandled Virtio Read. Returning 0\n");
                if (length == 1) {
                    *(uint8_t *)dst = 0;
                } else if (length == 2) {
                    *(uint16_t *)dst = 0;
                } else if (length == 4) {
                    *(uint32_t *)dst = 0;
                }

            }
          
            break;
    }

    return length;
}
493
494
495 static int virtio_free(struct virtio_dev_state * virtio) {
496     struct virtio_blk_state * blk_state = NULL;
497     struct virtio_blk_state * tmp = NULL;
498
499     list_for_each_entry_safe(blk_state, tmp, &(virtio->dev_list), dev_link) {
500
501         // unregister from PCI
502
503         list_del(&(blk_state->dev_link));
504         V3_Free(blk_state);
505     }
506     
507
508     V3_Free(virtio);
509
510     return 0;
511 }
512
513
514
/* Device-manager interface: only teardown is needed here; the I/O
 * handlers are wired up through the PCI BAR in register_dev(). */
static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))virtio_free,

};
519
520
521
522
523
/* Create the PCI presence for one block instance: size and register the
 * I/O BAR, fill in the virtio PCI IDs, link the instance into the
 * frontend's device list, and reset it.
 * Returns 0 on success, -1 if PCI registration fails (in which case
 * blk_state has NOT been added to the device list). */
static int register_dev(struct virtio_dev_state * virtio, struct virtio_blk_state * blk_state) {
    // initialize PCI
    struct pci_device * pci_dev = NULL;
    struct v3_pci_bar bars[6];
    // The BAR must cover both the generic virtio registers and blk_config
    int num_ports = sizeof(struct virtio_config) + sizeof(struct blk_config);
    int tmp_ports = num_ports;
    int i;



    // This gets the number of ports, rounded up to a power of 2
    blk_state->io_range_size = 1; // must be a power of 2
    
    // One doubling per significant bit of num_ports, i.e. round up to
    // the power of 2 strictly above the top bit
    while (tmp_ports > 0) {
        tmp_ports >>= 1;
        blk_state->io_range_size <<= 1;
    }
        
    // this is to account for any low order bits being set in num_ports
    // if there are none, then num_ports was already a power of 2 so we shift right to reset it
    if ((num_ports & ((blk_state->io_range_size >> 1) - 1)) == 0) {
        blk_state->io_range_size >>= 1;
    }
    
    
    for (i = 0; i < 6; i++) {
        bars[i].type = PCI_BAR_NONE;
    }
    
    PrintDebug(VM_NONE, VCORE_NONE, "Virtio-BLK io_range_size = %d\n", blk_state->io_range_size);
    
    // BAR 0: the I/O register window handled by virtio_io_read/write
    bars[0].type = PCI_BAR_IO;
    bars[0].default_base_port = -1;    // let the guest/BIOS assign the base
    bars[0].num_ports = blk_state->io_range_size;
    
    bars[0].io_read = virtio_io_read;
    bars[0].io_write = virtio_io_write;
    bars[0].private_data = blk_state;
    
    pci_dev = v3_pci_register_device(virtio->pci_bus, PCI_STD_DEVICE, 
                                     0, PCI_AUTO_DEV_NUM, 0,
                                     "LNX_VIRTIO_BLK", bars,
                                     NULL, NULL, NULL, NULL, blk_state);
    
    if (!pci_dev) {
        PrintError(VM_NONE, VCORE_NONE, "Could not register PCI Device\n");
        return -1;
    }
    
    // Standard virtio PCI identity for a block device
    pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
    pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
    
    
    pci_dev->config_header.device_id = VIRTIO_BLOCK_DEV_ID;
    pci_dev->config_header.class = PCI_CLASS_STORAGE;
    pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_OTHER;
    
    pci_dev->config_header.subsystem_id = VIRTIO_BLOCK_SUBDEVICE_ID;
    
    
    pci_dev->config_header.intr_pin = 1;     // INTA#
    
    pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
    
    
    blk_state->pci_dev = pci_dev;


    /* Add backend to list of devices */
    list_add(&(blk_state->dev_link), &(virtio->dev_list));
    
    /* Block configuration: advertise only the segment-count limit */
    blk_state->virtio_cfg.host_features = VIRTIO_SEG_MAX;
    // header + status descriptors are not data segments, hence the -2
    blk_state->block_cfg.max_seg = QUEUE_SIZE - 2;


    // Virtio Block only uses one queue
    blk_state->queue.queue_size = QUEUE_SIZE;

    blk_state->virtio_dev = virtio;

    blk_reset(blk_state);


    return 0;
}
610
611
612 static int connect_fn(struct v3_vm_info * vm, 
613                       void * frontend_data, 
614                       struct v3_dev_blk_ops * ops, 
615                       v3_cfg_tree_t * cfg, 
616                       void * private_data) {
617
618     struct virtio_dev_state * virtio = (struct virtio_dev_state *)frontend_data;
619
620     struct virtio_blk_state * blk_state  = (struct virtio_blk_state *)V3_Malloc(sizeof(struct virtio_blk_state));
621
622     if (!blk_state) {
623         PrintError(vm, VCORE_NONE, "Cannot allocate in connect\n");
624         return -1;
625     }
626
627
628     memset(blk_state, 0, sizeof(struct virtio_blk_state));
629
630     register_dev(virtio, blk_state);
631
632     blk_state->ops = ops;
633     blk_state->backend_data = private_data;
634
635     blk_state->block_cfg.capacity = ops->get_capacity(private_data) / SECTOR_SIZE;
636
637     PrintDebug(vm, VCORE_NONE, "Virtio Capacity = %d -- 0x%p\n", (int)(blk_state->block_cfg.capacity), 
638                (void *)(addr_t)(blk_state->block_cfg.capacity));
639
640     return 0;
641 }
642
643
644 static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
645     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
646     struct virtio_dev_state * virtio_state = NULL;
647     char * dev_id = v3_cfg_val(cfg, "ID");
648
649     PrintDebug(vm, VCORE_NONE, "Initializing VIRTIO Block device\n");
650
651     if (pci_bus == NULL) {
652         PrintError(vm, VCORE_NONE, "VirtIO devices require a PCI Bus");
653         return -1;
654     }
655
656
657     virtio_state  = (struct virtio_dev_state *)V3_Malloc(sizeof(struct virtio_dev_state));
658
659     if (!virtio_state) {
660         PrintError(vm, VCORE_NONE, "Cannot allocate in init\n");
661         return -1;
662     }
663
664     memset(virtio_state, 0, sizeof(struct virtio_dev_state));
665
666     INIT_LIST_HEAD(&(virtio_state->dev_list));
667     virtio_state->pci_bus = pci_bus;
668
669
670     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, virtio_state);
671
672     if (dev == NULL) {
673         PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
674         V3_Free(virtio_state);
675         return -1;
676     }
677
678     if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)virtio_state) == -1) {
679         PrintError(vm, VCORE_NONE, "Could not register %s as block frontend\n", dev_id);
680         v3_remove_device(dev);
681         return -1;
682     }
683
684     return 0;
685 }
686
687
/* Register the init routine with the Palacios device manager under the
 * name referenced by VM configuration files. */
device_register("LNX_VIRTIO_BLK", virtio_init)