Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; an example follows.
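For example, to create a local tracking branch for a release branch (the branch name Release-1.2 below is only illustrative; run git branch -r first to see which branches actually exist):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2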


Commit: change pci devices to be more generic
File:   palacios/src/devices/lnx_virtio_blk.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
#include <devices/lnx_virtio_pci.h>
#include <devices/lnx_virtio_blk.h>
#include <devices/block_dev.h>
#include <palacios/vm_guest_mem.h>

#include <devices/pci.h>



#ifndef CONFIG_DEBUG_VIRTIO_BLK
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


#define BLK_CAPACITY_PORT     20
#define BLK_MAX_SIZE_PORT     28
#define BLK_MAX_SEG_PORT      32
#define BLK_CYLINDERS_PORT    36
#define BLK_HEADS_PORT        38
#define BLK_SECTS_PORT        39

#define BLK_IN_REQ            0
#define BLK_OUT_REQ           1
#define BLK_SCSI_CMD          2

#define BLK_BARRIER_FLAG     0x80000000

#define BLK_STATUS_OK             0
#define BLK_STATUS_ERR            1
#define BLK_STATUS_NOT_SUPPORTED  2


struct blk_config {
    uint64_t capacity;
    uint32_t max_size;
    uint32_t max_seg;
    uint16_t cylinders;
    uint8_t heads;
    uint8_t sectors;
} __attribute__((packed));



struct blk_op_hdr {
    uint32_t type;
    uint32_t prior;
    uint64_t sector;
} __attribute__((packed));

#define QUEUE_SIZE 128

/* Host Feature flags */
#define VIRTIO_BARRIER       0x01       /* Does host support barriers? */
#define VIRTIO_SIZE_MAX      0x02       /* Indicates maximum segment size */
#define VIRTIO_SEG_MAX       0x04       /* Indicates maximum # of segments */
#define VIRTIO_LEGACY_GEOM   0x10       /* Indicates support of legacy geometry */

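/* Shared state for the virtio block front-end: the PCI bus the devices sit on,
 * the guest that owns them, and the list of per-disk instances created by the
 * register calls at the bottom of this file. Each registered disk or cdrom
 * gets its own virtio_blk_state below. */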
struct virtio_dev_state {
    struct vm_device * pci_bus;
    struct list_head dev_list;
    struct guest_info * vm;
};

struct virtio_blk_state {

    struct pci_device * pci_dev;
    struct blk_config block_cfg;
    struct virtio_config virtio_cfg;


    struct virtio_queue queue;

    union {
        struct v3_cd_ops * cd_ops;
        struct v3_hd_ops * hd_ops;
    };

    v3_block_type_t block_type;
    void * backend_data;

    int io_range_size;

    struct virtio_dev_state * virtio_dev;

    struct list_head dev_link;
};


static int virtio_free(struct vm_device * dev) {
    return -1;
}

static int blk_reset(struct virtio_blk_state * virtio) {

    virtio->queue.ring_desc_addr = 0;
    virtio->queue.ring_avail_addr = 0;
    virtio->queue.ring_used_addr = 0;
    virtio->queue.pfn = 0;
    virtio->queue.cur_avail_idx = 0;

    virtio->virtio_cfg.status = 0;
    virtio->virtio_cfg.pci_isr = 0;
    return 0;
}


static int virtio_reset(struct vm_device * dev) {
    struct virtio_dev_state * dev_state = (struct virtio_dev_state *)(dev->private_data);
    struct virtio_blk_state * blk_state = NULL;

    list_for_each_entry(blk_state, &(dev_state->dev_list), dev_link) {
        blk_reset(blk_state);
    }

    return 0;
}

static int handle_read_op(struct virtio_blk_state * blk_state, uint8_t * buf, uint64_t * sector, uint32_t len) {
    int ret = -1;

    if (blk_state->block_type == BLOCK_DISK) {
        if (len % HD_SECTOR_SIZE) {
            PrintError("Read of something that is not a sector len %d, mod=%d\n", len, len % HD_SECTOR_SIZE);
            return -1;
        }


        PrintDebug("Reading Disk\n");

        ret = blk_state->hd_ops->read(buf, len / HD_SECTOR_SIZE, *sector, blk_state->backend_data);

        *sector += len / HD_SECTOR_SIZE;

    } else if (blk_state->block_type == BLOCK_CDROM) {
        if (len % ATAPI_BLOCK_SIZE) {
            PrintError("Read of something that is not an ATAPI block len %d, mod=%d\n", len, len % ATAPI_BLOCK_SIZE);
            return -1;
        }

        ret = blk_state->cd_ops->read(buf, len / ATAPI_BLOCK_SIZE, *sector, blk_state->backend_data);

        *sector += len / ATAPI_BLOCK_SIZE;
    }

    return ret;
}


static int handle_write_op(struct virtio_blk_state * blk_state, uint8_t * buf, uint64_t * sector, uint32_t len) {
    int ret = -1;

    if (blk_state->block_type == BLOCK_DISK) {
        if (len % HD_SECTOR_SIZE) {
            PrintError("Write of something that is not a sector len %d, mod=%d\n", len, len % HD_SECTOR_SIZE);
            return -1;
        }

        PrintDebug("Writing Disk\n");

        ret = blk_state->hd_ops->write(buf, len / HD_SECTOR_SIZE, *sector, blk_state->backend_data);

        *sector += len / HD_SECTOR_SIZE;        
    }

    return ret;
}



// multiple block operations need to increment the sector 

static int handle_block_op(struct virtio_blk_state * blk_state, struct blk_op_hdr * hdr, 
                           struct vring_desc * buf_desc, uint8_t * status) {
    uint8_t * buf = NULL;

    PrintDebug("Handling Block op\n");



    if (guest_pa_to_host_va(blk_state->virtio_dev->vm, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
        PrintError("Could not translate buffer address\n");
        return -1;
    }


    PrintDebug("Sector=%p Length=%d\n", (void *)(addr_t)(hdr->sector), buf_desc->length);

    if (hdr->type == BLK_IN_REQ) {
        if (blk_state->block_type != BLOCK_NONE) {
            if (handle_read_op(blk_state, buf, &(hdr->sector), buf_desc->length) == -1) {
                *status = BLK_STATUS_ERR;
                return -1;
            } else {
                *status = BLK_STATUS_OK;
            }
        } else {
            *status = BLK_STATUS_NOT_SUPPORTED;
        }

    } else if (hdr->type == BLK_OUT_REQ) {
        if (blk_state->block_type == BLOCK_DISK) {
            if (handle_write_op(blk_state, buf, &(hdr->sector), buf_desc->length) == -1) {
                *status = BLK_STATUS_ERR;
                return -1;
            } else {
                *status = BLK_STATUS_OK;
            }

        } else {
            *status = BLK_STATUS_NOT_SUPPORTED;
        }

    } else if (hdr->type == BLK_SCSI_CMD) {
        PrintError("VIRTIO: SCSI Command Not supported!!!\n");
        *status = BLK_STATUS_NOT_SUPPORTED;
        return -1;
    }



    PrintDebug("Returning Status: %d\n", *status);

    return 0;
}

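/* Walk a descriptor chain, following the NEXT flag, and return how many
 * descriptors it contains (the starting descriptor plus any chained ones). */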
static int get_desc_count(struct virtio_queue * q, int index) {
    struct vring_desc * tmp_desc = &(q->desc[index]);
    int cnt = 1;

    while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
        tmp_desc = &(q->desc[tmp_desc->next]);
        cnt++;
    }

    return cnt;
}


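/* Process the available ring after a guest kick. Each virtio block request is
 * a descriptor chain of at least 3 descriptors: a request header
 * (struct blk_op_hdr), one or more data buffers, and a final one-byte status
 * descriptor that we fill in before posting the request to the used ring. */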
static int handle_kick(struct virtio_blk_state * blk_state) {  
    struct virtio_queue * q = &(blk_state->queue);

    PrintDebug("VIRTIO KICK: cur_index=%d (mod=%d), avail_index=%d\n", 
               q->cur_avail_idx, q->cur_avail_idx % QUEUE_SIZE, q->avail->index);

    while (q->cur_avail_idx < q->avail->index) {
        struct vring_desc * hdr_desc = NULL;
        struct vring_desc * buf_desc = NULL;
        struct vring_desc * status_desc = NULL;
        struct blk_op_hdr hdr;
        addr_t hdr_addr = 0;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        int desc_cnt = get_desc_count(q, desc_idx);
        int i = 0;
        uint8_t * status_ptr = NULL;
        uint8_t status = BLK_STATUS_OK;
        uint32_t req_len = 0;

        PrintDebug("Descriptor Count=%d, index=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE);

        if (desc_cnt < 3) {
            PrintError("Block operations must include at least 3 descriptors\n");
            return -1;
        }

        hdr_desc = &(q->desc[desc_idx]);


        PrintDebug("Header Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", hdr_desc, 
                   (void *)(hdr_desc->addr_gpa), hdr_desc->length, hdr_desc->flags, hdr_desc->next);    

        if (guest_pa_to_host_va(blk_state->virtio_dev->vm, hdr_desc->addr_gpa, &(hdr_addr)) == -1) {
            PrintError("Could not translate block header address\n");
            return -1;
        }

        // We copy the block op header out because we are going to modify its contents
        memcpy(&hdr, (void *)hdr_addr, sizeof(struct blk_op_hdr));

        PrintDebug("Blk Op Hdr (ptr=%p) type=%d, sector=%p\n", (void *)hdr_addr, hdr.type, (void *)hdr.sector);

        desc_idx = hdr_desc->next;

        for (i = 0; i < desc_cnt - 2; i++) {
            uint8_t tmp_status = BLK_STATUS_OK;

            buf_desc = &(q->desc[desc_idx]);

            PrintDebug("Buffer Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", buf_desc, 
                       (void *)(buf_desc->addr_gpa), buf_desc->length, buf_desc->flags, buf_desc->next);

            if (handle_block_op(blk_state, &hdr, buf_desc, &tmp_status) == -1) {
                PrintError("Error handling block operation\n");
                return -1;
            }

            if (tmp_status != BLK_STATUS_OK) {
                status = tmp_status;
            }

            req_len += buf_desc->length;
            desc_idx = buf_desc->next;
        }

        status_desc = &(q->desc[desc_idx]);

        PrintDebug("Status Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", status_desc, 
                   (void *)(status_desc->addr_gpa), status_desc->length, status_desc->flags, status_desc->next);

        if (guest_pa_to_host_va(blk_state->virtio_dev->vm, status_desc->addr_gpa, (addr_t *)&(status_ptr)) == -1) {
            PrintError("Could not translate status address\n");
            return -1;
        }

        req_len += status_desc->length;
        *status_ptr = status;

        q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        q->used->ring[q->used->index % QUEUE_SIZE].length = req_len; // What do we set this to????

        q->used->index++;
        q->cur_avail_idx++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n",  blk_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(blk_state->virtio_dev->pci_bus, 0, blk_state->pci_dev);
        blk_state->virtio_cfg.pci_isr = 1;
    }

    return 0;
}

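/* Port I/O write handler for the device's PCI I/O BAR. port_idx is the offset
 * into the virtio register window; writes set the guest feature bits, program
 * the vring page frame number (which we translate into host pointers for the
 * descriptor, available, and used rings), select the queue, deliver kick
 * notifications, and update the device status / ISR registers. */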
static int virtio_io_write(uint16_t port, void * src, uint_t length, void * private_data) {
    struct virtio_blk_state * blk_state = (struct virtio_blk_state *)private_data;
    int port_idx = port % blk_state->io_range_size;


    PrintDebug("VIRTIO BLOCK Write for port %d (index=%d) len=%d, value=%x\n", 
               port, port_idx, length, *(uint32_t *)src);



    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }

            blk_state->virtio_cfg.guest_features = *(uint32_t *)src;
            PrintDebug("Setting Guest Features to %x\n", blk_state->virtio_cfg.guest_features);

            break;
        case VRING_PG_NUM_PORT:
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);


                blk_state->queue.pfn = pfn;

                blk_state->queue.ring_desc_addr = page_addr;
                blk_state->queue.ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                blk_state->queue.ring_used_addr = ( blk_state->queue.ring_avail_addr + \
                                                 sizeof(struct vring_avail)    + \
                                                 (QUEUE_SIZE * sizeof(uint16_t)));

                // round up to next page boundary.
                blk_state->queue.ring_used_addr = (blk_state->queue.ring_used_addr + 0xfff) & ~0xfff;

                if (guest_pa_to_host_va(blk_state->virtio_dev->vm, blk_state->queue.ring_desc_addr, (addr_t *)&(blk_state->queue.desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }


                if (guest_pa_to_host_va(blk_state->virtio_dev->vm, blk_state->queue.ring_avail_addr, (addr_t *)&(blk_state->queue.avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }


                if (guest_pa_to_host_va(blk_state->virtio_dev->vm, blk_state->queue.ring_used_addr, (addr_t *)&(blk_state->queue.used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug("RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(blk_state->queue.ring_desc_addr),
                           (void *)(blk_state->queue.ring_avail_addr),
                           (void *)(blk_state->queue.ring_used_addr));

                PrintDebug("RingDesc=%p, Avail=%p, Used=%p\n", 
                           blk_state->queue.desc, blk_state->queue.avail, blk_state->queue.used);

            } else {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            blk_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            if (blk_state->virtio_cfg.vring_queue_selector != 0) {
                PrintError("Virtio Block device only uses 1 queue, selected %d\n", 
                           blk_state->virtio_cfg.vring_queue_selector);
                return -1;
            }

            break;
        case VRING_Q_NOTIFY_PORT:
            PrintDebug("Handling Kick\n");
            if (handle_kick(blk_state) == -1) {
                PrintError("Could not handle Block Notification\n");
                return -1;
            }
            break;
        case VIRTIO_STATUS_PORT:
            blk_state->virtio_cfg.status = *(uint8_t *)src;

            if (blk_state->virtio_cfg.status == 0) {
                PrintDebug("Resetting device\n");
                blk_reset(blk_state);
            }

            break;

        case VIRTIO_ISR_PORT:
            blk_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
            break;
    }

    return length;
}


static int virtio_io_read(uint16_t port, void * dst, uint_t length, void * private_data) {
    struct virtio_blk_state * blk_state = (struct virtio_blk_state *)private_data;
    int port_idx = port % blk_state->io_range_size;


    PrintDebug("VIRTIO BLOCK Read for port %d (index=%d), length=%d\n", 
               port, port_idx, length);

    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = blk_state->virtio_cfg.host_features;

            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = blk_state->queue.pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }

            *(uint16_t *)dst = blk_state->queue.queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = blk_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            *(uint8_t *)dst = blk_state->virtio_cfg.pci_isr;
            blk_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(blk_state->virtio_dev->pci_bus, 0, blk_state->pci_dev);
            break;

        default:
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct blk_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(blk_state->block_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);

            } else {
                PrintError("Unhandled Virtio register read\n");
                return -1;
            }

            break;
    }

    return length;
}




static struct v3_device_ops dev_ops = {
    .free = virtio_free,
    .reset = virtio_reset,
    .start = NULL,
    .stop = NULL,
};




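/* Register one block device instance with the PCI bus: size the I/O BAR to
 * cover the virtio config space plus the block config space (rounded up to a
 * power of 2), register the PCI device, fill in its config header, advertise
 * the host features, and reset the virtqueue state. */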
static int register_dev(struct virtio_dev_state * virtio, struct virtio_blk_state * blk_state) {
    // initialize PCI
    struct pci_device * pci_dev = NULL;
    struct v3_pci_bar bars[6];
    int num_ports = sizeof(struct virtio_config) + sizeof(struct blk_config);
    int tmp_ports = num_ports;
    int i;



    // This gets the number of ports, rounded up to a power of 2
    blk_state->io_range_size = 1; // must be a power of 2

    while (tmp_ports > 0) {
        tmp_ports >>= 1;
        blk_state->io_range_size <<= 1;
    }

    // this is to account for any low order bits being set in num_ports
    // if there are none, then num_ports was already a power of 2 so we shift right to reset it
    if ((num_ports & ((blk_state->io_range_size >> 1) - 1)) == 0) {
        blk_state->io_range_size >>= 1;
    }


    for (i = 0; i < 6; i++) {
        bars[i].type = PCI_BAR_NONE;
    }

    PrintDebug("Virtio-BLK io_range_size = %d\n", blk_state->io_range_size);

    bars[0].type = PCI_BAR_IO;
    bars[0].default_base_port = -1;
    bars[0].num_ports = blk_state->io_range_size;

    bars[0].io_read = virtio_io_read;
    bars[0].io_write = virtio_io_write;
    bars[0].private_data = blk_state;

    pci_dev = v3_pci_register_device(virtio->pci_bus, PCI_STD_DEVICE, 
                                     0, PCI_AUTO_DEV_NUM, 0,
                                     "LNX_VIRTIO_BLK", bars,
                                     NULL, NULL, NULL, blk_state);

    if (!pci_dev) {
        PrintError("Could not register PCI Device\n");
        return -1;
    }

    pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
    pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;


    pci_dev->config_header.device_id = VIRTIO_BLOCK_DEV_ID;
    pci_dev->config_header.class = PCI_CLASS_STORAGE;
    pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_OTHER;

    pci_dev->config_header.subsystem_id = VIRTIO_BLOCK_SUBDEVICE_ID;


    pci_dev->config_header.intr_pin = 1;

    pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)


    blk_state->pci_dev = pci_dev;

    /* Block configuration */
    blk_state->virtio_cfg.host_features = VIRTIO_SEG_MAX;
    blk_state->block_cfg.max_seg = QUEUE_SIZE - 2;

    // Virtio Block only uses one queue
    blk_state->queue.queue_size = QUEUE_SIZE;

    blk_state->virtio_dev = virtio;

    blk_reset(blk_state);


    return 0;
}


int v3_virtio_register_cdrom(struct vm_device * dev, struct v3_cd_ops * ops, void * private_data) {
    struct virtio_dev_state * virtio = (struct virtio_dev_state *)dev->private_data;
    struct virtio_blk_state * blk_state  = (struct virtio_blk_state *)V3_Malloc(sizeof(struct virtio_blk_state));
    memset(blk_state, 0, sizeof(struct virtio_blk_state));

    register_dev(virtio, blk_state);

    blk_state->block_type = BLOCK_CDROM;
    blk_state->cd_ops = ops;
    blk_state->backend_data = private_data;

    blk_state->block_cfg.capacity = ops->get_capacity(private_data);

    return 0;
}

int v3_virtio_register_harddisk(struct vm_device * dev, struct v3_hd_ops * ops, void * private_data) {
    struct virtio_dev_state * virtio = (struct virtio_dev_state *)dev->private_data;
    struct virtio_blk_state * blk_state  = (struct virtio_blk_state *)V3_Malloc(sizeof(struct virtio_blk_state));
    memset(blk_state, 0, sizeof(struct virtio_blk_state));

    register_dev(virtio, blk_state);

    blk_state->block_type = BLOCK_DISK;
    blk_state->hd_ops = ops;
    blk_state->backend_data = private_data;

    blk_state->block_cfg.capacity = ops->get_capacity(private_data);

    // The capacity lives in the per-disk state, not the shared virtio_dev_state
    PrintDebug("Virtio Capacity = %d -- 0x%p\n", (int)(blk_state->block_cfg.capacity), 
        (void *)(addr_t)(blk_state->block_cfg.capacity));

    return 0;
}



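/* Device manager init hook: look up the PCI bus named in the config data,
 * allocate the shared virtio block device state, and attach the device.
 * Individual disks are added later via v3_virtio_register_harddisk() /
 * v3_virtio_register_cdrom(). */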
static int virtio_init(struct guest_info * vm, void * cfg_data) {
    struct vm_device * pci_bus = v3_find_dev(vm, (char *)cfg_data);
    struct virtio_dev_state * virtio_state = NULL;


    PrintDebug("Initializing VIRTIO Block device\n");

    if (pci_bus == NULL) {
        PrintError("VirtIO devices require a PCI Bus");
        return -1;
    }


    virtio_state  = (struct virtio_dev_state *)V3_Malloc(sizeof(struct virtio_dev_state));
    memset(virtio_state, 0, sizeof(struct virtio_dev_state));

    INIT_LIST_HEAD(&(virtio_state->dev_list));
    virtio_state->pci_bus = pci_bus;
    virtio_state->vm = vm;

    struct vm_device * dev = v3_allocate_device("LNX_VIRTIO_BLK", &dev_ops, virtio_state);
    if (v3_attach_device(vm, dev) == -1) {
        PrintError("Could not attach device %s\n", "LNX_VIRTIO_BLK");
        return -1;
    }

    return 0;
}



device_register("LNX_VIRTIO_BLK", virtio_init)