Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way; see the example below.
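For example, to track a release branch, first list the remote branches and then check one out (the release branch name here is only illustrative; use one that git branch -r actually reports):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2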


palacios/src/devices/lnx_virtio_symmod.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest_mem.h>
#include <devices/lnx_virtio_pci.h>
#include <palacios/vmm_symmod.h>
#include <palacios/vmm_hashtable.h>

#include <devices/pci.h>


#define QUEUE_SIZE 128
#define NUM_QUEUES 2

struct sym_config {
    uint32_t avail_mods;
    uint32_t loaded_mods;
} __attribute__((packed));



struct virtio_sym_state {
    struct sym_config sym_cfg;
    struct virtio_config virtio_cfg;


    struct vm_device * pci_bus;
    struct pci_device * pci_dev;
    struct v3_vm_info * vm;
    struct v3_symmod_state * symmod_state;


#define NOTIFY_QUEUE 0
#define LOADER_QUEUE 1
    struct virtio_queue queue[NUM_QUEUES];

    struct virtio_queue * cur_queue;

    int notifier_active;

    int io_range_size;
};



struct symmod_cmd {
#define CMD_INV  0
#define CMD_LOAD 1
#define CMD_LIST 2
    uint32_t cmd;
    uint32_t num_cmds;
} __attribute__((packed));


// structure of the symmod notifier ring structures
struct symmod_hdr {
    uint32_t num_bytes;
    char name[32];
    union {
        uint32_t flags;
        struct {
#define V3_SYMMOD_INV (0x00)
#define V3_SYMMOD_LNX (0x01)
#define V3_SYMMOD_MOD (0x02)
#define V3_SYMMOD_SEC (0x03)
            uint8_t type;

#define V3_SYMMOD_ARCH_INV     (0x00)
#define V3_SYMMOD_ARCH_i386    (0x01)
#define V3_SYMMOD_ARCH_x86_64  (0x02)
            uint8_t arch;

#define V3_SYMMOD_ACT_INV       (0x00)
#define V3_SYMMOD_ACT_ADVERTISE (0x01)
#define V3_SYMMOD_ACT_LOAD      (0x02)
            uint8_t action;

            uint8_t rsvd;
        } __attribute__((packed));
    } __attribute__((packed));
} __attribute__((packed));


static int virtio_reset(struct virtio_sym_state * virtio) {

    memset(virtio->queue, 0, sizeof(struct virtio_queue) * NUM_QUEUES);

    virtio->cur_queue = &(virtio->queue[0]);

    virtio->virtio_cfg.status = 0;
    virtio->virtio_cfg.pci_isr = 0;

    virtio->queue[0].queue_size = QUEUE_SIZE;
    virtio->queue[1].queue_size = QUEUE_SIZE;


    virtio->sym_cfg.avail_mods = virtio->symmod_state->num_avail_capsules;
    virtio->sym_cfg.loaded_mods = virtio->symmod_state->num_loaded_capsules;

    return 0;
}



// walk a descriptor chain starting at index and return the number of descriptors in it
static int get_desc_count(struct virtio_queue * q, int index) {
    struct vring_desc * tmp_desc = &(q->desc[index]);
    int cnt = 1;
    
    while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
        tmp_desc = &(q->desc[tmp_desc->next]);
        cnt++;
    }

    return cnt;
}




// Kick on the loader queue: copy the requested capsule into the guest-supplied buffers
static int handle_xfer_kick(struct guest_info * core, struct virtio_sym_state * sym_state) {
    struct virtio_queue * q = sym_state->cur_queue;
    
    PrintDebug("SYMMOD: VIRTIO SYMMOD Kick on loader queue\n");

    while (q->cur_avail_idx != q->avail->index) {
        struct vring_desc * cmd_desc = NULL;
        struct symmod_cmd * cmd = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        uint16_t desc_cnt = get_desc_count(q, desc_idx);
        struct vring_desc * status_desc = NULL;
        uint8_t status = 0;
        uint8_t * status_ptr = NULL;
        int i;
        uint32_t xfer_len = 0;

        cmd_desc = &(q->desc[desc_idx]);
        
        if (v3_gpa_to_hva(core, cmd_desc->addr_gpa, (addr_t *)&cmd) == -1) {
            PrintError("Could not translate SYMMOD command address\n");
            return -1;
        }
 
        desc_idx = cmd_desc->next;

        if (cmd->cmd == CMD_LOAD) {
            struct vring_desc * name_desc = NULL;
            struct vring_desc * buf_desc = NULL;
            char * name = NULL;
            struct v3_sym_capsule * capsule = NULL;
            uint32_t offset = 0;


            PrintDebug("Descriptor Count=%d, index=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE);

            if (desc_cnt < 3) {
                PrintError("Symmod loads must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
                return -1;
            }
        
            name_desc = &(q->desc[desc_idx]);

            if (v3_gpa_to_hva(core, name_desc->addr_gpa, (addr_t *)&name) == -1) {
                PrintError("Could not translate SYMMOD capsule name address\n");
                return -1;
            }

            desc_idx = name_desc->next;

            capsule = v3_get_sym_capsule(core->vm_info, name);

            // bail out if the requested capsule does not exist
            if (capsule == NULL) {
                PrintError("Could not find capsule %s\n", name);
                return -1;
            }

            for (i = 0; i < desc_cnt - 3; i++) {
                uint8_t tmp_status = 0;
                uint8_t * buf = NULL;

                buf_desc = &(q->desc[desc_idx]);

                if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
                    PrintError("Could not translate buffer address\n");
                    return -1;
                }

                memcpy(buf, capsule->start_addr + offset, buf_desc->length);
                PrintDebug("Copying module to virtio buffers: SRC=%p, DST=%p, len=%d\n",
                           (void *)(capsule->start_addr + offset), (void *)buf, buf_desc->length);

                if (tmp_status != 0) {
                    PrintError("Error loading module segment\n");
                    status = tmp_status;
                }


                offset += buf_desc->length;
                xfer_len += buf_desc->length;
                desc_idx = buf_desc->next;
            }
        } else {
            PrintError("Invalid SYMMOD Loader command\n");
            return -1;
        }

        status_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
            PrintError("SYMMOD Error: could not translate status address\n");
            return -1;
        }

        xfer_len += status_desc->length;
        *status_ptr = status;

        PrintDebug("Transferred %d bytes (xfer_len)\n", xfer_len);
        q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        q->used->ring[q->used->index % QUEUE_SIZE].length = xfer_len; // set to total inbound xfer length

        q->used->index++;
        q->cur_avail_idx++;

    }


    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n",  sym_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(sym_state->pci_bus, 0, sym_state->pci_dev);
        sym_state->virtio_cfg.pci_isr = 1;
    }


    return 0;
}




// Kick on the notification queue: advertise every available Linux capsule to the guest
static int handle_notification_kick(struct guest_info * core, struct virtio_sym_state * sym_state) {
    //    struct virtio_queue * q = sym_state->cur_queue;
    struct virtio_queue * q = &(sym_state->queue[NOTIFY_QUEUE]);
    struct hashtable_iter * capsule_iter = NULL;

    PrintDebug("SYMMOD: VIRTIO SYMMOD Kick on notification queue\n");

    capsule_iter = v3_create_htable_iter(sym_state->symmod_state->capsule_table);

    do {
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct vring_desc * hdr_desc = NULL;
        struct symmod_hdr * hdr = NULL;
        struct v3_sym_capsule * capsule = NULL;


        capsule = (struct v3_sym_capsule *)v3_htable_get_iter_value(capsule_iter);


        PrintDebug("SYMMOD: Advertising Capsule %s\n", capsule->name);

        if (capsule->type != V3_SYMMOD_LNX) {
            continue;
        }
        


        if (q->cur_avail_idx == q->avail->index) {
            PrintError("Notification queue is too small\n");
            return -1;
        }

        hdr_desc = &(q->desc[desc_idx]);

        if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
            PrintError("Could not translate SYMMOD header address\n");
            return -1;
        }

        memset(hdr, 0, sizeof(struct symmod_hdr));


        memcpy(hdr->name, capsule->name, strlen(capsule->name));
        hdr->num_bytes = capsule->size;
        hdr->flags = capsule->flags;
        hdr->action = V3_SYMMOD_ACT_ADVERTISE;

        q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        q->used->ring[q->used->index % QUEUE_SIZE].length = sizeof(struct symmod_hdr); // set to total inbound xfer length
        
        q->used->index++;
        q->cur_avail_idx++;

    } while (v3_htable_iter_advance(capsule_iter));


    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n",  sym_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(sym_state->pci_bus, 0, sym_state->pci_dev);
        sym_state->virtio_cfg.pci_isr = 1;
    }


    return 0;
}


static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
    struct virtio_sym_state * sym_state = (struct virtio_sym_state *)private_data;
    int port_idx = port % sym_state->io_range_size;


    PrintDebug("SYMMOD: VIRTIO SYMMOD Write for port %d len=%d, value=%x\n", 
               port, length, *(uint32_t *)src);
    PrintDebug("SYMMOD: port idx=%d\n", port_idx);


    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }
            
            sym_state->virtio_cfg.guest_features = *(uint32_t *)src;

            break;
        case VRING_PG_NUM_PORT:
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);

                sym_state->cur_queue->pfn = pfn;
                
                sym_state->cur_queue->ring_desc_addr = page_addr;
                sym_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                sym_state->cur_queue->ring_used_addr = ( sym_state->cur_queue->ring_avail_addr + \
                                                 sizeof(struct vring_avail)    + \
                                                 (QUEUE_SIZE * sizeof(uint16_t)));
                
                // round up to next page boundary.
                sym_state->cur_queue->ring_used_addr = (sym_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;

                if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_desc_addr, (addr_t *)&(sym_state->cur_queue->desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }


                if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_avail_addr, (addr_t *)&(sym_state->cur_queue->avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }


                if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_used_addr, (addr_t *)&(sym_state->cur_queue->used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug("SYMMOD: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(sym_state->cur_queue->ring_desc_addr),
                           (void *)(sym_state->cur_queue->ring_avail_addr),
                           (void *)(sym_state->cur_queue->ring_used_addr));

                PrintDebug("SYMMOD: RingDesc=%p, Avail=%p, Used=%p\n", 
                           sym_state->cur_queue->desc, sym_state->cur_queue->avail, sym_state->cur_queue->used);

            } else {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            sym_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            if (sym_state->virtio_cfg.vring_queue_selector >= NUM_QUEUES) {
                PrintError("Virtio Symbiotic device only has %d queues. Selected %d\n", 
                           NUM_QUEUES, sym_state->virtio_cfg.vring_queue_selector);
                return -1;
            }
            
            sym_state->cur_queue = &(sym_state->queue[sym_state->virtio_cfg.vring_queue_selector]);

            break;
        case VRING_Q_NOTIFY_PORT: {
            uint16_t queue_idx = *(uint16_t *)src;
            
            PrintDebug("SYMMOD: Handling Kick\n");
            
            if (queue_idx == 0) {
                if (handle_notification_kick(core, sym_state) == -1) {
                    PrintError("Could not handle Notification Kick\n");
                    return -1;
                }
                
                sym_state->notifier_active = 1;
                
            } else if (queue_idx == 1) {
                if (handle_xfer_kick(core, sym_state) == -1) {
                    PrintError("Could not handle Loader Kick\n");
                    return -1;
                }
            } else {
                PrintError("Kick on invalid queue (%d)\n", queue_idx);
                return -1;
            }
            
            break;
        }
        case VIRTIO_STATUS_PORT:
            sym_state->virtio_cfg.status = *(uint8_t *)src;

            if (sym_state->virtio_cfg.status == 0) {
                PrintDebug("SYMMOD: Resetting device\n");
                virtio_reset(sym_state);
            }

            break;

        case VIRTIO_ISR_PORT:
            sym_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
            break;
    }

    return length;
}


static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {

    struct virtio_sym_state * sym_state = (struct virtio_sym_state *)private_data;
    int port_idx = port % sym_state->io_range_size;

/*
    PrintDebug("SYMMOD: VIRTIO SYMBIOTIC Read  for port %d (index =%d), length=%d\n", 
               port, port_idx, length);
*/
    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = sym_state->virtio_cfg.host_features;
        
            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = sym_state->cur_queue->pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }
                
            *(uint16_t *)dst = sym_state->cur_queue->queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = sym_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            *(uint8_t *)dst = sym_state->virtio_cfg.pci_isr;
            sym_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(sym_state->pci_bus, 0, sym_state->pci_dev);
            break;

        default:
            // reads past the standard virtio registers fall into the symmod config space
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct sym_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(sym_state->sym_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);

                V3_Print("Reading SymConfig at idx %d (val=%x)\n", cfg_offset, *(uint32_t *)cfg_ptr);
                
            } else {
                PrintError("Unhandled Virtio register read\n");
                return -1;
            }
          
            break;
    }

    return length;
}




static int virtio_load_capsule(struct v3_vm_info * vm, struct v3_sym_capsule * mod, void * priv_data) {
    struct virtio_sym_state * virtio = (struct virtio_sym_state *)priv_data;
    //   struct virtio_queue * q = virtio->cur_queue;
    struct virtio_queue * q = &(virtio->queue[NOTIFY_QUEUE]);


    if (strlen(mod->name) >= 32) {
        PrintError("Capsule name is too long... (%d bytes) limit is 32\n", (uint32_t)strlen(mod->name));
        return -1;
    }

    PrintDebug("SYMMOD: VIRTIO SYMMOD Loader: Loading Capsule (size=%d)\n", mod->size);

    // queue is not set yet
    if (q->ring_avail_addr == 0) {
        PrintError("Queue is not set\n");
        return -1;
    }

    
    if (q->cur_avail_idx != q->avail->index) {
        uint16_t notifier_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
        struct symmod_hdr * notifier = NULL;
        struct vring_desc * notifier_desc = NULL;

        PrintDebug("SYMMOD: Descriptor index=%d\n", q->cur_avail_idx % q->queue_size);

        notifier_desc = &(q->desc[notifier_idx]);

        PrintDebug("SYMMOD: Notifier Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", 
                   notifier_desc, (void *)(addr_t)(notifier_desc->addr_gpa), 
                   notifier_desc->length, notifier_desc->flags, 
                   notifier_desc->next);

        if (v3_gpa_to_hva(&(vm->cores[0]), notifier_desc->addr_gpa, (addr_t *)&(notifier)) == -1) {
            PrintError("Could not translate receive buffer address\n");
            return -1;
        }

        // clear the notifier
        memset(notifier, 0, sizeof(struct symmod_hdr));

        // set the capsule name
        memcpy(notifier->name, mod->name, strlen(mod->name));

        // set capsule length
        notifier->num_bytes = mod->size;
        notifier->flags = mod->flags;
        notifier->action =  V3_SYMMOD_ACT_LOAD;

        
        q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];

        q->used->ring[q->used->index % q->queue_size].length = sizeof(struct symmod_hdr);

        q->used->index++;
        q->cur_avail_idx++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("SYMMOD: Raising IRQ %d\n",  virtio->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(virtio->pci_bus, 0, virtio->pci_dev);
        virtio->virtio_cfg.pci_isr = 0x1;
    }


    return 0;
}


static int virtio_free(struct virtio_sym_state * virtio_state) {
    // unregister from PCI

    V3_Free(virtio_state);
    return 0;
}


static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))virtio_free,
};



static struct v3_symmod_loader_ops loader_ops = {
    .load_capsule = virtio_load_capsule,
};


static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
    struct virtio_sym_state * virtio_state = NULL;
    struct v3_symmod_state * symmod_state = &(vm->sym_vm_state.symmod_state);
    struct pci_device * pci_dev = NULL;
    char * dev_id = v3_cfg_val(cfg, "ID");

    PrintDebug("SYMMOD: Initializing VIRTIO Symbiotic Module device\n");

    if (pci_bus == NULL) {
        PrintError("VirtIO devices require a PCI Bus\n");
        return -1;
    }
    
    virtio_state  = (struct virtio_sym_state *)V3_Malloc(sizeof(struct virtio_sym_state));

    // guard against allocation failure before touching the state
    if (virtio_state == NULL) {
        PrintError("Could not allocate state for SYMMOD device\n");
        return -1;
    }

    memset(virtio_state, 0, sizeof(struct virtio_sym_state));

    virtio_state->vm = vm;
    virtio_state->symmod_state = symmod_state;




    struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, virtio_state);

    if (dev == NULL) {
        PrintError("Could not attach device %s\n", dev_id);
        V3_Free(virtio_state);
        return -1;
    }


    // PCI initialization
    {
        struct v3_pci_bar bars[6];
        int num_ports = sizeof(struct virtio_config) + sizeof(struct sym_config);
        int tmp_ports = num_ports;
        int i;


        // This gets the number of ports, rounded up to a power of 2
        virtio_state->io_range_size = 1; // must be a power of 2

        while (tmp_ports > 0) {
            tmp_ports >>= 1;
            virtio_state->io_range_size <<= 1;
        }
        
        // this is to account for any low order bits being set in num_ports
        // if there are none, then num_ports was already a power of 2 so we shift right to reset it
        if ((num_ports & ((virtio_state->io_range_size >> 1) - 1)) == 0) {
            virtio_state->io_range_size >>= 1;
        }


        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
        bars[0].num_ports = virtio_state->io_range_size;

        bars[0].io_read = virtio_io_read;
        bars[0].io_write = virtio_io_write;
        bars[0].private_data = virtio_state;

        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
                                         0, PCI_AUTO_DEV_NUM, 0,
                                         "LNX_VIRTIO_SYMMOD", bars,
                                         NULL, NULL, NULL, virtio_state);

        if (!pci_dev) {
            PrintError("Could not register PCI Device\n");
            v3_remove_device(dev);
            return -1;
        }
        
        pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
        pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
        

        pci_dev->config_header.device_id = VIRTIO_SYMMOD_DEV_ID;
        pci_dev->config_header.class = PCI_CLASS_MEMORY;
        pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
    
        pci_dev->config_header.subsystem_id = VIRTIO_SYMMOD_SUBDEVICE_ID;


        pci_dev->config_header.intr_pin = 1;

        pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)


        virtio_state->pci_dev = pci_dev;
        virtio_state->pci_bus = pci_bus;
    }
    

    // reset fills in sym_cfg before the module count is reported
    virtio_reset(virtio_state);

    V3_Print("SYMMOD: %d available sym modules\n", virtio_state->sym_cfg.avail_mods);

    v3_set_symmod_loader(vm, &loader_ops, virtio_state);

    return 0;
}


device_register("LNX_VIRTIO_SYMMOD", virtio_init)
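
The device registers itself under the name "LNX_VIRTIO_SYMMOD", and virtio_init reads two values from its configuration subtree: "bus" (the ID of the PCI bus device to attach to) and "ID" (the name the device instance is added under), so a guest configuration has to supply both. The entry below is only a rough sketch of what that might look like; the XML layout and the bus name "pci0" are illustrative assumptions, not something taken from this file:

  <device class="LNX_VIRTIO_SYMMOD" id="symmod">
      <bus>pci0</bus>
  </device>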