Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Added symbiotic module loader framework
[palacios.git] / palacios / src / devices / lnx_virtio_symmod.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/lnx_virtio_pci.h>
24 #include <palacios/vmm_symmod.h>
25
26 #include <devices/pci.h>
27
28
29 #define QUEUE_SIZE 128
30 #define NUM_QUEUES 2
31
// Device-specific virtio configuration space for the symmod device.
// Currently empty: all communication happens over the two virtqueues.
struct sym_config {
} __attribute__((packed));
34
35
36
// Per-instance runtime state for one virtio symmod device.
struct virtio_sym_state {
    struct sym_config sym_cfg;        // device-specific config space (empty)
    struct virtio_config virtio_cfg;  // standard virtio register block


    struct vm_device * pci_bus;       // PCI bus this device is attached to
    struct pci_device * pci_dev;      // our PCI function, set during init


#define NOTIFY_QUEUE 0   /* host->guest: module availability notifications */
#define LOADER_QUEUE 1   /* guest->host: module data transfer requests */
    struct virtio_queue queue[NUM_QUEUES];

    struct virtio_queue * cur_queue;  // queue selected via VRING_Q_SEL_PORT

    int notifier_active;              // set once the guest kicks the notify queue

    int io_range_size;                // size (power of 2) of our PIO BAR
};
56
57
58
59 // structure of the symmod notifier ring structures
// Notification header exchanged over the notify queue.
struct symmod_hdr {
    uint32_t num_bytes;   // total size of the module image in bytes
    char name[32];        // module name; NUL-padded (max 31 chars + NUL)
} __attribute__((packed));
64
65
66 static int virtio_reset(struct virtio_sym_state * virtio) {
67
68     memset(virtio->queue, 0, sizeof(struct virtio_queue) * 2);
69
70     virtio->cur_queue = &(virtio->queue[0]);
71
72     virtio->virtio_cfg.status = 0;
73     virtio->virtio_cfg.pci_isr = 0;
74
75     virtio->queue[0].queue_size = QUEUE_SIZE;
76     virtio->queue[1].queue_size = QUEUE_SIZE;
77
78
79     memset(&(virtio->sym_cfg), 0, sizeof(struct sym_config));
80
81     return 0;
82 }
83
84
85
86 static int get_desc_count(struct virtio_queue * q, int index) {
87     struct vring_desc * tmp_desc = &(q->desc[index]);
88     int cnt = 1;
89     
90     while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
91         tmp_desc = &(q->desc[tmp_desc->next]);
92         cnt++;
93     }
94
95     return cnt;
96 }
97
98
99
100
101 static int handle_xfer_kick(struct guest_info * core, struct virtio_sym_state * sym_state) {
102     struct virtio_queue * q = sym_state->cur_queue;
103     
104     PrintDebug("SYMMOD: VIRTIO SYMMOD Kick on loader queue\n");
105
106     while (q->cur_avail_idx < q->avail->index) {
107         struct vring_desc * hdr_desc = NULL;
108         struct vring_desc * buf_desc = NULL;
109         struct vring_desc * status_desc = NULL;
110         uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
111         uint16_t desc_cnt = get_desc_count(q, desc_idx);
112         struct symmod_hdr * hdr = NULL;
113         int i;
114         uint32_t xfer_len = 0;
115         uint8_t status = 0;
116         uint8_t * status_ptr = NULL;
117         struct v3_sym_module * module = NULL;
118         uint32_t offset = 0;
119
120
121         PrintDebug("Descriptor Count=%d, index=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE);
122
123         if (desc_cnt < 3) {
124             PrintError("Symmod loads must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
125             return -1;
126         }
127         
128         hdr_desc = &(q->desc[desc_idx]);
129
130         if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, (addr_t *)&hdr) == -1) {
131             PrintError("Could not translate SYMMOD header address\n");
132             return -1;
133         }
134
135         desc_idx = hdr_desc->next;
136
137         module = v3_get_sym_module(core->vm_info, hdr->name);
138
139         for (i = 0; i < desc_cnt - 2; i++) {
140             uint8_t tmp_status = 0;
141             uint8_t * buf = NULL;
142
143             buf_desc = &(q->desc[desc_idx]);
144
145             if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
146                 PrintError("Could not translate buffer address\n");
147                 return -1;
148             }
149
150             memcpy(buf, module->data + offset, buf_desc->length);
151
152             if (tmp_status != 0) {
153                 PrintError("Error loading module segment\n");
154                 status = tmp_status;
155             }
156
157
158             offset += buf_desc->length;
159             xfer_len += buf_desc->length;
160             desc_idx = buf_desc->next;
161         }
162
163         status_desc = &(q->desc[desc_idx]);
164
165         if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&status_ptr) == -1) {
166             PrintError("SYMMOD Error could not translate status address\n");
167             return -1;
168         }
169
170         xfer_len += status_desc->length;
171         *status_ptr = status;
172
173         q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
174         q->used->ring[q->used->index % QUEUE_SIZE].length = xfer_len; // set to total inbound xfer length
175
176         q->used->index++;
177         q->cur_avail_idx++;
178     }
179
180
181     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
182         PrintDebug("Raising IRQ %d\n",  sym_state->pci_dev->config_header.intr_line);
183         v3_pci_raise_irq(sym_state->pci_bus, 0, sym_state->pci_dev);
184         sym_state->virtio_cfg.pci_isr = 1;
185     }
186
187
188     return 0;
189 }
190
191
192 static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
193     struct virtio_sym_state * sym_state = (struct virtio_sym_state *)private_data;
194     int port_idx = port % sym_state->io_range_size;
195
196
197     PrintDebug("SYMMOD: VIRTIO SYMMOD Write for port %d len=%d, value=%x\n", 
198                port, length, *(uint32_t *)src);
199     PrintDebug("SYMMOD: port idx=%d\n", port_idx);
200
201
202     switch (port_idx) {
203         case GUEST_FEATURES_PORT:
204             if (length != 4) {
205                 PrintError("Illegal write length for guest features\n");
206                 return -1;
207             }
208             
209             sym_state->virtio_cfg.guest_features = *(uint32_t *)src;
210
211             break;
212         case VRING_PG_NUM_PORT:
213             if (length == 4) {
214                 addr_t pfn = *(uint32_t *)src;
215                 addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
216
217                 sym_state->cur_queue->pfn = pfn;
218                 
219                 sym_state->cur_queue->ring_desc_addr = page_addr ;
220                 sym_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
221                 sym_state->cur_queue->ring_used_addr = ( sym_state->cur_queue->ring_avail_addr + \
222                                                  sizeof(struct vring_avail)    + \
223                                                  (QUEUE_SIZE * sizeof(uint16_t)));
224                 
225                 // round up to next page boundary.
226                 sym_state->cur_queue->ring_used_addr = (sym_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
227
228                 if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_desc_addr, (addr_t *)&(sym_state->cur_queue->desc)) == -1) {
229                     PrintError("Could not translate ring descriptor address\n");
230                     return -1;
231                 }
232
233
234                 if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_avail_addr, (addr_t *)&(sym_state->cur_queue->avail)) == -1) {
235                     PrintError("Could not translate ring available address\n");
236                     return -1;
237                 }
238
239
240                 if (guest_pa_to_host_va(core, sym_state->cur_queue->ring_used_addr, (addr_t *)&(sym_state->cur_queue->used)) == -1) {
241                     PrintError("Could not translate ring used address\n");
242                     return -1;
243                 }
244
245                 PrintDebug("SYMMOD: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
246                            (void *)(sym_state->cur_queue->ring_desc_addr),
247                            (void *)(sym_state->cur_queue->ring_avail_addr),
248                            (void *)(sym_state->cur_queue->ring_used_addr));
249
250                 PrintDebug("SYMMOD: RingDesc=%p, Avail=%p, Used=%p\n", 
251                            sym_state->cur_queue->desc, sym_state->cur_queue->avail, sym_state->cur_queue->used);
252
253             } else {
254                 PrintError("Illegal write length for page frame number\n");
255                 return -1;
256             }
257             break;
258         case VRING_Q_SEL_PORT:
259             sym_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
260
261             if (sym_state->virtio_cfg.vring_queue_selector > NUM_QUEUES) {
262                 PrintError("Virtio Symbiotic device has no qeueues. Selected %d\n", 
263                            sym_state->virtio_cfg.vring_queue_selector);
264                 return -1;
265             }
266             
267             sym_state->cur_queue = &(sym_state->queue[sym_state->virtio_cfg.vring_queue_selector]);
268
269             break;
270         case VRING_Q_NOTIFY_PORT: {
271             uint16_t queue_idx = *(uint16_t *)src;
272
273             PrintDebug("SYMMOD: Handling Kick\n");
274
275             if (queue_idx == 0) {
276                 sym_state->notifier_active = 1;
277
278             } else if (queue_idx == 1) {
279                 if (handle_xfer_kick(core, sym_state) == -1) {
280                     PrintError("Could not handle Symbiotic Notification\n");
281                     return -1;
282                 }
283             } else {
284                 PrintError("Kick on invalid queue (%d)\n", queue_idx);
285                 return -1;
286             }
287
288             break;
289         }
290         case VIRTIO_STATUS_PORT:
291             sym_state->virtio_cfg.status = *(uint8_t *)src;
292
293             if (sym_state->virtio_cfg.status == 0) {
294                 PrintDebug("SYMMOD: Resetting device\n");
295                 virtio_reset(sym_state);
296             }
297
298             break;
299
300         case VIRTIO_ISR_PORT:
301             sym_state->virtio_cfg.pci_isr = *(uint8_t *)src;
302             break;
303         default:
304             return -1;
305             break;
306     }
307
308     return length;
309 }
310
311
312 static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
313
314     struct virtio_sym_state * sym_state = (struct virtio_sym_state *)private_data;
315     int port_idx = port % sym_state->io_range_size;
316
317 /*
318     PrintDebug("SYMMOD: VIRTIO SYMBIOTIC Read  for port %d (index =%d), length=%d\n", 
319                port, port_idx, length);
320 */
321     switch (port_idx) {
322         case HOST_FEATURES_PORT:
323             if (length != 4) {
324                 PrintError("Illegal read length for host features\n");
325                 return -1;
326             }
327
328             *(uint32_t *)dst = sym_state->virtio_cfg.host_features;
329         
330             break;
331         case VRING_PG_NUM_PORT:
332             if (length != 4) {
333                 PrintError("Illegal read length for page frame number\n");
334                 return -1;
335             }
336
337             *(uint32_t *)dst = sym_state->cur_queue->pfn;
338
339             break;
340         case VRING_SIZE_PORT:
341             if (length != 2) {
342                 PrintError("Illegal read length for vring size\n");
343                 return -1;
344             }
345                 
346             *(uint16_t *)dst = sym_state->cur_queue->queue_size;
347
348             break;
349
350         case VIRTIO_STATUS_PORT:
351             if (length != 1) {
352                 PrintError("Illegal read length for status\n");
353                 return -1;
354             }
355
356             *(uint8_t *)dst = sym_state->virtio_cfg.status;
357             break;
358
359         case VIRTIO_ISR_PORT:
360             *(uint8_t *)dst = sym_state->virtio_cfg.pci_isr;
361             sym_state->virtio_cfg.pci_isr = 0;
362             v3_pci_lower_irq(sym_state->pci_bus, 0, sym_state->pci_dev);
363             break;
364
365         default:
366             if ( (port_idx >= sizeof(struct virtio_config)) && 
367                  (port_idx < (sizeof(struct virtio_config) + sizeof(struct sym_config))) ) {
368                 int cfg_offset = port_idx - sizeof(struct virtio_config);
369                 uint8_t * cfg_ptr = (uint8_t *)&(sym_state->sym_cfg);
370
371                 memcpy(dst, cfg_ptr + cfg_offset, length);
372                 
373             } else {
374                 PrintError("Read of Unhandled Virtio Read\n");
375                 return -1;
376             }
377           
378             break;
379     }
380
381     return length;
382 }
383
384
385
386
387 static int virtio_load_module(struct v3_vm_info * vm, char * name, int mod_size, void * priv_data) {
388     struct virtio_sym_state * virtio = (struct virtio_sym_state *)priv_data;
389     //   struct virtio_queue * q = virtio->cur_queue;
390     struct virtio_queue * q = &(virtio->queue[NOTIFY_QUEUE]);
391
392     if (strlen(name) >= 32) {
393         PrintError("Module name is too long... (%d bytes) limit is 32\n", (uint32_t)strlen(name));
394         return -1;
395     }
396
397     PrintDebug("SYMMOD: VIRTIO SYMMOD Loader: Loading Module (size=%d)\n", mod_size);
398
399     //queue is not set yet
400     if (q->ring_avail_addr == 0) {
401         PrintError("Queue is not set\n");
402         return -1;
403     }
404
405     
406     if (q->cur_avail_idx < q->avail->index) {
407         uint16_t notifier_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
408         struct symmod_hdr * notifier = NULL;
409         struct vring_desc * notifier_desc = NULL;
410
411         PrintDebug("SYMMOD: Descriptor index=%d\n", q->cur_avail_idx % q->queue_size);
412
413         notifier_desc = &(q->desc[notifier_idx]);
414
415         PrintDebug("SYMMOD: Notifier Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", notifier_desc, 
416                    (void *)(notifier_desc->addr_gpa), notifier_desc->length, notifier_desc->flags, notifier_desc->next);        
417
418         if (guest_pa_to_host_va(&(vm->cores[0]), notifier_desc->addr_gpa, (addr_t *)&(notifier)) == -1) {
419             PrintError("Could not translate receive buffer address\n");
420             return -1;
421         }
422
423         // clear the notifier
424         memset(notifier, 0, sizeof(struct symmod_hdr));
425
426         // set the module name
427         memcpy(notifier->name, name, strlen(name));
428
429         // set module length
430         notifier->num_bytes = mod_size;
431
432         
433         q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
434
435         q->used->ring[q->used->index % q->queue_size].length = sizeof(struct symmod_hdr);
436
437         q->used->index++;
438         q->cur_avail_idx++;
439     }
440
441     if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
442         PrintDebug("SYMMOD: Raising IRQ %d\n",  virtio->pci_dev->config_header.intr_line);
443         v3_pci_raise_irq(virtio->pci_bus, 0, virtio->pci_dev);
444         virtio->virtio_cfg.pci_isr = 0x1;
445     }
446
447
448     return 0;
449 }
450
451
452
453
// Generic device-manager hooks. This device does not support being
// freed, reset, started, or stopped through the device manager.
static struct v3_device_ops dev_ops = {
    .free = NULL,
    .reset = NULL,
    .start = NULL,
    .stop = NULL,
};
460
461
462
// Callbacks registered with the symmod framework (v3_set_symmod_loader)
static struct v3_symmod_loader_ops loader_ops = {
    .load_module = virtio_load_module,
};
466
467
468 static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
469     struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
470     struct virtio_sym_state * virtio_state = NULL;
471     struct pci_device * pci_dev = NULL;
472     char * name = v3_cfg_val(cfg, "name");
473
474     PrintDebug("SYMMOD: Initializing VIRTIO Symbiotic Module device\n");
475
476     if (pci_bus == NULL) {
477         PrintError("VirtIO devices require a PCI Bus");
478         return -1;
479     }
480     
481     virtio_state  = (struct virtio_sym_state *)V3_Malloc(sizeof(struct virtio_sym_state));
482     memset(virtio_state, 0, sizeof(struct virtio_sym_state));
483
484     struct vm_device * dev = v3_allocate_device(name, &dev_ops, virtio_state);
485
486     if (v3_attach_device(vm, dev) == -1) {
487         PrintError("Could not attach device %s\n", name);
488         return -1;
489     }
490
491
492     // PCI initialization
493     {
494         struct v3_pci_bar bars[6];
495         int num_ports = sizeof(struct virtio_config) + sizeof(struct sym_config);
496         int tmp_ports = num_ports;
497         int i;
498
499
500         // This gets the number of ports, rounded up to a power of 2
501         virtio_state->io_range_size = 1; // must be a power of 2
502
503         while (tmp_ports > 0) {
504             tmp_ports >>= 1;
505             virtio_state->io_range_size <<= 1;
506         }
507         
508         // this is to account for any low order bits being set in num_ports
509         // if there are none, then num_ports was already a power of 2 so we shift right to reset it
510         if ((num_ports & ((virtio_state->io_range_size >> 1) - 1)) == 0) {
511             virtio_state->io_range_size >>= 1;
512         }
513
514
515         for (i = 0; i < 6; i++) {
516             bars[i].type = PCI_BAR_NONE;
517         }
518
519         bars[0].type = PCI_BAR_IO;
520         bars[0].default_base_port = -1;
521         bars[0].num_ports = virtio_state->io_range_size;
522
523         bars[0].io_read = virtio_io_read;
524         bars[0].io_write = virtio_io_write;
525         bars[0].private_data = virtio_state;
526
527         pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
528                                          0, PCI_AUTO_DEV_NUM, 0,
529                                          "LNX_VIRTIO_SYMMOD", bars,
530                                          NULL, NULL, NULL, virtio_state);
531
532         if (!pci_dev) {
533             PrintError("Could not register PCI Device\n");
534             return -1;
535         }
536         
537         pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
538         pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;
539         
540
541         pci_dev->config_header.device_id = VIRTIO_SYMMOD_DEV_ID;
542         pci_dev->config_header.class = PCI_CLASS_MEMORY;
543         pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;
544     
545         pci_dev->config_header.subsystem_id = VIRTIO_SYMMOD_SUBDEVICE_ID;
546
547
548         pci_dev->config_header.intr_pin = 1;
549
550         pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)
551
552
553         virtio_state->pci_dev = pci_dev;
554         virtio_state->pci_bus = pci_bus;
555     }
556
557     virtio_reset(virtio_state);
558
559     v3_set_symmod_loader(vm, &loader_ops, virtio_state);
560
561     return 0;
562 }
563
564
// Register this device with the global device registry under the name
// referenced by the machine configuration file.
device_register("LNX_VIRTIO_SYMMOD", virtio_init)