Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
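For example, to track a release branch (the branch name below is a placeholder; list the branches that actually exist with "git branch -r"):

  git branch -r
  git checkout --track -b <release-branch> origin/<release-branch>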


Updated devices to remove vm_device dependencies.
palacios.git: palacios/src/devices/lnx_virtio_sym.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest_mem.h>
#include <devices/lnx_virtio_pci.h>

#include <devices/pci.h>


#define SYM_MAGIC_PORT 20
#define SYM_SWAP_BASE_PORT 24
#define SYM_SWAP_SIZE_PORT 32


#define QUEUE_SIZE 128

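/* Device configuration space exposed to the guest, mapped directly
 * behind the standard virtio registers in the I/O BAR */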
struct sym_config {
    uint32_t magic;
    uint64_t swap_base;
    uint32_t swap_size;
} __attribute__((packed));

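/* Per-device state: the symbiotic and virtio config registers, the backing
 * PCI bus/device, and a single virtqueue */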
struct virtio_sym_state {
    struct sym_config sym_cfg;
    struct virtio_config virtio_cfg;

    struct vm_device * pci_bus;
    struct pci_device * pci_dev;

    struct virtio_queue queue[1];

    struct virtio_queue * cur_queue;

    int io_range_size;
};

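/* Return the device to its post-reset state: clear the queue, the virtio
 * status and ISR registers, and the symbiotic configuration */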
static int virtio_reset(struct virtio_sym_state * virtio) {

    memset(virtio->queue, 0, sizeof(struct virtio_queue));

    virtio->cur_queue = &(virtio->queue[0]);

    virtio->virtio_cfg.status = 0;
    virtio->virtio_cfg.pci_isr = 0;

    virtio->queue[0].queue_size = QUEUE_SIZE;

    memset(&(virtio->sym_cfg), 0, sizeof(struct sym_config));

    return 0;
}

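/* Count the descriptors in the chain starting at index by following NEXT links */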
static int get_desc_count(struct virtio_queue * q, int index) {
    struct vring_desc * tmp_desc = &(q->desc[index]);
    int cnt = 1;

    while (tmp_desc->flags & VIRTIO_NEXT_FLAG) {
        tmp_desc = &(q->desc[tmp_desc->next]);
        cnt++;
    }

    return cnt;
}

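/* Process buffers the guest has posted on a queue notification ("kick").
 * Note that the function currently returns -1 immediately, so the
 * descriptor-walking code below is unreachable for now. */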
static int handle_kick(struct guest_info * core, struct virtio_sym_state * sym_state) {
    struct virtio_queue * q = sym_state->cur_queue;

    return -1;

    PrintDebug("VIRTIO Symbiotic KICK: cur_index=%d (mod=%d), avail_index=%d\n", 
               q->cur_avail_idx, q->cur_avail_idx % QUEUE_SIZE, q->avail->index);

    while (q->cur_avail_idx < q->avail->index) {
        struct vring_desc * tmp_desc = NULL;
        uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        int desc_cnt = get_desc_count(q, desc_idx);
        int i = 0;
        uint32_t req_len = 0;


        PrintDebug("Descriptor Count=%d, index=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE);

        for (i = 0; i < desc_cnt; i++) {
            addr_t page_addr;
            tmp_desc = &(q->desc[desc_idx]);

            PrintDebug("Header Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", 
                       tmp_desc, 
                       (void *)(addr_t)(tmp_desc->addr_gpa), tmp_desc->length, 
                       tmp_desc->flags, tmp_desc->next);


            if (v3_gpa_to_hva(core, tmp_desc->addr_gpa, (addr_t *)&(page_addr)) == -1) {
                PrintError("Could not translate block header address\n");
                return -1;
            }

            /*      
               if (handle_sym_op(dev, tmp_desc, buf_desc, status_desc) == -1) {
               PrintError("Error handling symbiotic operation\n");
               return -1;
               }
            */

            PrintDebug("Symbiotic Device Currently Ignored\n");


            req_len += tmp_desc->length;
            desc_idx = tmp_desc->next;
        }

        q->used->ring[q->used->index % QUEUE_SIZE].id = q->avail->ring[q->cur_avail_idx % QUEUE_SIZE];
        q->used->ring[q->used->index % QUEUE_SIZE].length = req_len; // What do we set this to????

        q->used->index++;
        q->cur_avail_idx++;
    }

    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
        PrintDebug("Raising IRQ %d\n",  sym_state->pci_dev->config_header.intr_line);
        v3_pci_raise_irq(sym_state->pci_bus, 0, sym_state->pci_dev);
        sym_state->virtio_cfg.pci_isr = VIRTIO_ISR_ACTIVE;
    }

    return 0;
}

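/* Guest writes to the device's virtio I/O register window (BAR 0) */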
static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
    struct virtio_sym_state * sym_state = (struct virtio_sym_state *)private_data;
    int port_idx = port % sym_state->io_range_size;


/*
    PrintDebug("VIRTIO SYMBIOTIC Write for port %d (index=%d) len=%d, value=%x\n", 
               port, port_idx,  length, *(uint32_t *)src);
*/


    switch (port_idx) {
        case GUEST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal write length for guest features\n");
                return -1;
            }

            sym_state->virtio_cfg.guest_features = *(uint32_t *)src;

            break;
        case VRING_PG_NUM_PORT:
            if (length == 4) {
                addr_t pfn = *(uint32_t *)src;
                addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);


                sym_state->cur_queue->pfn = pfn;

                sym_state->cur_queue->ring_desc_addr = page_addr;
                sym_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
                sym_state->cur_queue->ring_used_addr = ( sym_state->cur_queue->ring_avail_addr + \
                                                 sizeof(struct vring_avail)    + \
                                                 (QUEUE_SIZE * sizeof(uint16_t)));

                // round up to next page boundary.
                sym_state->cur_queue->ring_used_addr = (sym_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;

                if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_desc_addr, (addr_t *)&(sym_state->cur_queue->desc)) == -1) {
                    PrintError("Could not translate ring descriptor address\n");
                    return -1;
                }


                if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_avail_addr, (addr_t *)&(sym_state->cur_queue->avail)) == -1) {
                    PrintError("Could not translate ring available address\n");
                    return -1;
                }


                if (v3_gpa_to_hva(core, sym_state->cur_queue->ring_used_addr, (addr_t *)&(sym_state->cur_queue->used)) == -1) {
                    PrintError("Could not translate ring used address\n");
                    return -1;
                }

                PrintDebug("RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
                           (void *)(sym_state->cur_queue->ring_desc_addr),
                           (void *)(sym_state->cur_queue->ring_avail_addr),
                           (void *)(sym_state->cur_queue->ring_used_addr));

                PrintDebug("RingDesc=%p, Avail=%p, Used=%p\n", 
                           sym_state->cur_queue->desc, sym_state->cur_queue->avail, sym_state->cur_queue->used);

            } else {
                PrintError("Illegal write length for page frame number\n");
                return -1;
            }
            break;
        case VRING_Q_SEL_PORT:
            sym_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;

            if (sym_state->virtio_cfg.vring_queue_selector > 0) {
                PrintError("Virtio Symbiotic device only has 1 queue. Selected %d\n", 
                           sym_state->virtio_cfg.vring_queue_selector);
                return -1;
            }

            sym_state->cur_queue = &(sym_state->queue[sym_state->virtio_cfg.vring_queue_selector]);

            break;
        case VRING_Q_NOTIFY_PORT:
            PrintDebug("Handling Kick\n");
            if (handle_kick(core, sym_state) == -1) {
                PrintError("Could not handle Symbiotic Notification\n");
                return -1;
            }
            break;
        case VIRTIO_STATUS_PORT:
            sym_state->virtio_cfg.status = *(uint8_t *)src;

            if (sym_state->virtio_cfg.status == 0) {
                PrintDebug("Resetting device\n");
                virtio_reset(sym_state);
            }

            break;

        case VIRTIO_ISR_PORT:
            sym_state->virtio_cfg.pci_isr = *(uint8_t *)src;
            break;
        default:
            return -1;
            break;
    }

    return length;
}

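/* Guest reads from the virtio register window; offsets past the standard
 * virtio registers are served from the symbiotic config structure */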
static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {

    struct virtio_sym_state * sym_state = (struct virtio_sym_state *)private_data;
    int port_idx = port % sym_state->io_range_size;

/*
    PrintDebug("VIRTIO SYMBIOTIC Read  for port %d (index =%d), length=%d\n", 
               port, port_idx, length);
*/
    switch (port_idx) {
        case HOST_FEATURES_PORT:
            if (length != 4) {
                PrintError("Illegal read length for host features\n");
                return -1;
            }

            *(uint32_t *)dst = sym_state->virtio_cfg.host_features;

            break;
        case VRING_PG_NUM_PORT:
            if (length != 4) {
                PrintError("Illegal read length for page frame number\n");
                return -1;
            }

            *(uint32_t *)dst = sym_state->cur_queue->pfn;

            break;
        case VRING_SIZE_PORT:
            if (length != 2) {
                PrintError("Illegal read length for vring size\n");
                return -1;
            }

            *(uint16_t *)dst = sym_state->cur_queue->queue_size;

            break;

        case VIRTIO_STATUS_PORT:
            if (length != 1) {
                PrintError("Illegal read length for status\n");
                return -1;
            }

            *(uint8_t *)dst = sym_state->virtio_cfg.status;
            break;

        case VIRTIO_ISR_PORT:
            *(uint8_t *)dst = sym_state->virtio_cfg.pci_isr;
            sym_state->virtio_cfg.pci_isr = 0;
            v3_pci_lower_irq(sym_state->pci_bus, 0, sym_state->pci_dev);
            break;

        default:
            if ( (port_idx >= sizeof(struct virtio_config)) && 
                 (port_idx < (sizeof(struct virtio_config) + sizeof(struct sym_config))) ) {
                int cfg_offset = port_idx - sizeof(struct virtio_config);
                uint8_t * cfg_ptr = (uint8_t *)&(sym_state->sym_cfg);

                memcpy(dst, cfg_ptr + cfg_offset, length);

            } else {
                PrintError("Unhandled Virtio read\n");
                return -1;
            }

            break;
    }

    return length;
}

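/* Generic device operations: this device registers no cleanup handler */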
static struct v3_device_ops dev_ops = {
    .free = NULL,
};

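/* Allocate and attach the device, then register it as a virtio PCI
 * function on the PCI bus named in the config */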
static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
    struct virtio_sym_state * virtio_state = NULL;
    struct pci_device * pci_dev = NULL;
    char * dev_id = v3_cfg_val(cfg, "ID");

    PrintDebug("Initializing VIRTIO Symbiotic device\n");

    if (pci_bus == NULL) {
        PrintError("VirtIO devices require a PCI Bus\n");
        return -1;
    }


    virtio_state  = (struct virtio_sym_state *)V3_Malloc(sizeof(struct virtio_sym_state));
    memset(virtio_state, 0, sizeof(struct virtio_sym_state));


    struct vm_device * dev = v3_allocate_device(dev_id, &dev_ops, virtio_state);
    if (v3_attach_device(vm, dev) == -1) {
        PrintError("Could not attach device %s\n", dev_id);
        return -1;
    }


    // PCI initialization
    {
        struct v3_pci_bar bars[6];
        int num_ports = sizeof(struct virtio_config) + sizeof(struct sym_config);
        int tmp_ports = num_ports;
        int i;


        // This gets the number of ports, rounded up to a power of 2
        virtio_state->io_range_size = 1; // must be a power of 2

        while (tmp_ports > 0) {
            tmp_ports >>= 1;
            virtio_state->io_range_size <<= 1;
        }

        // this is to account for any low order bits being set in num_ports
        // if there are none, then num_ports was already a power of 2 so we shift right to reset it
        if ((num_ports & ((virtio_state->io_range_size >> 1) - 1)) == 0) {
            virtio_state->io_range_size >>= 1;
        }


        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        bars[0].type = PCI_BAR_IO;
        bars[0].default_base_port = -1;
        bars[0].num_ports = virtio_state->io_range_size;

        bars[0].io_read = virtio_io_read;
        bars[0].io_write = virtio_io_write;
        bars[0].private_data = virtio_state;

        pci_dev = v3_pci_register_device(pci_bus, PCI_STD_DEVICE, 
                                         0, PCI_AUTO_DEV_NUM, 0,
                                         "LNX_VIRTIO_SYM", bars,
                                         NULL, NULL, NULL, virtio_state);

        if (!pci_dev) {
            PrintError("Could not register PCI Device\n");
            return -1;
        }

        pci_dev->config_header.vendor_id = VIRTIO_VENDOR_ID;
        pci_dev->config_header.subsystem_vendor_id = VIRTIO_SUBVENDOR_ID;


        pci_dev->config_header.device_id = VIRTIO_SYMBIOTIC_DEV_ID;
        pci_dev->config_header.class = PCI_CLASS_MEMORY;
        pci_dev->config_header.subclass = PCI_MEM_SUBCLASS_RAM;

        pci_dev->config_header.subsystem_id = VIRTIO_SYMBIOTIC_SUBDEVICE_ID;


        pci_dev->config_header.intr_pin = 1;

        pci_dev->config_header.max_latency = 1; // ?? (qemu does it...)


        virtio_state->pci_dev = pci_dev;
        virtio_state->pci_bus = pci_bus;
    }

    virtio_reset(virtio_state);


    return 0;
}


device_register("LNX_VIRTIO_SYM", virtio_init)