Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


45aaba75fc70e072060236715d2d7873fb1998ef
[palacios.git] / palacios / src / devices / ide.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef V3_CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
36
37
38 #define PRI_DATA_PORT         0x1f0
39 #define PRI_FEATURES_PORT     0x1f1
40 #define PRI_SECT_CNT_PORT     0x1f2
41 #define PRI_SECT_NUM_PORT     0x1f3
42 #define PRI_CYL_LOW_PORT      0x1f4
43 #define PRI_CYL_HIGH_PORT     0x1f5
44 #define PRI_DRV_SEL_PORT      0x1f6
45 #define PRI_CMD_PORT          0x1f7
46 #define PRI_CTRL_PORT         0x3f6
47 #define PRI_ADDR_REG_PORT     0x3f7
48
49 #define SEC_DATA_PORT         0x170
50 #define SEC_FEATURES_PORT     0x171
51 #define SEC_SECT_CNT_PORT     0x172
52 #define SEC_SECT_NUM_PORT     0x173
53 #define SEC_CYL_LOW_PORT      0x174
54 #define SEC_CYL_HIGH_PORT     0x175
55 #define SEC_DRV_SEL_PORT      0x176
56 #define SEC_CMD_PORT          0x177
57 #define SEC_CTRL_PORT         0x376
58 #define SEC_ADDR_REG_PORT     0x377
59
60
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
63
64 #define DATA_BUFFER_SIZE 2048
65
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
68
69
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
71                                           "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72                                            "PRI_CTRL", "PRI_ADDR_REG"};
73
74
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
76                                           "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77                                            "SEC_CTRL", "SEC_ADDR_REG"};
78
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80                                            "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
81
82
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
105 struct ide_cd_state {
106     struct atapi_sense_data sense;
107
108     uint8_t atapi_cmd;
109     struct atapi_error_recovery err_recovery;
110 };
111
112 struct ide_hd_state {
113     uint32_t accessed;
114
115     /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116     uint32_t mult_sector_num;
117
118     /* This is the current op sector size:
119      * for multiple sector ops this equals mult_sector_num
120      * for standard ops this equals 1
121      */
122     uint64_t cur_sector_num;
123 };
124
125 struct ide_drive {
126     // Command Registers
127
128     v3_block_type_t drive_type;
129
130     struct v3_dev_blk_ops * ops;
131
132     union {
133         struct ide_cd_state cd_state;
134         struct ide_hd_state hd_state;
135     };
136
137     char model[41];
138
139     // Where we are in the data transfer
140     uint64_t transfer_index;
141
142     // the length of a transfer
143     // calculated for easy access
144     uint64_t transfer_length;
145
146     uint64_t current_lba;
147
148     // We have a local data buffer that we use for IO port accesses
149     uint8_t data_buf[DATA_BUFFER_SIZE];
150
151
152     uint32_t num_cylinders;
153     uint32_t num_heads;
154     uint32_t num_sectors;
155
156
157     struct lba48_state {
158         // all start at zero
159         uint64_t lba;                  
160         uint16_t sector_count;            // for LBA48
161         uint8_t  sector_count_state;      // two step write to 1f2/172 (high first)
162         uint8_t  lba41_state;             // two step write to 1f3
163         uint8_t  lba52_state;             // two step write to 1f4
164         uint8_t  lba63_state;             // two step write to 15
165     } lba48;
166
167     void * private_data;
168     
169     union {
170         uint8_t sector_count;             // 0x1f2,0x172  (ATA)
171         struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
172     } __attribute__((packed));
173
174
175     union {
176         uint8_t sector_num;               // 0x1f3,0x173
177         uint8_t lba0;
178     } __attribute__((packed));
179
180     union {
181         uint16_t cylinder;
182         uint16_t lba12;
183         
184         struct {
185             uint8_t cylinder_low;       // 0x1f4,0x174
186             uint8_t cylinder_high;      // 0x1f5,0x175
187         } __attribute__((packed));
188         
189         struct {
190             uint8_t lba1;
191             uint8_t lba2;
192         } __attribute__((packed));
193         
194         
195         // The transfer length requested by the CPU 
196         uint16_t req_len;
197     } __attribute__((packed));
198
199 };
200
201
202
203 struct ide_channel {
204     struct ide_drive drives[2];
205
206     // Command Registers
207     struct ide_error_reg error_reg;     // [read] 0x1f1,0x171
208
209     struct ide_features_reg features;
210
211     struct ide_drive_head_reg drive_head; // 0x1f6,0x176
212
213     struct ide_status_reg status;       // [read] 0x1f7,0x177
214     uint8_t cmd_reg;                // [write] 0x1f7,0x177
215
216     int irq; // this is temporary until we add PCI support
217
218     // Control Registers
219     struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
220
221     union {
222         uint8_t dma_ports[8];
223         struct {
224             struct ide_dma_cmd_reg dma_cmd;
225             uint8_t rsvd1;
226             struct ide_dma_status_reg dma_status;
227             uint8_t rsvd2;
228             uint32_t dma_prd_addr;
229         } __attribute__((packed));
230     } __attribute__((packed));
231
232     uint32_t dma_tbl_index;
233 };
234
235
236
237 struct ide_internal {
238     struct ide_channel channels[2];
239
240     struct v3_southbridge * southbridge;
241     struct vm_device * pci_bus;
242
243     struct pci_device * ide_pci;
244
245     struct v3_vm_info * vm;
246 };
247
248
249
250
251
252 /* Utility functions */
253
254 static inline uint16_t be_to_le_16(const uint16_t val) {
255     uint8_t * buf = (uint8_t *)&val;
256     return (buf[0] << 8) | (buf[1]) ;
257 }
258
259 static inline uint16_t le_to_be_16(const uint16_t val) {
260     return be_to_le_16(val);
261 }
262
263
264 static inline uint32_t be_to_le_32(const uint32_t val) {
265     uint8_t * buf = (uint8_t *)&val;
266     return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
267 }
268
269 static inline uint32_t le_to_be_32(const uint32_t val) {
270     return be_to_le_32(val);
271 }
272
273
274 static inline int is_lba28(struct ide_channel * channel) {
275     return channel->drive_head.lba_mode && channel->drive_head.rsvd1 && channel->drive_head.rsvd2;
276 }
277
278 static inline int is_lba48(struct ide_channel * channel) {
279     return channel->drive_head.lba_mode && !channel->drive_head.rsvd1 && !channel->drive_head.rsvd2;
280 }
281
282 static inline int is_chs(struct ide_channel * channel) {
283     return !channel->drive_head.lba_mode;
284 }
285
286 static inline int get_channel_index(ushort_t port) {
287     if (((port & 0xfff8) == 0x1f0) ||
288         ((port & 0xfffe) == 0x3f6) || 
289         ((port & 0xfff8) == 0xc000)) {
290         return 0;
291     } else if (((port & 0xfff8) == 0x170) ||
292                ((port & 0xfffe) == 0x376) ||
293                ((port & 0xfff8) == 0xc008)) {
294         return 1;
295     }
296
297     return -1;
298 }
299
300 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
301     int channel_idx = get_channel_index(port);    
302     return &(ide->channels[channel_idx]);
303 }
304
305 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
306     return &(channel->drives[channel->drive_head.drive_sel]);
307 }
308
309
310
311
312 /* Drive Commands */
313 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
314     if (channel->ctrl_reg.irq_disable == 0) {
315
316         PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
317
318         channel->dma_status.int_gen = 1;
319         v3_raise_irq(ide->vm, channel->irq);
320     } else {
321         PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
322     }
323 }
324
325
326 static void drive_reset(struct ide_drive * drive) {
327     drive->sector_count = 0x01;
328     drive->sector_num = 0x01;
329
330     PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
331     
332     if (drive->drive_type == BLOCK_CDROM) {
333         drive->cylinder = 0xeb14;
334     } else {
335         drive->cylinder = 0x0000;
336         //drive->hd_state.accessed = 0;
337     }
338
339
340     memset(drive->data_buf, 0, sizeof(drive->data_buf));
341     drive->transfer_index = 0;
342
343     // Send the reset signal to the connected device callbacks
344     //     channel->drives[0].reset();
345     //    channel->drives[1].reset();
346 }
347
348 static void channel_reset(struct ide_channel * channel) {
349     
350     // set busy and seek complete flags
351     channel->status.val = 0x90;
352
353     // Clear errors
354     channel->error_reg.val = 0x01;
355
356     // clear commands
357     channel->cmd_reg = 0;  // NOP
358
359     channel->ctrl_reg.irq_disable = 0;
360 }
361
362 static void channel_reset_complete(struct ide_channel * channel) {
363     channel->status.busy = 0;
364     channel->status.ready = 1;
365
366     channel->drive_head.head_num = 0;    
367     
368     drive_reset(&(channel->drives[0]));
369     drive_reset(&(channel->drives[1]));
370 }
371
372
373 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
374
375     PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");
376
377     channel->status.val = 0x41; // Error + ready
378     channel->error_reg.val = 0x04; // No idea...
379
380     ide_raise_irq(ide, channel);
381 }
382
383
384 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
385 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
386
387
388 /* ATAPI functions */
389 #include "atapi.h"
390
391 /* ATA functions */
392 #include "ata.h"
393
394
395
396 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
397     struct ide_dma_prd prd_entry;
398     int index = 0;
399
400     V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
401
402     while (1) {
403         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
404         int ret = 0;
405
406         ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
407         
408         if (ret != sizeof(struct ide_dma_prd)) {
409             PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
410             return;
411         }
412
413         V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
414                    prd_entry.base_addr, 
415                    (prd_entry.size == 0) ? 0x10000 : prd_entry.size, 
416                    prd_entry.end_of_table);
417
418         if (prd_entry.end_of_table) {
419             break;
420         }
421
422         index++;
423     }
424
425     return;
426 }
427
428
429 /* IO Operations */
430 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
431     struct ide_drive * drive = get_selected_drive(channel);
432     // This is at top level scope to do the EOT test at the end
433     struct ide_dma_prd prd_entry = {};
434     uint_t bytes_left = drive->transfer_length;
435
436     // Read in the data buffer....
437     // Read a sector/block at a time until the prd entry is full.
438
439 #ifdef V3_CONFIG_DEBUG_IDE
440     print_prd_table(ide, channel);
441 #endif
442
443     PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
444
445     // Loop through the disk data
446     while (bytes_left > 0) {
447         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
448         uint_t prd_bytes_left = 0;
449         uint_t prd_offset = 0;
450         int ret;
451
452         PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
453
454         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
455
456         if (ret != sizeof(struct ide_dma_prd)) {
457             PrintError(core->vm_info, core, "Could not read PRD\n");
458             return -1;
459         }
460
461         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
462                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
463
464         // loop through the PRD data....
465
466         if (prd_entry.size == 0) {
467             // a size of 0 means 64k
468             prd_bytes_left = 0x10000;
469         } else {
470             prd_bytes_left = prd_entry.size;
471         }
472
473
474         while (prd_bytes_left > 0) {
475             uint_t bytes_to_write = 0;
476
477             if (drive->drive_type == BLOCK_DISK) {
478                 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
479
480
481                 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
482                     PrintError(core->vm_info, core, "Failed to read next disk sector\n");
483                     return -1;
484                 }
485             } else if (drive->drive_type == BLOCK_CDROM) {
486                 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
487                     bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
488
489                     if (atapi_read_chunk(ide, channel) == -1) {
490                         PrintError(core->vm_info, core, "Failed to read next disk sector\n");
491                         return -1;
492                     }
493                 } else {
494                     /*
495                     PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
496                     return -1;
497                     */
498                     int cmd_ret = 0;
499
500                     //V3_Print(core->vm_info, core, "DMA of command packet\n");
501
502                     bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
503                     prd_bytes_left = bytes_to_write;
504
505
506                     // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
507                     cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, 
508                                                   bytes_to_write, drive->data_buf); 
509
510                     // check cmd_ret
511
512
513                     bytes_to_write = 0;
514                     prd_bytes_left = 0;
515                     drive->transfer_index += bytes_to_write;
516
517                     channel->status.busy = 0;
518                     channel->status.ready = 1;
519                     channel->status.data_req = 0;
520                     channel->status.error = 0;
521                     channel->status.seek_complete = 1;
522
523                     channel->dma_status.active = 0;
524                     channel->dma_status.err = 0;
525
526                     ide_raise_irq(ide, channel);
527                     
528                     return 0;
529                 }
530             }
531
532             PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n", 
533                        (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
534
535             drive->current_lba++;
536
537             ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 
538
539             if (ret != bytes_to_write) {
540                 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
541                 return -1;
542             }
543
544             PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
545
546             drive->transfer_index += ret;
547             prd_bytes_left -= ret;
548             prd_offset += ret;
549             bytes_left -= ret;
550         }
551
552         channel->dma_tbl_index++;
553
554         if (drive->drive_type == BLOCK_DISK) {
555             if (drive->transfer_index % HD_SECTOR_SIZE) {
556                 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
557                 return -1;
558             }
559         } else if (drive->drive_type == BLOCK_CDROM) {
560             if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
561                 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
562                     PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
563                     PrintError(core->vm_info, core, "transfer_index=%llu, transfer_length=%llu\n", 
564                                drive->transfer_index, drive->transfer_length);
565                     return -1;
566                 }
567             }
568         }
569
570
571         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
572             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
573             return -1;
574         }
575     }
576
577     /*
578       drive->irq_flags.io_dir = 1;
579       drive->irq_flags.c_d = 1;
580       drive->irq_flags.rel = 0;
581     */
582
583
584     // Update to the next PRD entry
585
586     // set DMA status
587
588     if (prd_entry.end_of_table) {
589         channel->status.busy = 0;
590         channel->status.ready = 1;
591         channel->status.data_req = 0;
592         channel->status.error = 0;
593         channel->status.seek_complete = 1;
594
595         channel->dma_status.active = 0;
596         channel->dma_status.err = 0;
597     }
598
599     ide_raise_irq(ide, channel);
600
601     return 0;
602 }
603
604
605 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
606     struct ide_drive * drive = get_selected_drive(channel);
607     // This is at top level scope to do the EOT test at the end
608     struct ide_dma_prd prd_entry = {};
609     uint_t bytes_left = drive->transfer_length;
610
611
612     PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
613
614     // Loop through disk data
615     while (bytes_left > 0) {
616         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
617         uint_t prd_bytes_left = 0;
618         uint_t prd_offset = 0;
619         int ret;
620         
621         PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
622
623         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
624
625         if (ret != sizeof(struct ide_dma_prd)) {
626             PrintError(core->vm_info, core, "Could not read PRD\n");
627             return -1;
628         }
629
630         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
631                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
632
633
634         if (prd_entry.size == 0) {
635             // a size of 0 means 64k
636             prd_bytes_left = 0x10000;
637         } else {
638             prd_bytes_left = prd_entry.size;
639         }
640
641         while (prd_bytes_left > 0) {
642             uint_t bytes_to_write = 0;
643
644
645             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
646
647
648             ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
649
650             if (ret != bytes_to_write) {
651                 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
652                 return -1;
653             }
654
655             PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
656
657
658             if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
659                 PrintError(core->vm_info, core, "Failed to write data to disk\n");
660                 return -1;
661             }
662             
663             drive->current_lba++;
664
665             drive->transfer_index += ret;
666             prd_bytes_left -= ret;
667             prd_offset += ret;
668             bytes_left -= ret;
669         }
670
671         channel->dma_tbl_index++;
672
673         if (drive->transfer_index % HD_SECTOR_SIZE) {
674             PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
675             return -1;
676         }
677
678         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
679             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
680             PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%llu)...\n", 
681                        bytes_left, drive->transfer_length);
682             PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
683                        prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
684
685             print_prd_table(ide, channel);
686             return -1;
687         }
688     }
689
690     if (prd_entry.end_of_table) {
691         channel->status.busy = 0;
692         channel->status.ready = 1;
693         channel->status.data_req = 0;
694         channel->status.error = 0;
695         channel->status.seek_complete = 1;
696
697         channel->dma_status.active = 0;
698         channel->dma_status.err = 0;
699     }
700
701     ide_raise_irq(ide, channel);
702
703     return 0;
704 }
705
706
707
708 #define DMA_CMD_PORT      0x00
709 #define DMA_STATUS_PORT   0x02
710 #define DMA_PRD_PORT0     0x04
711 #define DMA_PRD_PORT1     0x05
712 #define DMA_PRD_PORT2     0x06
713 #define DMA_PRD_PORT3     0x07
714
715 #define DMA_CHANNEL_FLAG  0x08
716
717 /*
718   Note that DMA model is as follows:
719
720     1. Write the PRD pointer to the busmaster (DMA engine)
721     2. Start the transfer on the device
722     3. Tell the busmaster to start shoveling data (active DMA)
723 */
724
725 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
726     struct ide_internal * ide = (struct ide_internal *)private_data;
727     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
728     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
729     struct ide_channel * channel = &(ide->channels[channel_flag]);
730
731     PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
732                port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
733
734     switch (port_offset) {
735         case DMA_CMD_PORT:
736             channel->dma_cmd.val = *(uint8_t *)src;
737             
738             PrintDebug(core->vm_info, core, "IDE: dma command write:  0x%x\n", channel->dma_cmd.val);
739
740             if (channel->dma_cmd.start == 0) {
741                 channel->dma_tbl_index = 0;
742             } else {
743                 // Launch DMA operation, interrupt at end
744
745                 channel->dma_status.active = 1;
746
747                 if (channel->dma_cmd.read == 1) {
748                     // DMA Read the whole thing - dma_read will raise irq
749                     if (dma_read(core, ide, channel) == -1) {
750                         PrintError(core->vm_info, core, "Failed DMA Read\n");
751                         return -1;
752                     }
753                 } else {
754                     // DMA write the whole thing - dma_write will raiase irw
755                     if (dma_write(core, ide, channel) == -1) {
756                         PrintError(core->vm_info, core, "Failed DMA Write\n");
757                         return -1;
758                     }
759                 }
760                 
761                 // DMA complete
762                 // Note that guest cannot abort a DMA transfer
763                 channel->dma_cmd.start = 0;
764             }
765
766             break;
767             
768         case DMA_STATUS_PORT: {
769             // This is intended to clear status
770
771             uint8_t val = *(uint8_t *)src;
772
773             if (length != 1) {
774                 PrintError(core->vm_info, core, "Invalid write length for DMA status port\n");
775                 return -1;
776             }
777
778             // but preserve certain bits
779             channel->dma_status.val = ((val & 0x60) | 
780                                        (channel->dma_status.val & 0x01) |
781                                        (channel->dma_status.val & ~val & 0x06));
782
783             break;
784         }           
785         case DMA_PRD_PORT0:
786         case DMA_PRD_PORT1:
787         case DMA_PRD_PORT2:
788         case DMA_PRD_PORT3: {
789             uint_t addr_index = port_offset & 0x3;
790             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
791             int i = 0;
792
793             if (addr_index + length > 4) {
794                 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
795                 return -1;
796             }
797
798             for (i = 0; i < length; i++) {
799                 addr_buf[addr_index + i] = *((uint8_t *)src + i);
800             }
801
802             PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
803
804             break;
805         }
806         default:
807             PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
808             break;
809     }
810
811     return length;
812 }
813
814
815 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
816     struct ide_internal * ide = (struct ide_internal *)private_data;
817     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
818     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
819     struct ide_channel * channel = &(ide->channels[channel_flag]);
820
821     PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
822
823     if (port_offset + length > 16) {
824         PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
825         return -1;
826     }
827
828     memcpy(dst, channel->dma_ports + port_offset, length);
829     
830     PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
831
832     return length;
833 }
834
835
836
837 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
838     struct ide_internal * ide = priv_data;
839     struct ide_channel * channel = get_selected_channel(ide, port);
840     struct ide_drive * drive = get_selected_drive(channel);
841
842     if (length != 1) {
843         PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
844         return -1;
845     }
846
847     PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
848     
849     channel->cmd_reg = *(uint8_t *)src;
850     
851     switch (channel->cmd_reg) {
852
853         case ATA_PIDENTIFY: // ATAPI Identify Device Packet (CDROM)
854             if (drive->drive_type != BLOCK_CDROM) {
855                 drive_reset(drive);
856
857                 // JRL: Should we abort here?
858                 ide_abort_command(ide, channel);
859             } else {
860                 
861                 atapi_identify_device(drive);
862                 
863                 channel->error_reg.val = 0;
864                 channel->status.val = 0x58; // ready, data_req, seek_complete
865             
866                 ide_raise_irq(ide, channel);
867             }
868             break;
869
870         case ATA_IDENTIFY: // Identify Device
871             if (drive->drive_type != BLOCK_DISK) {
872                 drive_reset(drive);
873
874                 // JRL: Should we abort here?
875                 ide_abort_command(ide, channel);
876             } else {
877                 ata_identify_device(drive);
878
879                 channel->error_reg.val = 0;
880                 channel->status.val = 0x58;
881
882                 ide_raise_irq(ide, channel);
883             }
884             break;
885
886         case ATA_PACKETCMD: // ATAPI Command Packet (CDROM)
887             if (drive->drive_type != BLOCK_CDROM) {
888                 ide_abort_command(ide, channel);
889             }
890             
891             drive->sector_count = 1;
892
893             channel->status.busy = 0;
894             channel->status.write_fault = 0;
895             channel->status.data_req = 1;
896             channel->status.error = 0;
897
898             // reset the data buffer...
899             drive->transfer_length = ATAPI_PACKET_SIZE;
900             drive->transfer_index = 0;
901
902             break;
903
904         case ATA_READ:      // Read Sectors with Retry
905         case ATA_READ_ONCE: // Read Sectors without Retry
906         case ATA_MULTREAD:  // Read multiple sectors per ire
907         case ATA_READ_EXT:  // Read Sectors Extended (LBA48)
908
909             if (channel->cmd_reg==ATA_MULTREAD) { 
910                 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
911             } else {
912                 drive->hd_state.cur_sector_num = 1;
913             }
914
915             if (ata_read_sectors(ide, channel) == -1) {
916                 PrintError(core->vm_info, core, "Error reading sectors\n");
917                 ide_abort_command(ide,channel);
918             }
919             break;
920
921         case ATA_WRITE:            // Write Sector with retry
922         case ATA_WRITE_ONCE:       // Write Sector without retry
923         case ATA_MULTWRITE:        // Write multiple sectors per irq
924         case ATA_WRITE_EXT:        // Write Sectors Extended (LBA48)
925
926             if (channel->cmd_reg==ATA_MULTWRITE) { 
927                 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
928             } else {
929                 drive->hd_state.cur_sector_num = 1;
930             }
931
932             if (ata_write_sectors(ide, channel) == -1) {
933                 PrintError(core->vm_info, core, "Error writing sectors\n");
934                 ide_abort_command(ide,channel);
935             }
936             break;
937
938         case ATA_READDMA:            // Read DMA with retry
939         case ATA_READDMA_ONCE:       // Read DMA without retry
940         case ATA_READDMA_EXT:      { // Read DMA (LBA48)
941             uint64_t sect_cnt;
942
943             if (ata_get_lba_and_size(ide, channel, &(drive->current_lba), &sect_cnt) == -1) {
944                 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
945                 ide_abort_command(ide, channel);
946                 return length;
947             }
948             
949             drive->hd_state.cur_sector_num = 1;  // Not used for DMA
950             
951             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
952             drive->transfer_index = 0;
953
954             // Now we wait for the transfer to be intiated by flipping the 
955             // bus-master start bit
956             break;
957         }
958
959         case ATA_WRITEDMA:        // Write DMA with retry
960         case ATA_WRITEDMA_ONCE:   // Write DMA without retry
961         case ATA_WRITEDMA_EXT:  { // Write DMA (LBA48)
962
963             uint64_t sect_cnt;
964
965             if (ata_get_lba_and_size(ide, channel, &(drive->current_lba),&sect_cnt) == -1) {
966                 PrintError(core->vm_info,core,"Cannot get lba\n");
967                 ide_abort_command(ide, channel);
968                 return length;
969             }
970
971             drive->hd_state.cur_sector_num = 1;  // Not used for DMA
972
973             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
974             drive->transfer_index = 0;
975
976             // Now we wait for the transfer to be intiated by flipping the 
977             // bus-master start bit
978             break;
979         }
980
981         case ATA_STANDBYNOW1: // Standby Now 1
982         case ATA_IDLEIMMEDIATE: // Set Idle Immediate
983         case ATA_STANDBY: // Standby
984         case ATA_SETIDLE1: // Set Idle 1
985         case ATA_SLEEPNOW1: // Sleep Now 1
986         case ATA_STANDBYNOW2: // Standby Now 2
987         case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
988         case ATA_STANDBY2: // Standby 2
989         case ATA_SETIDLE2: // Set idle 2
990         case ATA_SLEEPNOW2: // Sleep Now 2
991             channel->status.val = 0;
992             channel->status.ready = 1;
993             ide_raise_irq(ide, channel);
994             break;
995
996         case ATA_SETFEATURES: // Set Features
997             // Prior to this the features register has been written to. 
998             // This command tells the drive to check if the new value is supported (the value is drive specific)
999             // Common is that bit0=DMA enable
1000             // If valid the drive raises an interrupt, if not it aborts.
1001
1002             // Do some checking here...
1003
1004             channel->status.busy = 0;
1005             channel->status.write_fault = 0;
1006             channel->status.error = 0;
1007             channel->status.ready = 1;
1008             channel->status.seek_complete = 1;
1009             
1010             ide_raise_irq(ide, channel);
1011             break;
1012
1013         case ATA_SPECIFY:  // Initialize Drive Parameters
1014         case ATA_RECAL:  // recalibrate?
1015             channel->status.error = 0;
1016             channel->status.ready = 1;
1017             channel->status.seek_complete = 1;
1018             ide_raise_irq(ide, channel);
1019             break;
1020
1021         case ATA_SETMULT: { // Set multiple mode (IDE Block mode) 
1022             // This makes the drive transfer multiple sectors before generating an interrupt
1023
1024             if (drive->sector_count == 0) {
1025                 PrintError(core->vm_info,core,"Attempt to set multiple to zero\n");
1026                 drive->hd_state.mult_sector_num= 1;
1027                 ide_abort_command(ide,channel);
1028                 break;
1029             } else {
1030                 drive->hd_state.mult_sector_num = drive->sector_count;
1031             }
1032
1033             channel->status.ready = 1;
1034             channel->status.error = 0;
1035
1036             ide_raise_irq(ide, channel);
1037
1038             break;
1039         }
1040
1041         case ATA_DEVICE_RESET: // Reset Device
1042             drive_reset(drive);
1043             channel->error_reg.val = 0x01;
1044             channel->status.busy = 0;
1045             channel->status.ready = 1;
1046             channel->status.seek_complete = 1;
1047             channel->status.write_fault = 0;
1048             channel->status.error = 0;
1049             break;
1050
1051         case ATA_CHECKPOWERMODE1: // Check power mode
1052             drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1053             channel->status.busy = 0;
1054             channel->status.ready = 1;
1055             channel->status.write_fault = 0;
1056             channel->status.data_req = 0;
1057             channel->status.error = 0;
1058             break;
1059
1060         default:
1061             PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1062             ide_abort_command(ide, channel);
1063             break;
1064     }
1065
1066     return length;
1067 }
1068
1069
1070
1071
/* 
 * PIO read path for a hard disk: copy the next 'length' bytes of the
 * current sector buffer (drive->data_buf) into the guest buffer 'dst'.
 * The first sector was already read by ata_read_sectors when the command
 * was issued; subsequent sectors are fetched lazily on sector boundaries.
 *
 * Returns 'length' on success, -1 on overrun or disk read failure.
 */
static int read_hd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // byte offset within the currently buffered sector
    uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;


    PrintDebug(VM_NONE,VCORE_NONE, "Read HD data:  transfer_index %llu transfer length %llu current sector numer %llu\n",
               drive->transfer_index, drive->transfer_length, 
               drive->hd_state.cur_sector_num);

    // Guest asked for more data than the command transfer allows
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
                   drive->transfer_length, drive->transfer_index,
                   drive->transfer_index + length);
        return -1;
    }


    // A single port read should never straddle a sector boundary;
    // warn (but continue) if it does
    if (data_offset + length > HD_SECTOR_SIZE) { 
       PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
    }
   
    // For index==0, the read has been done in ata_read_sectors
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        // advance to next sector and read it
        
        drive->current_lba++;

        if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
            return -1;
        }
    }

    /*
      PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
      *(uint32_t *)(drive->data_buf + data_offset), 
      length, data_offset);
    */
    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;


    /* This is the trigger for interrupt injection.
     * For read single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
        (drive->transfer_index == drive->transfer_length)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
            channel->status.data_req = 1;
        } else {
            PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
            // This was the final read of the request
            channel->status.data_req = 0;
        }

        channel->status.ready = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }


    return length;
}
1142
/* 
 * PIO write path for a hard disk: accumulate 'length' bytes from the
 * guest buffer 'src' into the sector staging buffer, flushing each
 * completed sector to the backing store via ata_write.
 *
 * Returns 'length' on success, -1 on overrun or disk write failure.
 */
static int write_hd_data(uint8_t * src, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // byte offset within the sector currently being assembled
    uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;


    PrintDebug(VM_NONE,VCORE_NONE, "Write HD data:  transfer_index %llu transfer length %llu current sector numer %llu\n",
               drive->transfer_index, drive->transfer_length, 
               drive->hd_state.cur_sector_num);

    // Guest supplied more data than the command transfer allows
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
                   drive->transfer_length, drive->transfer_index,
                   drive->transfer_index + length);
        return -1;
    }

    // A single port write should never straddle a sector boundary;
    // warn (but continue) if it does
    if (data_offset + length > HD_SECTOR_SIZE) { 
       PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
    }

    // Copy data into our buffer - there will be room due to
    // (a) the ata_write test below is flushing sectors
    // (b) if we somehow get a sector-stradling write (an error), this will
    //     be OK since the buffer itself is >1 sector in memory
    memcpy(drive->data_buf + data_offset, src, length);

    drive->transfer_index += length;

    if ((data_offset+length) >= HD_SECTOR_SIZE) {
        // Write out the sector we just finished
        if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
            return -1;
        }

        // go onto next sector
        drive->current_lba++;
    }

    /* This is the trigger for interrupt injection.
     * For write single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
        (drive->transfer_index == drive->transfer_length)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
            channel->status.data_req = 1;
        } else {
            PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
            // This was the final read of the request
            channel->status.data_req = 0;
        }

        channel->status.ready = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1208
1209
1210
/* 
 * PIO read path for an ATAPI CD drive: copy 'length' bytes of the
 * current data buffer to the guest, refilling the buffer on block
 * boundaries and maintaining the ATAPI interrupt-reason flags
 * (c_d / io_dir / rel) and the request-length cylinder registers.
 *
 * Returns 'length' on success, -1 on overrun or buffer update failure.
 */
static int read_cd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // byte offset within the currently buffered ATAPI block
    uint64_t data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
    //  int req_offset = drive->transfer_index % drive->req_len;
    
    // 0x28 is READ(10); suppress the per-read debug spam for bulk reads
    if (drive->cd_state.atapi_cmd != 0x28) {
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%llu) (req_len=%u)\n", length, drive->req_len);
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%llu, transfer idx=%llu\n", drive->transfer_length, drive->transfer_index);
    }

    

    // Guest asked for more data than the command transfer allows
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n", 
                   drive->transfer_length, drive->transfer_index, 
                   drive->transfer_index + length);
        return -1;
    }

    
    // On a block boundary (other than the very first block), refill
    // the data buffer from the ATAPI backend
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        if (atapi_update_data_buf(ide, channel) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
            return -1;
        }
    }

    memcpy(dst, drive->data_buf + data_offset, length);
    
    drive->transfer_index += length;


    // Should the req_offset be recalculated here?????
    if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            
            channel->status.data_req = 1;

            drive->irq_flags.c_d = 0;

            // Update the request length in the cylinder regs
            if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
                PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
                return -1;
            }
        } else {
            // This was the final read of the request

            drive->req_len = 0;
            channel->status.data_req = 0;
            channel->status.ready = 1;
            
            drive->irq_flags.c_d = 1;
            drive->irq_flags.rel = 0;
        }

        // data flows device -> host for the remainder of the command
        drive->irq_flags.io_dir = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1276
1277
1278 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1279     struct ide_drive * drive = get_selected_drive(channel);
1280
1281     channel->status.busy = 0;
1282     channel->status.ready = 1;
1283     channel->status.write_fault = 0;
1284     channel->status.seek_complete = 1;
1285     channel->status.corrected = 0;
1286     channel->status.error = 0;
1287                 
1288     
1289     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1290     drive->transfer_index += length;
1291     
1292     if (drive->transfer_index >= drive->transfer_length) {
1293         channel->status.data_req = 0;
1294     }
1295     
1296     return length;
1297 }
1298
1299
1300
1301 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1302     struct ide_internal * ide = priv_data;
1303     struct ide_channel * channel = get_selected_channel(ide, port);
1304     struct ide_drive * drive = get_selected_drive(channel);
1305
1306     //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
1307
1308     if ((channel->cmd_reg == ATA_IDENTIFY) ||
1309         (channel->cmd_reg == ATA_PIDENTIFY)) {
1310         return read_drive_id((uint8_t *)dst, length, ide, channel);
1311     }
1312
1313     if (drive->drive_type == BLOCK_CDROM) {
1314         if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1315             PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1316             return -1;
1317         }
1318     } else if (drive->drive_type == BLOCK_DISK) {
1319         if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1320             PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
1321             return -1;
1322         }
1323     } else {
1324         memset((uint8_t *)dst, 0, length);
1325     }
1326
1327     return length;
1328 }
1329
1330 // For the write side, we care both about
1331 // direct PIO writes to a drive as well as 
1332 // writes that pass a packet through to an CD
1333 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1334     struct ide_internal * ide = priv_data;
1335     struct ide_channel * channel = get_selected_channel(ide, port);
1336     struct ide_drive * drive = get_selected_drive(channel);
1337
1338     PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n", 
1339             port, *(uint32_t *)src, length);
1340
1341     if (drive->drive_type == BLOCK_CDROM) {
1342         if (channel->cmd_reg == ATA_PACKETCMD) { 
1343             // short command packet - no check for space... 
1344             memcpy(drive->data_buf + drive->transfer_index, src, length);
1345             drive->transfer_index += length;
1346             if (drive->transfer_index >= drive->transfer_length) {
1347                 if (atapi_handle_packet(core, ide, channel) == -1) {
1348                     PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1349                     return -1;
1350                 }
1351             }
1352         } else {
1353             PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1354             return -1;
1355         }
1356     } else if (drive->drive_type == BLOCK_DISK) {
1357         if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1358             PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1359             return -1;
1360         }
1361     } else {
1362         // nothing ... do not support writable cd
1363     }
1364
1365     return length;
1366 }
1367
1368 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1369     struct ide_internal * ide = priv_data;
1370     struct ide_channel * channel = get_selected_channel(ide, port);
1371     struct ide_drive * drive = get_selected_drive(channel);
1372             
1373     if (length != 1) {
1374         PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1375         return -1;
1376     }
1377
1378     PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1379
1380     switch (port) {
1381         // reset and interrupt enable
1382         case PRI_CTRL_PORT:
1383         case SEC_CTRL_PORT: {
1384             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1385
1386             // only reset channel on a 0->1 reset bit transition
1387             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1388                 channel_reset(channel);
1389             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1390                 channel_reset_complete(channel);
1391             }
1392
1393             channel->ctrl_reg.val = tmp_ctrl->val;          
1394             break;
1395         }
1396         case PRI_FEATURES_PORT:
1397         case SEC_FEATURES_PORT:
1398             channel->features.val = *(uint8_t *)src;
1399             break;
1400
1401         case PRI_SECT_CNT_PORT:
1402         case SEC_SECT_CNT_PORT:
1403             // update CHS and LBA28 state
1404             channel->drives[0].sector_count = *(uint8_t *)src;
1405             channel->drives[1].sector_count = *(uint8_t *)src;
1406
1407             // update LBA48 state
1408             if (is_lba48(channel)) {
1409                 uint16_t val = *(uint8_t*)src; // top bits zero;
1410                 if (!channel->drives[0].lba48.sector_count_state) { 
1411                     channel->drives[0].lba48.sector_count = val<<8;
1412                 } else {
1413                     channel->drives[0].lba48.sector_count |= val;
1414                 }
1415                 channel->drives[0].lba48.sector_count_state ^= 1;
1416                 if (!channel->drives[1].lba48.sector_count_state) { 
1417                     channel->drives[1].lba48.sector_count = val<<8;
1418                 } else {
1419                     channel->drives[1].lba48.sector_count |= val;
1420                 }
1421                 channel->drives[0].lba48.sector_count_state ^= 1;
1422             }
1423             
1424             break;
1425
1426         case PRI_SECT_NUM_PORT:
1427         case SEC_SECT_NUM_PORT:
1428             // update CHS and LBA28 state
1429             channel->drives[0].sector_num = *(uint8_t *)src;
1430             channel->drives[1].sector_num = *(uint8_t *)src;
1431
1432             // update LBA48 state
1433             if (is_lba48(channel)) {
1434                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1435                 if (!channel->drives[0].lba48.lba41_state) { 
1436                     channel->drives[0].lba48.lba |= val<<24; 
1437                 } else {
1438                     channel->drives[0].lba48.lba |= val;
1439                 }
1440                 channel->drives[0].lba48.lba41_state ^= 1;
1441                 if (!channel->drives[1].lba48.lba41_state) { 
1442                     channel->drives[1].lba48.lba |= val<<24; 
1443                 } else {
1444                     channel->drives[1].lba48.lba |= val;
1445                 }
1446                 channel->drives[1].lba48.lba41_state ^= 1;
1447             }
1448
1449             break;
1450         case PRI_CYL_LOW_PORT:
1451         case SEC_CYL_LOW_PORT:
1452             // update CHS and LBA28 state
1453             channel->drives[0].cylinder_low = *(uint8_t *)src;
1454             channel->drives[1].cylinder_low = *(uint8_t *)src;
1455
1456             // update LBA48 state
1457             if (is_lba48(channel)) {
1458                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1459                 if (!channel->drives[0].lba48.lba52_state) { 
1460                     channel->drives[0].lba48.lba |= val<<32; 
1461                 } else {
1462                     channel->drives[0].lba48.lba |= val<<8;
1463                 }
1464                 channel->drives[0].lba48.lba52_state ^= 1;
1465                 if (!channel->drives[1].lba48.lba52_state) { 
1466                     channel->drives[1].lba48.lba |= val<<32; 
1467                 } else {
1468                     channel->drives[1].lba48.lba |= val<<8;
1469                 }
1470                 channel->drives[1].lba48.lba52_state ^= 1;
1471             }
1472
1473             break;
1474
1475         case PRI_CYL_HIGH_PORT:
1476         case SEC_CYL_HIGH_PORT:
1477             // update CHS and LBA28 state
1478             channel->drives[0].cylinder_high = *(uint8_t *)src;
1479             channel->drives[1].cylinder_high = *(uint8_t *)src;
1480
1481             // update LBA48 state
1482             if (is_lba48(channel)) {
1483                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1484                 if (!channel->drives[0].lba48.lba63_state) { 
1485                     channel->drives[0].lba48.lba |= val<<40; 
1486                 } else {
1487                     channel->drives[0].lba48.lba |= val<<16;
1488                 }
1489                 channel->drives[0].lba48.lba63_state ^= 1;
1490                 if (!channel->drives[1].lba48.lba63_state) { 
1491                     channel->drives[1].lba48.lba |= val<<40; 
1492                 } else {
1493                     channel->drives[1].lba48.lba |= val<<16;
1494                 }
1495                 channel->drives[1].lba48.lba63_state ^= 1;
1496             }
1497
1498             break;
1499
1500         case PRI_DRV_SEL_PORT:
1501         case SEC_DRV_SEL_PORT: {
1502             struct ide_drive_head_reg nh, oh;
1503
1504             oh.val = channel->drive_head.val;
1505             channel->drive_head.val = nh.val = *(uint8_t *)src;
1506
1507             // has LBA flipped?
1508             if ((oh.val & 0xe0) != (nh.val & 0xe0)) {
1509                 // reset LBA48 state
1510                 channel->drives[0].lba48.sector_count_state=0;
1511                 channel->drives[0].lba48.lba41_state=0;
1512                 channel->drives[0].lba48.lba52_state=0;
1513                 channel->drives[0].lba48.lba63_state=0;
1514                 channel->drives[1].lba48.sector_count_state=0;
1515                 channel->drives[1].lba48.lba41_state=0;
1516                 channel->drives[1].lba48.lba52_state=0;
1517                 channel->drives[1].lba48.lba63_state=0;
1518             }
1519             
1520
1521             drive = get_selected_drive(channel);
1522
1523             // Selecting a non-present device is a no-no
1524             if (drive->drive_type == BLOCK_NONE) {
1525                 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1526                 channel->error_reg.abort = 1;
1527                 channel->status.error = 1;
1528             } else {
1529                 channel->status.busy = 0;
1530                 channel->status.ready = 1;
1531                 channel->status.data_req = 0;
1532                 channel->status.error = 0;
1533                 channel->status.seek_complete = 1;
1534                 
1535                 channel->dma_status.active = 0;
1536                 channel->dma_status.err = 0;
1537             }
1538
1539             break;
1540         }
1541         default:
1542             PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
1543             return -1;
1544     }
1545     return length;
1546 }
1547
1548
1549 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1550     struct ide_internal * ide = priv_data;
1551     struct ide_channel * channel = get_selected_channel(ide, port);
1552     struct ide_drive * drive = get_selected_drive(channel);
1553     
1554     if (length != 1) {
1555         PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1556         return -1;
1557     }
1558     
1559     PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1560
1561     if ((port == PRI_ADDR_REG_PORT) ||
1562         (port == SEC_ADDR_REG_PORT)) {
1563         // unused, return 0xff
1564         *(uint8_t *)dst = 0xff;
1565         return length;
1566     }
1567
1568
1569     // if no drive is present just return 0 + reserved bits
1570     if (drive->drive_type == BLOCK_NONE) {
1571         if ((port == PRI_DRV_SEL_PORT) ||
1572             (port == SEC_DRV_SEL_PORT)) {
1573             *(uint8_t *)dst = 0xa0;
1574         } else {
1575             *(uint8_t *)dst = 0;
1576         }
1577
1578         return length;
1579     }
1580
1581     switch (port) {
1582
1583         // This is really the error register.
1584         case PRI_FEATURES_PORT:
1585         case SEC_FEATURES_PORT:
1586             *(uint8_t *)dst = channel->error_reg.val;
1587             break;
1588             
1589         case PRI_SECT_CNT_PORT:
1590         case SEC_SECT_CNT_PORT:
1591             *(uint8_t *)dst = drive->sector_count;
1592             break;
1593
1594         case PRI_SECT_NUM_PORT:
1595         case SEC_SECT_NUM_PORT:
1596             *(uint8_t *)dst = drive->sector_num;
1597             break;
1598
1599         case PRI_CYL_LOW_PORT:
1600         case SEC_CYL_LOW_PORT:
1601             *(uint8_t *)dst = drive->cylinder_low;
1602             break;
1603
1604
1605         case PRI_CYL_HIGH_PORT:
1606         case SEC_CYL_HIGH_PORT:
1607             *(uint8_t *)dst = drive->cylinder_high;
1608             break;
1609
1610         case PRI_DRV_SEL_PORT:
1611         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1612             *(uint8_t *)dst = channel->drive_head.val;
1613             break;
1614
1615         case PRI_CTRL_PORT:
1616         case SEC_CTRL_PORT:
1617         case PRI_CMD_PORT:
1618         case SEC_CMD_PORT:
1619             // Something about lowering interrupts here....
1620             *(uint8_t *)dst = channel->status.val;
1621             break;
1622
1623         default:
1624             PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1625             return -1;
1626     }
1627
1628     PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
1629
1630     return length;
1631 }
1632
1633
1634
1635 static void init_drive(struct ide_drive * drive) {
1636
1637     drive->sector_count = 0x01;
1638     drive->sector_num = 0x01;
1639     drive->cylinder = 0x0000;
1640
1641     drive->drive_type = BLOCK_NONE;
1642
1643     memset(drive->model, 0, sizeof(drive->model));
1644
1645     drive->transfer_index = 0;
1646     drive->transfer_length = 0;
1647     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1648
1649     drive->num_cylinders = 0;
1650     drive->num_heads = 0;
1651     drive->num_sectors = 0;
1652     
1653
1654     drive->private_data = NULL;
1655     drive->ops = NULL;
1656 }
1657
1658 static void init_channel(struct ide_channel * channel) {
1659     int i = 0;
1660
1661     channel->error_reg.val = 0x01;
1662
1663     //** channel->features = 0x0;
1664
1665     channel->drive_head.val = 0x00;
1666     channel->status.val = 0x00;
1667     channel->cmd_reg = 0x00;
1668     channel->ctrl_reg.val = 0x08;
1669
1670     channel->dma_cmd.val = 0;
1671     channel->dma_status.val = 0;
1672     channel->dma_prd_addr = 0;
1673     channel->dma_tbl_index = 0;
1674
1675     for (i = 0; i < 2; i++) {
1676         init_drive(&(channel->drives[i]));
1677     }
1678
1679 }
1680
1681
1682 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1683     PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1684     /*
1685     struct ide_internal * ide = (struct ide_internal *)(private_data);
1686
1687     PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1688     */
1689
1690     return 0;
1691 }
1692
1693 static int init_ide_state(struct ide_internal * ide) {
1694
1695     /* 
1696      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1697      */
1698
1699     init_channel(&(ide->channels[0]));
1700     ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1701
1702     init_channel(&(ide->channels[1]));
1703     ide->channels[1].irq = SEC_DEFAULT_IRQ ;
1704
1705
1706     return 0;
1707 }
1708
1709
1710
1711
/* 
 * Device-manager free hook: release the controller state.
 * NOTE(review): the PCI registration is not undone here — verify
 * whether deregistration is required on teardown.
 */
static int ide_free(struct ide_internal * ide) {
    V3_Free(ide);
    return 0;
}
1720
1721 #ifdef V3_CONFIG_CHECKPOINT
1722
1723 #include <palacios/vmm_sprintf.h>
1724
/* 
 * Extended checkpoint save handler.
 * Layout: a (currently empty) top-level context named "<id>", one
 * context "<id>-<ch>" per channel with the channel registers and DMA
 * state, and one context "<id>-<ch>-<drive>" per drive.  Type-specific
 * data (ATAPI vs. hard disk) is packed at the end of each drive
 * context, followed by LBA48 state.
 * Returns 0 on success, -1 on any failure (open context, if any, is
 * closed on the failure path).
 */
static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    struct v3_chkpt_ctx *ctx=0;
    int ch_num = 0;
    int drive_num = 0;
    char buf[128];
    

    ctx=v3_chkpt_open_ctx(chkpt,id);
    
    if (!ctx) { 
      PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
      goto savefailout;
    }

    // nothing saved yet
    
    v3_chkpt_close_ctx(ctx);ctx=0;
   

    for (ch_num = 0; ch_num < 2; ch_num++) {
        struct ide_channel * ch = &(ide->channels[ch_num]);

        // per-channel context: "<id>-<ch_num>"
        snprintf(buf, 128, "%s-%d", id, ch_num);

        ctx = v3_chkpt_open_ctx(chkpt, buf);
        
        if (!ctx) { 
          PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
          goto savefailout;
        }

        V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
        V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
        V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);



        v3_chkpt_close_ctx(ctx); ctx=0;

        for (drive_num = 0; drive_num < 2; drive_num++) {
            struct ide_drive * drive = &(ch->drives[drive_num]);
            
            // per-drive context: "<id>-<ch_num>-<drive_num>"
            snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);

            ctx = v3_chkpt_open_ctx(chkpt, buf);
            
            if (!ctx) { 
              PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
              goto savefailout;
            }

            V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
            V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);

            V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);

            V3_CHKPT_SAVE(ctx, "DATA_BUF",  drive->data_buf, savefailout);


            /* For now we'll just pack the type specific data at the end... */
            /* We should probably add a new context here in the future... */
            if (drive->drive_type == BLOCK_CDROM) {
              V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
            } else if (drive->drive_type == BLOCK_DISK) {
              V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
              V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
              V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
            } else if (drive->drive_type == BLOCK_NONE) { 
              // no drive connected, so no data
            } else {
              PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
              goto savefailout;
            }

            V3_CHKPT_SAVE(ctx, "LBA48_LBA", drive->lba48.lba, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, savefailout);
            
            v3_chkpt_close_ctx(ctx); ctx=0;
        }
    }

// goodout:
    return 0;

 savefailout:
    PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
    if (ctx) {v3_chkpt_close_ctx(ctx); }
    return -1;
}
1832
1833
1834
1835 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1836     struct ide_internal * ide = (struct ide_internal *)private_data;
1837     struct v3_chkpt_ctx *ctx=0;
1838     int ch_num = 0;
1839     int drive_num = 0;
1840     char buf[128];
1841     
1842     ctx=v3_chkpt_open_ctx(chkpt,id);
1843     
1844     if (!ctx) { 
1845       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1846       goto loadfailout;
1847     }
1848
1849     // nothing saved yet
1850     
1851     v3_chkpt_close_ctx(ctx);ctx=0;
1852    
1853
1854     for (ch_num = 0; ch_num < 2; ch_num++) {
1855         struct ide_channel * ch = &(ide->channels[ch_num]);
1856
1857         snprintf(buf, 128, "%s-%d", id, ch_num);
1858
1859         ctx = v3_chkpt_open_ctx(chkpt, buf);
1860         
1861         if (!ctx) { 
1862           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1863           goto loadfailout;
1864         }
1865
1866         V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1867         V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1868         V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1869         V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1870         V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1871         V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1872         V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1873         V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1874         V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1875         V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1876
1877         v3_chkpt_close_ctx(ctx); ctx=0;
1878
1879         for (drive_num = 0; drive_num < 2; drive_num++) {
1880             struct ide_drive * drive = &(ch->drives[drive_num]);
1881             
1882             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1883
1884             ctx = v3_chkpt_open_ctx(chkpt, buf);
1885             
1886             if (!ctx) { 
1887               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1888               goto loadfailout;
1889             }
1890
1891             V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1892             V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1893             V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1894             V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1895
1896             V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1897             V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1898             V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1899
1900             V3_CHKPT_LOAD(ctx, "DATA_BUF",  drive->data_buf, loadfailout);
1901
1902             
1903             /* For now we'll just pack the type specific data at the end... */
1904             /* We should probably add a new context here in the future... */
1905             if (drive->drive_type == BLOCK_CDROM) {
1906               V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1907               V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1908               V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1909             } else if (drive->drive_type == BLOCK_DISK) {
1910               V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1911               V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1912               V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1913             } else if (drive->drive_type == BLOCK_NONE) { 
1914               // no drive connected, so no data
1915             } else {
1916               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1917               goto loadfailout;
1918             }
1919
1920             V3_CHKPT_LOAD(ctx, "LBA48_LBA", drive->lba48.lba, loadfailout);
1921             V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, loadfailout);
1922             V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, loadfailout);
1923             V3_CHKPT_LOAD(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, loadfailout);
1924             V3_CHKPT_LOAD(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, loadfailout);
1925             V3_CHKPT_LOAD(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, loadfailout);
1926             
1927         }
1928     }
1929 // goodout:
1930     return 0;
1931
1932  loadfailout:
1933     PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1934     if (ctx) {v3_chkpt_close_ctx(ctx); }
1935     return -1;
1936
1937 }
1938
1939
1940
1941 #endif
1942
1943
/* Device-manager operations table for the IDE device.
 * Checkpoint save/load hooks are present only when checkpointing is
 * compiled in. */
static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))ide_free,
#ifdef V3_CONFIG_CHECKPOINT
    .save_extended = ide_save_extended,
    .load_extended = ide_load_extended
#endif
};
1951
1952
1953
1954
1955 static int connect_fn(struct v3_vm_info * vm, 
1956                       void * frontend_data, 
1957                       struct v3_dev_blk_ops * ops, 
1958                       v3_cfg_tree_t * cfg, 
1959                       void * private_data) {
1960     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1961     struct ide_channel * channel = NULL;
1962     struct ide_drive * drive = NULL;
1963
1964     char * bus_str = v3_cfg_val(cfg, "bus_num");
1965     char * drive_str = v3_cfg_val(cfg, "drive_num");
1966     char * type_str = v3_cfg_val(cfg, "type");
1967     char * model_str = v3_cfg_val(cfg, "model");
1968     uint_t bus_num = 0;
1969     uint_t drive_num = 0;
1970
1971
1972     if ((!type_str) || (!drive_str) || (!bus_str)) {
1973         PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1974         return -1;
1975     }
1976
1977     bus_num = atoi(bus_str);
1978     drive_num = atoi(drive_str);
1979
1980     channel = &(ide->channels[bus_num]);
1981     drive = &(channel->drives[drive_num]);
1982
1983     if (drive->drive_type != BLOCK_NONE) {
1984         PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1985         return -1;
1986     }
1987
1988     if (model_str != NULL) {
1989         strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1990     }
1991
1992     if (strcasecmp(type_str, "cdrom") == 0) {
1993         drive->drive_type = BLOCK_CDROM;
1994
1995         while (strlen((char *)(drive->model)) < 40) {
1996             strcat((char*)(drive->model), " ");
1997         }
1998
1999     } else if (strcasecmp(type_str, "hd") == 0) {
2000         drive->drive_type = BLOCK_DISK;
2001
2002         drive->hd_state.accessed = 0;
2003         drive->hd_state.mult_sector_num = 1;
2004
2005         drive->num_sectors = 63;
2006         drive->num_heads = 16;
2007         drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
2008     } else {
2009         PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
2010         return -1;
2011     }
2012  
2013     drive->ops = ops;
2014
2015     if (ide->ide_pci) {
2016         // Hardcode this for now, but its not a good idea....
2017         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
2018     }
2019  
2020     drive->private_data = private_data;
2021
2022     return 0;
2023 }
2024
2025
2026
2027
/* 
 * Device initializer for the "IDE" device type.
 * Allocates the controller state, looks up the PCI bus and southbridge
 * (if configured), hooks all legacy ATA I/O ports for both channels,
 * registers a PIIX3 IDE PCI function (with a 16-port I/O BAR for the
 * bus-master DMA registers), and registers as a block frontend so
 * backends can attach via connect_fn.
 * Returns 0 on success, -1 on failure (partially-created device is
 * removed on the error paths).
 */
static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct ide_internal * ide  = NULL;
    char * dev_id = v3_cfg_val(cfg, "ID");
    int ret = 0;

    PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");

    ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));

    if (ide == NULL) {
        PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
        return -1;
    }

    memset(ide, 0, sizeof(struct ide_internal));

    ide->vm = vm;
    ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));

    // PCI attachment is optional, but a southbridge is required when a bus is given
    if (ide->pci_bus != NULL) {
        struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));

        if (!southbridge) {
            PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
            V3_Free(ide);
            return -1;
        }

        ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
    } else {
        PrintError(vm,VCORE_NONE,"Strange - you don't have a PCI bus\n");
    }

    PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");

    struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);

    if (dev == NULL) {
        PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
        V3_Free(ide);
        return -1;
    }

    // after this point, v3_remove_device() frees 'ide' through ide_free()
    if (init_ide_state(ide) == -1) {
        PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
        v3_remove_device(dev);
        return -1;
    }

    PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");

    // primary channel: 0x1f0-0x1f7
    ret |= v3_dev_hook_io(dev, PRI_DATA_PORT, 
                          &read_data_port, &write_data_port);
    ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CMD_PORT, 
                          &read_port_std, &write_cmd_port);

    // secondary channel: 0x170-0x177
    ret |= v3_dev_hook_io(dev, SEC_DATA_PORT, 
                          &read_data_port, &write_data_port);
    ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CMD_PORT, 
                          &read_port_std, &write_cmd_port);
  

    // control/status and address registers
    ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT, 
                          &read_port_std, &write_port_std);

    ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT, 
                          &read_port_std, &write_port_std);
  

    ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
                          &read_port_std, &write_port_std);

    ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
                          &read_port_std, &write_port_std);


    if (ret != 0) {
        PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
        v3_remove_device(dev);
        return -1;
    }


    if (ide->pci_bus) {
        struct v3_pci_bar bars[6];
        struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
        struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
        struct pci_device * pci_dev = NULL;
        int i;

        V3_Print(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");

        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        // BAR4: bus-master DMA register block (16 I/O ports)
        bars[4].type = PCI_BAR_IO;
        //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
        bars[4].default_base_port = -1;
        bars[4].num_ports = 16;

        bars[4].io_read = read_dma_port;
        bars[4].io_write = write_dma_port;
        bars[4].private_data = ide;

        // function 1 of the southbridge's PCI device (PIIX3 layout)
        pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
                                         "PIIX3_IDE", bars,
                                         pci_config_update, NULL, NULL, NULL, ide);

        if (pci_dev == NULL) {
            // NOTE(review): 'i' is the loop counter left at 6 here, not a bus number
            PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i); 
            v3_remove_device(dev);
            return -1;
        }

        /* This is for CMD646 devices 
           pci_dev->config_header.vendor_id = 0x1095;
           pci_dev->config_header.device_id = 0x0646;
           pci_dev->config_header.revision = 0x8f07;
        */

        // Intel PIIX3 IDE controller IDs
        pci_dev->config_header.vendor_id = 0x8086;
        pci_dev->config_header.device_id = 0x7010;
        pci_dev->config_header.revision = 0x00;

        pci_dev->config_header.prog_if = 0x80; // Master IDE device
        pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
        pci_dev->config_header.class = PCI_CLASS_STORAGE;

        pci_dev->config_header.command = 0;
        pci_dev->config_header.status = 0x0280;

        ide->ide_pci = pci_dev;


    }

    // expose this device as a block frontend so disks/cdroms can attach
    if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
        PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
        v3_remove_device(dev);
        return -1;
    }
    

    PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");

    return 0;
}
2200
2201
2202 device_register("IDE", ide_init)
2203
2204
2205
2206
2207 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num, 
2208                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2209
2210     struct ide_internal * ide  = ide_data;  
2211     struct ide_channel * channel = &(ide->channels[channel_num]);
2212     struct ide_drive * drive = &(channel->drives[drive_num]);
2213     
2214     if (drive->drive_type == BLOCK_NONE) {
2215         return -1;
2216     }
2217
2218     *cylinders = drive->num_cylinders;
2219     *heads = drive->num_heads;
2220     *sectors = drive->num_sectors;
2221
2222     return 0;
2223 }