Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


IDE/ATA enhancements: read/write multiple, lba48 addressing, cleanup
[palacios.git] / palacios / src / devices / ide.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef V3_CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
36
37
38 #define PRI_DATA_PORT         0x1f0
39 #define PRI_FEATURES_PORT     0x1f1
40 #define PRI_SECT_CNT_PORT     0x1f2
41 #define PRI_SECT_NUM_PORT     0x1f3
42 #define PRI_CYL_LOW_PORT      0x1f4
43 #define PRI_CYL_HIGH_PORT     0x1f5
44 #define PRI_DRV_SEL_PORT      0x1f6
45 #define PRI_CMD_PORT          0x1f7
46 #define PRI_CTRL_PORT         0x3f6
47 #define PRI_ADDR_REG_PORT     0x3f7
48
49 #define SEC_DATA_PORT         0x170
50 #define SEC_FEATURES_PORT     0x171
51 #define SEC_SECT_CNT_PORT     0x172
52 #define SEC_SECT_NUM_PORT     0x173
53 #define SEC_CYL_LOW_PORT      0x174
54 #define SEC_CYL_HIGH_PORT     0x175
55 #define SEC_DRV_SEL_PORT      0x176
56 #define SEC_CMD_PORT          0x177
57 #define SEC_CTRL_PORT         0x376
58 #define SEC_ADDR_REG_PORT     0x377
59
60
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
63
64 #define DATA_BUFFER_SIZE 2048
65
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
68
69
/* Debug-name tables for the primary/secondary channels.  Indices 0-7
 * correspond to the eight command-block ports (data .. command);
 * indices 8-9 are the control and address registers. */
static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
                                          "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
                                           "PRI_CTRL", "PRI_ADDR_REG"};


static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
                                          "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
                                           "SEC_CTRL", "SEC_ADDR_REG"};

/* Bus-master DMA register names, indexed by (port & 0x7); offsets 1 and 3
 * are reserved and map to NULL. */
static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
                                           "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};


/* Type of block device attached to a drive slot (or none). */
typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
/* Per-drive state specific to ATAPI CD-ROM emulation. */
struct ide_cd_state {
    struct atapi_sense_data sense;            // last SENSE data to report to the guest

    uint8_t atapi_cmd;                        // opcode of the ATAPI command in progress
    struct atapi_error_recovery err_recovery; // mode-page error recovery settings
};
111
/* Per-drive state specific to ATA hard-disk emulation. */
struct ide_hd_state {
    uint32_t accessed;   // nonzero once the guest has touched the disk

    /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
    uint32_t mult_sector_num;

    /* This is the current op sector size:
     * for multiple sector ops this equals mult_sector_num
     * for standard ops this equals 1
     */
    uint64_t cur_sector_num;
};
124
/* Per-drive register shadow and transfer state.  The anonymous unions at
 * the tail overlay the ATA task-file registers with their LBA
 * interpretations, so the same byte can be read either way. */
struct ide_drive {
    // Command Registers

    v3_block_type_t drive_type;

    struct v3_dev_blk_ops * ops;   // backing block-device callbacks

    union {
        struct ide_cd_state cd_state;
        struct ide_hd_state hd_state;
    };

    char model[41];                // drive model string (40 chars + NUL)

    // Where we are in the data transfer
    uint64_t transfer_index;

    // the length of a transfer
    // calculated for easy access
    uint64_t transfer_length;

    uint64_t current_lba;          // next sector to read/write on the backing store

    // We have a local data buffer that we use for IO port accesses
    uint8_t data_buf[DATA_BUFFER_SIZE];


    // CHS geometry presented to the guest
    uint32_t num_cylinders;
    uint32_t num_heads;
    uint32_t num_sectors;


    /* LBA48 task-file state: each register takes a two-step write
     * (high-order byte first, then low-order). */
    struct lba48_state {
        // all start at zero
        uint64_t lba;                  
        uint16_t sector_count;            // for LBA48
        uint8_t  sector_count_state;      // two step write to 1f2/172 (high first)
        uint8_t  lba41_state;             // two step write to 1f3
        uint8_t  lba52_state;             // two step write to 1f4
        uint8_t  lba63_state;             // two step write to 1f5
    } lba48;

    void * private_data;
    
    union {
        uint8_t sector_count;             // 0x1f2,0x172  (ATA)
        struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
    } __attribute__((packed));


    union {
        uint8_t sector_num;               // 0x1f3,0x173
        uint8_t lba0;                     // low byte of a 28-bit LBA
    } __attribute__((packed));

    union {
        uint16_t cylinder;
        uint16_t lba12;                   // middle bytes of a 28-bit LBA
        
        struct {
            uint8_t cylinder_low;       // 0x1f4,0x174
            uint8_t cylinder_high;      // 0x1f5,0x175
        } __attribute__((packed));
        
        struct {
            uint8_t lba1;
            uint8_t lba2;
        } __attribute__((packed));
        
        
        // The transfer length requested by the CPU 
        uint16_t req_len;
    } __attribute__((packed));

};
200
201
202
/* Per-channel register state plus its two attached drives
 * (selected via drive_head.drive_sel). */
struct ide_channel {
    struct ide_drive drives[2];

    // Command Registers
    struct ide_error_reg error_reg;     // [read] 0x1f1,0x171

    struct ide_features_reg features;

    struct ide_drive_head_reg drive_head; // 0x1f6,0x176

    struct ide_status_reg status;       // [read] 0x1f7,0x177
    uint8_t cmd_reg;                // [write] 0x1f7,0x177

    int irq; // this is temporary until we add PCI support

    // Control Registers
    struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376

    /* Bus-master DMA register file: byte-addressable through dma_ports,
     * or field-by-field through the overlaid packed struct. */
    union {
        uint8_t dma_ports[8];
        struct {
            struct ide_dma_cmd_reg dma_cmd;
            uint8_t rsvd1;
            struct ide_dma_status_reg dma_status;
            uint8_t rsvd2;
            uint32_t dma_prd_addr;          // guest-physical base of the PRD table
        } __attribute__((packed));
    } __attribute__((packed));

    uint32_t dma_tbl_index;                 // index of the next PRD entry to process
};
234
235
236
/* Top-level device state: both channels plus PCI/southbridge wiring. */
struct ide_internal {
    struct ide_channel channels[2];    // [0] = primary, [1] = secondary

    struct v3_southbridge * southbridge;
    struct vm_device * pci_bus;

    struct pci_device * ide_pci;       // our PCI function, when attached to a bus

    struct v3_vm_info * vm;
};
247
248
249
250
251
252 /* Utility functions */
253
/* Swap the two bytes of a 16-bit value between big- and little-endian
 * representations.  Works on the in-memory byte order of the argument,
 * so on a little-endian host this swaps the bytes. */
static inline uint16_t be_to_le_16(const uint16_t val) {
    const uint8_t * bytes = (const uint8_t *)&val;
    uint16_t result = bytes[0];
    result = (result << 8) | bytes[1];
    return result;
}
258
/* Little-endian to big-endian: byte swapping is symmetric, so this is
 * the same operation as be_to_le_16. */
static inline uint16_t le_to_be_16(const uint16_t val) {
    return be_to_le_16(val);
}
262
263
/* Reverse the four bytes of a 32-bit value between big- and
 * little-endian representations (operates on the in-memory byte order
 * of the argument).
 *
 * Fix: widen each byte to uint32_t before shifting.  The previous code
 * shifted the int-promoted uint8_t directly, so buf[0] >= 0x80 produced
 * `0x80 << 24` — a left shift into the sign bit of a signed int, which
 * is undefined behavior in C (C11 6.5.7). */
static inline uint32_t be_to_le_32(const uint32_t val) {
    const uint8_t * buf = (const uint8_t *)&val;
    return ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
           ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
}
268
/* Little-endian to big-endian: byte swapping is symmetric, so this is
 * the same operation as be_to_le_32. */
static inline uint32_t le_to_be_32(const uint32_t val) {
    return be_to_le_32(val);
}
272
273
274 static inline int is_lba28(struct ide_channel * channel) {
275     return channel->drive_head.lba_mode && channel->drive_head.rsvd1 && channel->drive_head.rsvd2;
276 }
277
278 static inline int is_lba48(struct ide_channel * channel) {
279     return channel->drive_head.lba_mode && !channel->drive_head.rsvd1 && !channel->drive_head.rsvd2;
280 }
281
282 static inline int is_chs(struct ide_channel * channel) {
283     return !channel->drive_head.lba_mode;
284 }
285
286 static inline int get_channel_index(ushort_t port) {
287     if (((port & 0xfff8) == 0x1f0) ||
288         ((port & 0xfffe) == 0x3f6) || 
289         ((port & 0xfff8) == 0xc000)) {
290         return 0;
291     } else if (((port & 0xfff8) == 0x170) ||
292                ((port & 0xfffe) == 0x376) ||
293                ((port & 0xfff8) == 0xc008)) {
294         return 1;
295     }
296
297     return -1;
298 }
299
/* Look up the channel addressed by an I/O port.
 * NOTE(review): get_channel_index() returns -1 for an unrecognized port,
 * which would index out of bounds here.  Presumably only ports this
 * device registered can reach us — confirm against the hook setup. */
static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
    int channel_idx = get_channel_index(port);    
    return &(ide->channels[channel_idx]);
}
304
/* Return the drive currently selected by the channel's drive/head
 * register (drive_sel: 0 = master, 1 = slave). */
static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
    return &(channel->drives[channel->drive_head.drive_sel]);
}
308
309
310
311
312 /* Drive Commands */
/* Raise the channel's IRQ in the guest, unless interrupts are masked via
 * the device-control register (irq_disable / nIEN).  Also latches the
 * interrupt bit in the bus-master DMA status register. */
static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
    if (channel->ctrl_reg.irq_disable == 0) {

        PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);

        channel->dma_status.int_gen = 1;
        v3_raise_irq(ide->vm, channel->irq);
    } else {
        PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
    }
}
324
325
/* Reset a single drive's task-file shadow registers and data buffer to
 * their post-reset values.  ATAPI devices report the 0xeb14 cylinder
 * signature so the guest can identify them as packet devices. */
static void drive_reset(struct ide_drive * drive) {
    drive->sector_count = 0x01;
    drive->sector_num = 0x01;

    PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
    
    if (drive->drive_type == BLOCK_CDROM) {
        drive->cylinder = 0xeb14;   // ATAPI signature (cyl low/high = 0x14/0xeb)
    } else {
        drive->cylinder = 0x0000;
        //drive->hd_state.accessed = 0;
    }


    memset(drive->data_buf, 0, sizeof(drive->data_buf));
    drive->transfer_index = 0;

    // Send the reset signal to the connected device callbacks
    //     channel->drives[0].reset();
    //    channel->drives[1].reset();
}
347
/* Begin a channel reset: mark the channel busy and restore register
 * defaults.  channel_reset_complete() finishes the sequence by clearing
 * busy and resetting the drives. */
static void channel_reset(struct ide_channel * channel) {
    
    // set busy and seek complete flags
    channel->status.val = 0x90;

    // Clear errors (0x01 in the ATA error register means diagnostics passed)
    channel->error_reg.val = 0x01;

    // clear commands
    channel->cmd_reg = 0;  // NOP

    channel->ctrl_reg.irq_disable = 0;
}
361
362 static void channel_reset_complete(struct ide_channel * channel) {
363     channel->status.busy = 0;
364     channel->status.ready = 1;
365
366     channel->drive_head.head_num = 0;    
367     
368     drive_reset(&(channel->drives[0]));
369     drive_reset(&(channel->drives[1]));
370 }
371
372
/* Abort the current command: set ERR + DRDY in the status register,
 * report ABRT in the error register, and interrupt the guest. */
static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {

    PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");

    channel->status.val = 0x41; // Error + ready
    channel->error_reg.val = 0x04; // ABRT (command aborted) bit of the ATA error register

    ide_raise_irq(ide, channel);
}
382
383
384 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
385 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
386
387
388 /* ATAPI functions */
389 #include "atapi.h"
390
391 /* ATA functions */
392 #include "ata.h"
393
394
395
/* Debug helper: walk the guest's PRD (Physical Region Descriptor)
 * scatter/gather table and print each entry until the end-of-table
 * marker.  Guest memory is read through core 0. */
static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_dma_prd prd_entry;
    int index = 0;

    V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");

    while (1) {
        uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
        int ret = 0;

        ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
        
        if (ret != sizeof(struct ide_dma_prd)) {
            PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
            return;
        }

        // a PRD size of 0 encodes a 64KB region
        V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
                   prd_entry.base_addr, 
                   (prd_entry.size == 0) ? 0x10000 : prd_entry.size, 
                   prd_entry.end_of_table);

        if (prd_entry.end_of_table) {
            break;
        }

        index++;
    }

    return;
}
427
428
429 /* IO Operations */
/* IO Operations */
/* Bus-master DMA read: move data from the backing device into guest
 * memory, as described by the guest's PRD scatter/gather table.
 *
 * Walks PRD entries starting at channel->dma_tbl_index until
 * drive->transfer_length bytes have been delivered.  Disks are read one
 * 512-byte sector at a time; ATAPI data ops one 2KB block at a time;
 * non-data ATAPI responses are copied straight from data_buf.
 * Returns 0 on success, -1 on error.  Raises the channel IRQ on exit. */
static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // This is at top level scope to do the EOT test at the end
    struct ide_dma_prd prd_entry = {};
    uint_t bytes_left = drive->transfer_length;

    // Read in the data buffer....
    // Read a sector/block at a time until the prd entry is full.

#ifdef V3_CONFIG_DEBUG_IDE
    print_prd_table(ide, channel);
#endif

    PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);

    // Loop through the disk data
    while (bytes_left > 0) {
        uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
        uint_t prd_bytes_left = 0;
        uint_t prd_offset = 0;
        int ret;

        PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);

        ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);

        if (ret != sizeof(struct ide_dma_prd)) {
            PrintError(core->vm_info, core, "Could not read PRD\n");
            return -1;
        }

        PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
                   prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);

        // loop through the PRD data....

        if (prd_entry.size == 0) {
            // a size of 0 means 64k
            prd_bytes_left = 0x10000;
        } else {
            prd_bytes_left = prd_entry.size;
        }


        while (prd_bytes_left > 0) {
            uint_t bytes_to_write = 0;

            if (drive->drive_type == BLOCK_DISK) {
                bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;


                if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
                    PrintError(core->vm_info, core, "Failed to read next disk sector\n");
                    return -1;
                }
            } else if (drive->drive_type == BLOCK_CDROM) {
                if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
                    bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;

                    if (atapi_read_chunk(ide, channel) == -1) {
                        PrintError(core->vm_info, core, "Failed to read next disk sector\n");
                        return -1;
                    }
                } else {
                    // Non-data ATAPI response: DMA the response buffer to
                    // the guest in one shot and finish the command here.
                    /*
                    PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
                    return -1;
                    */
                    int cmd_ret = 0;

                    //V3_Print(core->vm_info, core, "DMA of command packet\n");

                    bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
                    prd_bytes_left = bytes_to_write;


                    // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
                    cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, 
                                                  bytes_to_write, drive->data_buf); 

                    // check cmd_ret
                    // NOTE(review): cmd_ret is never checked — a failed or
                    // short guest-memory write is silently ignored here.


                    // NOTE(review): bytes_to_write is zeroed BEFORE being
                    // added to transfer_index, making the add a dead store.
                    // Harmless only because we return immediately below.
                    bytes_to_write = 0;
                    prd_bytes_left = 0;
                    drive->transfer_index += bytes_to_write;

                    channel->status.busy = 0;
                    channel->status.ready = 1;
                    channel->status.data_req = 0;
                    channel->status.error = 0;
                    channel->status.seek_complete = 1;

                    channel->dma_status.active = 0;
                    channel->dma_status.err = 0;

                    ide_raise_irq(ide, channel);
                    
                    return 0;
                }
            }

            PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n", 
                       (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);

            // NOTE(review): current_lba advances once per chunk regardless
            // of drive type (sector for disks, 2KB block for ATAPI) — confirm
            // ATAPI consumers don't interpret this as a 512-byte LBA.
            drive->current_lba++;

            ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 

            if (ret != bytes_to_write) {
                PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
                return -1;
            }

            PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);

            drive->transfer_index += ret;
            prd_bytes_left -= ret;
            prd_offset += ret;
            bytes_left -= ret;
        }

        channel->dma_tbl_index++;

        // Transfers must end on a sector/block boundary within a PRD entry
        if (drive->drive_type == BLOCK_DISK) {
            if (drive->transfer_index % HD_SECTOR_SIZE) {
                PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
                return -1;
            }
        } else if (drive->drive_type == BLOCK_CDROM) {
            if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
                if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
                    PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
                    PrintError(core->vm_info, core, "transfer_index=%llu, transfer_length=%llu\n", 
                               drive->transfer_index, drive->transfer_length);
                    return -1;
                }
            }
        }


        if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
            PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
            return -1;
        }
    }

    /*
      drive->irq_flags.io_dir = 1;
      drive->irq_flags.c_d = 1;
      drive->irq_flags.rel = 0;
    */


    // Update to the next PRD entry

    // set DMA status

    if (prd_entry.end_of_table) {
        channel->status.busy = 0;
        channel->status.ready = 1;
        channel->status.data_req = 0;
        channel->status.error = 0;
        channel->status.seek_complete = 1;

        channel->dma_status.active = 0;
        channel->dma_status.err = 0;
    }

    ide_raise_irq(ide, channel);

    return 0;
}
603
604
605 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
606     struct ide_drive * drive = get_selected_drive(channel);
607     // This is at top level scope to do the EOT test at the end
608     struct ide_dma_prd prd_entry = {};
609     uint_t bytes_left = drive->transfer_length;
610
611
612     PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
613
614     // Loop through disk data
615     while (bytes_left > 0) {
616         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
617         uint_t prd_bytes_left = 0;
618         uint_t prd_offset = 0;
619         int ret;
620         
621         PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
622
623         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
624
625         if (ret != sizeof(struct ide_dma_prd)) {
626             PrintError(core->vm_info, core, "Could not read PRD\n");
627             return -1;
628         }
629
630         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
631                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
632
633
634         if (prd_entry.size == 0) {
635             // a size of 0 means 64k
636             prd_bytes_left = 0x10000;
637         } else {
638             prd_bytes_left = prd_entry.size;
639         }
640
641         while (prd_bytes_left > 0) {
642             uint_t bytes_to_write = 0;
643
644
645             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
646
647
648             ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
649
650             if (ret != bytes_to_write) {
651                 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
652                 return -1;
653             }
654
655             PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
656
657
658             if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
659                 PrintError(core->vm_info, core, "Failed to write data to disk\n");
660                 return -1;
661             }
662             
663             drive->current_lba++;
664
665             drive->transfer_index += ret;
666             prd_bytes_left -= ret;
667             prd_offset += ret;
668             bytes_left -= ret;
669         }
670
671         channel->dma_tbl_index++;
672
673         if (drive->transfer_index % HD_SECTOR_SIZE) {
674             PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
675             return -1;
676         }
677
678         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
679             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
680             PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%llu)...\n", 
681                        bytes_left, drive->transfer_length);
682             PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
683                        prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
684
685             print_prd_table(ide, channel);
686             return -1;
687         }
688     }
689
690     if (prd_entry.end_of_table) {
691         channel->status.busy = 0;
692         channel->status.ready = 1;
693         channel->status.data_req = 0;
694         channel->status.error = 0;
695         channel->status.seek_complete = 1;
696
697         channel->dma_status.active = 0;
698         channel->dma_status.err = 0;
699     }
700
701     ide_raise_irq(ide, channel);
702
703     return 0;
704 }
705
706
707
708 #define DMA_CMD_PORT      0x00
709 #define DMA_STATUS_PORT   0x02
710 #define DMA_PRD_PORT0     0x04
711 #define DMA_PRD_PORT1     0x05
712 #define DMA_PRD_PORT2     0x06
713 #define DMA_PRD_PORT3     0x07
714
715 #define DMA_CHANNEL_FLAG  0x08
716
717 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
718     struct ide_internal * ide = (struct ide_internal *)private_data;
719     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
720     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
721     struct ide_channel * channel = &(ide->channels[channel_flag]);
722
723     PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
724                port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
725
726     switch (port_offset) {
727         case DMA_CMD_PORT:
728             channel->dma_cmd.val = *(uint8_t *)src;
729
730             if (channel->dma_cmd.start == 0) {
731                 channel->dma_tbl_index = 0;
732             } else {
733                 channel->dma_status.active = 1;
734
735                 if (channel->dma_cmd.read == 1) {
736                     // DMA Read
737                     if (dma_read(core, ide, channel) == -1) {
738                         PrintError(core->vm_info, core, "Failed DMA Read\n");
739                         return -1;
740                     }
741                 } else {
742                     // DMA write
743                     if (dma_write(core, ide, channel) == -1) {
744                         PrintError(core->vm_info, core, "Failed DMA Write\n");
745                         return -1;
746                     }
747                 }
748
749                 channel->dma_cmd.val &= 0x09;
750             }
751
752             break;
753             
754         case DMA_STATUS_PORT: {
755             uint8_t val = *(uint8_t *)src;
756
757             if (length != 1) {
758                 PrintError(core->vm_info, core, "Invalid read length for DMA status port\n");
759                 return -1;
760             }
761
762             // weirdness
763             channel->dma_status.val = ((val & 0x60) | 
764                                        (channel->dma_status.val & 0x01) |
765                                        (channel->dma_status.val & ~val & 0x06));
766
767             break;
768         }           
769         case DMA_PRD_PORT0:
770         case DMA_PRD_PORT1:
771         case DMA_PRD_PORT2:
772         case DMA_PRD_PORT3: {
773             uint_t addr_index = port_offset & 0x3;
774             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
775             int i = 0;
776
777             if (addr_index + length > 4) {
778                 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
779                 return -1;
780             }
781
782             for (i = 0; i < length; i++) {
783                 addr_buf[addr_index + i] = *((uint8_t *)src + i);
784             }
785
786             PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
787
788             break;
789         }
790         default:
791             PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
792             break;
793     }
794
795     return length;
796 }
797
798
799 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
800     struct ide_internal * ide = (struct ide_internal *)private_data;
801     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
802     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
803     struct ide_channel * channel = &(ide->channels[channel_flag]);
804
805     PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
806
807     if (port_offset + length > 16) {
808         PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
809         return -1;
810     }
811
812     memcpy(dst, channel->dma_ports + port_offset, length);
813     
814     PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
815
816     return length;
817 }
818
819
820
821 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
822     struct ide_internal * ide = priv_data;
823     struct ide_channel * channel = get_selected_channel(ide, port);
824     struct ide_drive * drive = get_selected_drive(channel);
825
826     if (length != 1) {
827         PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
828         return -1;
829     }
830
831     PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
832     
833     channel->cmd_reg = *(uint8_t *)src;
834     
835     switch (channel->cmd_reg) {
836
837         case ATA_PIDENTIFY: // ATAPI Identify Device Packet
838             if (drive->drive_type != BLOCK_CDROM) {
839                 drive_reset(drive);
840
841                 // JRL: Should we abort here?
842                 ide_abort_command(ide, channel);
843             } else {
844                 
845                 atapi_identify_device(drive);
846                 
847                 channel->error_reg.val = 0;
848                 channel->status.val = 0x58; // ready, data_req, seek_complete
849             
850                 ide_raise_irq(ide, channel);
851             }
852             break;
853         case ATA_IDENTIFY: // Identify Device
854             if (drive->drive_type != BLOCK_DISK) {
855                 drive_reset(drive);
856
857                 // JRL: Should we abort here?
858                 ide_abort_command(ide, channel);
859             } else {
860                 ata_identify_device(drive);
861
862                 channel->error_reg.val = 0;
863                 channel->status.val = 0x58;
864
865                 ide_raise_irq(ide, channel);
866             }
867             break;
868
869         case ATA_PACKETCMD: // ATAPI Command Packet
870             if (drive->drive_type != BLOCK_CDROM) {
871                 ide_abort_command(ide, channel);
872             }
873             
874             drive->sector_count = 1;
875
876             channel->status.busy = 0;
877             channel->status.write_fault = 0;
878             channel->status.data_req = 1;
879             channel->status.error = 0;
880
881             // reset the data buffer...
882             drive->transfer_length = ATAPI_PACKET_SIZE;
883             drive->transfer_index = 0;
884
885             break;
886
887         case ATA_READ:      // Read Sectors with Retry
888         case ATA_READ_ONCE: // Read Sectors without Retry
889         case ATA_MULTREAD:  // Read multiple sectors per ire
890         case ATA_READ_EXT:  // Read Sectors Extended (LBA48)
891
892             if (channel->cmd_reg==ATA_MULTREAD) { 
893                 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
894             } else {
895                 drive->hd_state.cur_sector_num = 1;
896             }
897
898             if (ata_read_sectors(ide, channel) == -1) {
899                 PrintError(core->vm_info, core, "Error reading sectors\n");
900                 ide_abort_command(ide,channel);
901             }
902             break;
903
904         case ATA_WRITE:            // Write Sector with retry
905         case ATA_WRITE_ONCE:       // Write Sector without retry
906         case ATA_MULTWRITE:        // Write multiple sectors per irq
907         case ATA_WRITE_EXT:        // Write Sectors Extended (LBA48)
908
909             if (channel->cmd_reg==ATA_MULTWRITE) { 
910                 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
911             } else {
912                 drive->hd_state.cur_sector_num = 1;
913             }
914
915             if (ata_write_sectors(ide, channel) == -1) {
916                 PrintError(core->vm_info, core, "Error writing sectors\n");
917                 ide_abort_command(ide,channel);
918             }
919             break;
920
921         case ATA_READDMA: // Read DMA with retry
922         case ATA_READDMA_ONCE: { // Read DMA
923             uint64_t sect_cnt;
924
925             if (ata_get_lba_and_size(ide, channel, &(drive->current_lba), &sect_cnt) == -1) {
926                 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
927                 ide_abort_command(ide, channel);
928                 return length;
929             }
930             
931             drive->hd_state.cur_sector_num = 1;
932             
933             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
934             drive->transfer_index = 0;
935
936             if (channel->dma_status.active == 1) {
937                 // DMA Read
938                 if (dma_read(core, ide, channel) == -1) {
939                     PrintError(core->vm_info, core, "Failed DMA Read\n");
940                     ide_abort_command(ide, channel);
941                 }
942             } else {
943                 PrintError(core->vm_info,core,"Attempt to initiate DMA read on channel that is not active\n");
944                 ide_abort_command(ide, channel);
945             }
946             break;
947         }
948
949         case ATA_WRITEDMA: { // Write DMA
950             uint64_t sect_cnt;
951
952             if (ata_get_lba_and_size(ide, channel, &(drive->current_lba),&sect_cnt) == -1) {
953                 PrintError(core->vm_info,core,"Cannot get lba\n");
954                 ide_abort_command(ide, channel);
955                 return length;
956             }
957
958             drive->hd_state.cur_sector_num = 1;
959
960             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
961             drive->transfer_index = 0;
962
963             if (channel->dma_status.active == 1) {
964                 // DMA Write
965                 if (dma_write(core, ide, channel) == -1) {
966                     PrintError(core->vm_info, core, "Failed DMA Write\n");
967                     ide_abort_command(ide, channel);
968                 }
969             } else {
970                 PrintError(core->vm_info,core,"Attempt to initiate DMA write with DMA inactive\n");
971                 ide_abort_command(ide, channel);
972             }
973             break;
974         }
975         case ATA_STANDBYNOW1: // Standby Now 1
976         case ATA_IDLEIMMEDIATE: // Set Idle Immediate
977         case ATA_STANDBY: // Standby
978         case ATA_SETIDLE1: // Set Idle 1
979         case ATA_SLEEPNOW1: // Sleep Now 1
980         case ATA_STANDBYNOW2: // Standby Now 2
981         case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
982         case ATA_STANDBY2: // Standby 2
983         case ATA_SETIDLE2: // Set idle 2
984         case ATA_SLEEPNOW2: // Sleep Now 2
985             channel->status.val = 0;
986             channel->status.ready = 1;
987             ide_raise_irq(ide, channel);
988             break;
989
990         case ATA_SETFEATURES: // Set Features
991             // Prior to this the features register has been written to. 
992             // This command tells the drive to check if the new value is supported (the value is drive specific)
993             // Common is that bit0=DMA enable
994             // If valid the drive raises an interrupt, if not it aborts.
995
996             // Do some checking here...
997
998             channel->status.busy = 0;
999             channel->status.write_fault = 0;
1000             channel->status.error = 0;
1001             channel->status.ready = 1;
1002             channel->status.seek_complete = 1;
1003             
1004             ide_raise_irq(ide, channel);
1005             break;
1006
1007         case ATA_SPECIFY:  // Initialize Drive Parameters
1008         case ATA_RECAL:  // recalibrate?
1009             channel->status.error = 0;
1010             channel->status.ready = 1;
1011             channel->status.seek_complete = 1;
1012             ide_raise_irq(ide, channel);
1013             break;
1014
1015         case ATA_SETMULT: { // Set multiple mode (IDE Block mode) 
1016             // This makes the drive transfer multiple sectors before generating an interrupt
1017
1018             if (drive->sector_count == 0) {
1019                 PrintError(core->vm_info,core,"Attempt to set multiple to zero\n");
1020                 drive->hd_state.mult_sector_num= 1;
1021                 ide_abort_command(ide,channel);
1022                 break;
1023             } else {
1024                 drive->hd_state.mult_sector_num = drive->sector_count;
1025             }
1026
1027             channel->status.ready = 1;
1028             channel->status.error = 0;
1029
1030             ide_raise_irq(ide, channel);
1031
1032             break;
1033         }
1034
1035         case ATA_DEVICE_RESET: // Reset Device
1036             drive_reset(drive);
1037             channel->error_reg.val = 0x01;
1038             channel->status.busy = 0;
1039             channel->status.ready = 1;
1040             channel->status.seek_complete = 1;
1041             channel->status.write_fault = 0;
1042             channel->status.error = 0;
1043             break;
1044
1045         case ATA_CHECKPOWERMODE1: // Check power mode
1046             drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1047             channel->status.busy = 0;
1048             channel->status.ready = 1;
1049             channel->status.write_fault = 0;
1050             channel->status.data_req = 0;
1051             channel->status.error = 0;
1052             break;
1053
1054         default:
1055             PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1056             ide_abort_command(ide, channel);
1057             break;
1058     }
1059
1060     return length;
1061 }
1062
1063
1064
1065
1066 static int read_hd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1067     struct ide_drive * drive = get_selected_drive(channel);
1068     uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1069
1070
1071     PrintDebug(VM_NONE,VCORE_NONE, "Read HD data:  transfer_index %llu transfer length %llu current sector numer %llu\n",
1072                drive->transfer_index, drive->transfer_length, 
1073                drive->hd_state.cur_sector_num);
1074
1075     if (drive->transfer_index >= drive->transfer_length) {
1076         PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1077                    drive->transfer_length, drive->transfer_index,
1078                    drive->transfer_index + length);
1079         return -1;
1080     }
1081
1082
1083     if (data_offset + length > HD_SECTOR_SIZE) { 
1084        PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1085     }
1086    
1087     // For index==0, the read has been done in ata_read_sectors
1088     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1089         // advance to next sector and read it
1090         
1091         drive->current_lba++;
1092
1093         if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1094             PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
1095             return -1;
1096         }
1097     }
1098
1099     /*
1100       PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
1101       *(uint32_t *)(drive->data_buf + data_offset), 
1102       length, data_offset);
1103     */
1104     memcpy(dst, drive->data_buf + data_offset, length);
1105
1106     drive->transfer_index += length;
1107
1108
1109     /* This is the trigger for interrupt injection.
1110      * For read single sector commands we interrupt after every sector
1111      * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1112      * cur_sector_num is configured depending on the operation we are currently running
1113      * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1114      */
1115     if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
1116         (drive->transfer_index == drive->transfer_length)) {
1117         if (drive->transfer_index < drive->transfer_length) {
1118             // An increment is complete, but there is still more data to be transferred...
1119             PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1120             channel->status.data_req = 1;
1121         } else {
1122             PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1123             // This was the final read of the request
1124             channel->status.data_req = 0;
1125         }
1126
1127         channel->status.ready = 1;
1128         channel->status.busy = 0;
1129
1130         ide_raise_irq(ide, channel);
1131     }
1132
1133
1134     return length;
1135 }
1136
1137 static int write_hd_data(uint8_t * src, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1138     struct ide_drive * drive = get_selected_drive(channel);
1139     uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1140
1141
1142     PrintDebug(VM_NONE,VCORE_NONE, "Write HD data:  transfer_index %llu transfer length %llu current sector numer %llu\n",
1143                drive->transfer_index, drive->transfer_length, 
1144                drive->hd_state.cur_sector_num);
1145
1146     if (drive->transfer_index >= drive->transfer_length) {
1147         PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1148                    drive->transfer_length, drive->transfer_index,
1149                    drive->transfer_index + length);
1150         return -1;
1151     }
1152
1153     if (data_offset + length > HD_SECTOR_SIZE) { 
1154        PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1155     }
1156
1157     // Copy data into our buffer - there will be room due to
1158     // (a) the ata_write test below is flushing sectors
1159     // (b) if we somehow get a sector-stradling write (an error), this will
1160     //     be OK since the buffer itself is >1 sector in memory
1161     memcpy(drive->data_buf + data_offset, src, length);
1162
1163     drive->transfer_index += length;
1164
1165     if ((data_offset+length) >= HD_SECTOR_SIZE) {
1166         // Write out the sector we just finished
1167         if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
1168             PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
1169             return -1;
1170         }
1171
1172         // go onto next sector
1173         drive->current_lba++;
1174     }
1175
1176     /* This is the trigger for interrupt injection.
1177      * For write single sector commands we interrupt after every sector
1178      * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1179      * cur_sector_num is configured depending on the operation we are currently running
1180      * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1181      */
1182     if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
1183         (drive->transfer_index == drive->transfer_length)) {
1184         if (drive->transfer_index < drive->transfer_length) {
1185             // An increment is complete, but there is still more data to be transferred...
1186             PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1187             channel->status.data_req = 1;
1188         } else {
1189             PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1190             // This was the final read of the request
1191             channel->status.data_req = 0;
1192         }
1193
1194         channel->status.ready = 1;
1195         channel->status.busy = 0;
1196
1197         ide_raise_irq(ide, channel);
1198     }
1199
1200     return length;
1201 }
1202
1203
1204
/*
 * PIO data-port read path for an ATAPI CD-ROM: copy bytes out of the
 * drive's data buffer, refilling it from the ATAPI layer at each block
 * boundary, and update the interrupt-reason flags and request-length
 * registers as the guest drains the transfer.
 */
static int read_cd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    uint64_t data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
    //  int req_offset = drive->transfer_index % drive->req_len;
    
    // 0x28 is the ATAPI READ(10) opcode; suppress per-access debug
    // output for those bulk data reads
    if (drive->cd_state.atapi_cmd != 0x28) {
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%llu) (req_len=%u)\n", length, drive->req_len);
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%llu, transfer idx=%llu\n", drive->transfer_length, drive->transfer_index);
    }

    

    // Guest is reading past the end of what the command set up
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n", 
                   drive->transfer_length, drive->transfer_index, 
                   drive->transfer_index + length);
        return -1;
    }

    
    // At each ATAPI block boundary (except the very first block, which
    // was filled when the packet was handled) refill the data buffer
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        if (atapi_update_data_buf(ide, channel) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
            return -1;
        }
    }

    memcpy(dst, drive->data_buf + data_offset, length);
    
    drive->transfer_index += length;


    // Should the req_offset be recalculated here?????
    if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            
            channel->status.data_req = 1;

            // interrupt reason: data phase, not command phase
            drive->irq_flags.c_d = 0;

            // Update the request length in the cylinder regs
            if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
                PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
                return -1;
            }
        } else {
            // This was the final read of the request

            drive->req_len = 0;
            channel->status.data_req = 0;
            channel->status.ready = 1;
            
            // interrupt reason: command complete, no bus release
            drive->irq_flags.c_d = 1;
            drive->irq_flags.rel = 0;
        }

        // transfer direction: device -> host
        drive->irq_flags.io_dir = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1270
1271
1272 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1273     struct ide_drive * drive = get_selected_drive(channel);
1274
1275     channel->status.busy = 0;
1276     channel->status.ready = 1;
1277     channel->status.write_fault = 0;
1278     channel->status.seek_complete = 1;
1279     channel->status.corrected = 0;
1280     channel->status.error = 0;
1281                 
1282     
1283     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1284     drive->transfer_index += length;
1285     
1286     if (drive->transfer_index >= drive->transfer_length) {
1287         channel->status.data_req = 0;
1288     }
1289     
1290     return length;
1291 }
1292
1293
1294
1295 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1296     struct ide_internal * ide = priv_data;
1297     struct ide_channel * channel = get_selected_channel(ide, port);
1298     struct ide_drive * drive = get_selected_drive(channel);
1299
1300     //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
1301
1302     if ((channel->cmd_reg == ATA_IDENTIFY) ||
1303         (channel->cmd_reg == ATA_PIDENTIFY)) {
1304         return read_drive_id((uint8_t *)dst, length, ide, channel);
1305     }
1306
1307     if (drive->drive_type == BLOCK_CDROM) {
1308         if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1309             PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1310             return -1;
1311         }
1312     } else if (drive->drive_type == BLOCK_DISK) {
1313         if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1314             PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
1315             return -1;
1316         }
1317     } else {
1318         memset((uint8_t *)dst, 0, length);
1319     }
1320
1321     return length;
1322 }
1323
1324 // For the write side, we care both about
1325 // direct PIO writes to a drive as well as 
1326 // writes that pass a packet through to an CD
1327 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1328     struct ide_internal * ide = priv_data;
1329     struct ide_channel * channel = get_selected_channel(ide, port);
1330     struct ide_drive * drive = get_selected_drive(channel);
1331
1332     PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n", 
1333             port, *(uint32_t *)src, length);
1334
1335     if (drive->drive_type == BLOCK_CDROM) {
1336         if (channel->cmd_reg == ATA_PACKETCMD) { 
1337             // short command packet - no check for space... 
1338             memcpy(drive->data_buf + drive->transfer_index, src, length);
1339             drive->transfer_index += length;
1340             if (drive->transfer_index >= drive->transfer_length) {
1341                 if (atapi_handle_packet(core, ide, channel) == -1) {
1342                     PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1343                     return -1;
1344                 }
1345             }
1346         } else {
1347             PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1348             return -1;
1349         }
1350     } else if (drive->drive_type == BLOCK_DISK) {
1351         if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1352             PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1353             return -1;
1354         }
1355     } else {
1356         // nothing ... do not support writable cd
1357     }
1358
1359     return length;
1360 }
1361
1362 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1363     struct ide_internal * ide = priv_data;
1364     struct ide_channel * channel = get_selected_channel(ide, port);
1365     struct ide_drive * drive = get_selected_drive(channel);
1366             
1367     if (length != 1) {
1368         PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1369         return -1;
1370     }
1371
1372     PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1373
1374     switch (port) {
1375         // reset and interrupt enable
1376         case PRI_CTRL_PORT:
1377         case SEC_CTRL_PORT: {
1378             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1379
1380             // only reset channel on a 0->1 reset bit transition
1381             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1382                 channel_reset(channel);
1383             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1384                 channel_reset_complete(channel);
1385             }
1386
1387             channel->ctrl_reg.val = tmp_ctrl->val;          
1388             break;
1389         }
1390         case PRI_FEATURES_PORT:
1391         case SEC_FEATURES_PORT:
1392             channel->features.val = *(uint8_t *)src;
1393             break;
1394
1395         case PRI_SECT_CNT_PORT:
1396         case SEC_SECT_CNT_PORT:
1397             // update CHS and LBA28 state
1398             channel->drives[0].sector_count = *(uint8_t *)src;
1399             channel->drives[1].sector_count = *(uint8_t *)src;
1400
1401             // update LBA48 state
1402             if (is_lba48(channel)) {
1403                 uint16_t val = *(uint8_t*)src; // top bits zero;
1404                 if (!channel->drives[0].lba48.sector_count_state) { 
1405                     channel->drives[0].lba48.sector_count = val<<8;
1406                 } else {
1407                     channel->drives[0].lba48.sector_count |= val;
1408                 }
1409                 channel->drives[0].lba48.sector_count_state ^= 1;
1410                 if (!channel->drives[1].lba48.sector_count_state) { 
1411                     channel->drives[1].lba48.sector_count = val<<8;
1412                 } else {
1413                     channel->drives[1].lba48.sector_count |= val;
1414                 }
1415                 channel->drives[0].lba48.sector_count_state ^= 1;
1416             }
1417             
1418             break;
1419
1420         case PRI_SECT_NUM_PORT:
1421         case SEC_SECT_NUM_PORT:
1422             // update CHS and LBA28 state
1423             channel->drives[0].sector_num = *(uint8_t *)src;
1424             channel->drives[1].sector_num = *(uint8_t *)src;
1425
1426             // update LBA48 state
1427             if (is_lba48(channel)) {
1428                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1429                 if (!channel->drives[0].lba48.lba41_state) { 
1430                     channel->drives[0].lba48.lba |= val<<24; 
1431                 } else {
1432                     channel->drives[0].lba48.lba |= val;
1433                 }
1434                 channel->drives[0].lba48.lba41_state ^= 1;
1435                 if (!channel->drives[1].lba48.lba41_state) { 
1436                     channel->drives[1].lba48.lba |= val<<24; 
1437                 } else {
1438                     channel->drives[1].lba48.lba |= val;
1439                 }
1440                 channel->drives[1].lba48.lba41_state ^= 1;
1441             }
1442
1443             break;
1444         case PRI_CYL_LOW_PORT:
1445         case SEC_CYL_LOW_PORT:
1446             // update CHS and LBA28 state
1447             channel->drives[0].cylinder_low = *(uint8_t *)src;
1448             channel->drives[1].cylinder_low = *(uint8_t *)src;
1449
1450             // update LBA48 state
1451             if (is_lba48(channel)) {
1452                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1453                 if (!channel->drives[0].lba48.lba52_state) { 
1454                     channel->drives[0].lba48.lba |= val<<32; 
1455                 } else {
1456                     channel->drives[0].lba48.lba |= val<<8;
1457                 }
1458                 channel->drives[0].lba48.lba52_state ^= 1;
1459                 if (!channel->drives[1].lba48.lba52_state) { 
1460                     channel->drives[1].lba48.lba |= val<<32; 
1461                 } else {
1462                     channel->drives[1].lba48.lba |= val<<8;
1463                 }
1464                 channel->drives[1].lba48.lba52_state ^= 1;
1465             }
1466
1467             break;
1468
1469         case PRI_CYL_HIGH_PORT:
1470         case SEC_CYL_HIGH_PORT:
1471             // update CHS and LBA28 state
1472             channel->drives[0].cylinder_high = *(uint8_t *)src;
1473             channel->drives[1].cylinder_high = *(uint8_t *)src;
1474
1475             // update LBA48 state
1476             if (is_lba48(channel)) {
1477                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1478                 if (!channel->drives[0].lba48.lba63_state) { 
1479                     channel->drives[0].lba48.lba |= val<<40; 
1480                 } else {
1481                     channel->drives[0].lba48.lba |= val<<16;
1482                 }
1483                 channel->drives[0].lba48.lba63_state ^= 1;
1484                 if (!channel->drives[1].lba48.lba63_state) { 
1485                     channel->drives[1].lba48.lba |= val<<40; 
1486                 } else {
1487                     channel->drives[1].lba48.lba |= val<<16;
1488                 }
1489                 channel->drives[1].lba48.lba63_state ^= 1;
1490             }
1491
1492             break;
1493
1494         case PRI_DRV_SEL_PORT:
1495         case SEC_DRV_SEL_PORT: {
1496             struct ide_drive_head_reg nh, oh;
1497
1498             oh.val = channel->drive_head.val;
1499             channel->drive_head.val = nh.val = *(uint8_t *)src;
1500
1501             // has LBA flipped?
1502             if ((oh.val & 0xe0) != (nh.val & 0xe0)) {
1503                 // reset LBA48 state
1504                 channel->drives[0].lba48.sector_count_state=0;
1505                 channel->drives[0].lba48.lba41_state=0;
1506                 channel->drives[0].lba48.lba52_state=0;
1507                 channel->drives[0].lba48.lba63_state=0;
1508                 channel->drives[1].lba48.sector_count_state=0;
1509                 channel->drives[1].lba48.lba41_state=0;
1510                 channel->drives[1].lba48.lba52_state=0;
1511                 channel->drives[1].lba48.lba63_state=0;
1512             }
1513             
1514
1515             drive = get_selected_drive(channel);
1516
1517             // Selecting a non-present device is a no-no
1518             if (drive->drive_type == BLOCK_NONE) {
1519                 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1520                 channel->error_reg.abort = 1;
1521                 channel->status.error = 1;
1522             } else {
1523                 channel->status.busy = 0;
1524                 channel->status.ready = 1;
1525                 channel->status.data_req = 0;
1526                 channel->status.error = 0;
1527                 channel->status.seek_complete = 1;
1528                 
1529                 channel->dma_status.active = 0;
1530                 channel->dma_status.err = 0;
1531             }
1532
1533             break;
1534         }
1535         default:
1536             PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
1537             return -1;
1538     }
1539     return length;
1540 }
1541
1542
1543 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1544     struct ide_internal * ide = priv_data;
1545     struct ide_channel * channel = get_selected_channel(ide, port);
1546     struct ide_drive * drive = get_selected_drive(channel);
1547     
1548     if (length != 1) {
1549         PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1550         return -1;
1551     }
1552     
1553     PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1554
1555     if ((port == PRI_ADDR_REG_PORT) ||
1556         (port == SEC_ADDR_REG_PORT)) {
1557         // unused, return 0xff
1558         *(uint8_t *)dst = 0xff;
1559         return length;
1560     }
1561
1562
1563     // if no drive is present just return 0 + reserved bits
1564     if (drive->drive_type == BLOCK_NONE) {
1565         if ((port == PRI_DRV_SEL_PORT) ||
1566             (port == SEC_DRV_SEL_PORT)) {
1567             *(uint8_t *)dst = 0xa0;
1568         } else {
1569             *(uint8_t *)dst = 0;
1570         }
1571
1572         return length;
1573     }
1574
1575     switch (port) {
1576
1577         // This is really the error register.
1578         case PRI_FEATURES_PORT:
1579         case SEC_FEATURES_PORT:
1580             *(uint8_t *)dst = channel->error_reg.val;
1581             break;
1582             
1583         case PRI_SECT_CNT_PORT:
1584         case SEC_SECT_CNT_PORT:
1585             *(uint8_t *)dst = drive->sector_count;
1586             break;
1587
1588         case PRI_SECT_NUM_PORT:
1589         case SEC_SECT_NUM_PORT:
1590             *(uint8_t *)dst = drive->sector_num;
1591             break;
1592
1593         case PRI_CYL_LOW_PORT:
1594         case SEC_CYL_LOW_PORT:
1595             *(uint8_t *)dst = drive->cylinder_low;
1596             break;
1597
1598
1599         case PRI_CYL_HIGH_PORT:
1600         case SEC_CYL_HIGH_PORT:
1601             *(uint8_t *)dst = drive->cylinder_high;
1602             break;
1603
1604         case PRI_DRV_SEL_PORT:
1605         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1606             *(uint8_t *)dst = channel->drive_head.val;
1607             break;
1608
1609         case PRI_CTRL_PORT:
1610         case SEC_CTRL_PORT:
1611         case PRI_CMD_PORT:
1612         case SEC_CMD_PORT:
1613             // Something about lowering interrupts here....
1614             *(uint8_t *)dst = channel->status.val;
1615             break;
1616
1617         default:
1618             PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1619             return -1;
1620     }
1621
1622     PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
1623
1624     return length;
1625 }
1626
1627
1628
1629 static void init_drive(struct ide_drive * drive) {
1630
1631     drive->sector_count = 0x01;
1632     drive->sector_num = 0x01;
1633     drive->cylinder = 0x0000;
1634
1635     drive->drive_type = BLOCK_NONE;
1636
1637     memset(drive->model, 0, sizeof(drive->model));
1638
1639     drive->transfer_index = 0;
1640     drive->transfer_length = 0;
1641     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1642
1643     drive->num_cylinders = 0;
1644     drive->num_heads = 0;
1645     drive->num_sectors = 0;
1646     
1647
1648     drive->private_data = NULL;
1649     drive->ops = NULL;
1650 }
1651
1652 static void init_channel(struct ide_channel * channel) {
1653     int i = 0;
1654
1655     channel->error_reg.val = 0x01;
1656
1657     //** channel->features = 0x0;
1658
1659     channel->drive_head.val = 0x00;
1660     channel->status.val = 0x00;
1661     channel->cmd_reg = 0x00;
1662     channel->ctrl_reg.val = 0x08;
1663
1664     channel->dma_cmd.val = 0;
1665     channel->dma_status.val = 0;
1666     channel->dma_prd_addr = 0;
1667     channel->dma_tbl_index = 0;
1668
1669     for (i = 0; i < 2; i++) {
1670         init_drive(&(channel->drives[i]));
1671     }
1672
1673 }
1674
1675
/* PCI config-space write callback for the IDE controller function.
 * Currently a no-op: the write is logged (debug builds only) and accepted.
 * NOTE(review): interrupt-line updates are not propagated anywhere; the
 * commented-out diagnostic below suggests that was once planned — confirm
 * whether legacy IRQ 14/15 routing makes this safe to ignore. */
static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
    PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
    /*
    struct ide_internal * ide = (struct ide_internal *)(private_data);

    PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
    */

    return 0;
}
1686
1687 static int init_ide_state(struct ide_internal * ide) {
1688
1689     /* 
1690      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1691      */
1692
1693     init_channel(&(ide->channels[0]));
1694     ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1695
1696     init_channel(&(ide->channels[1]));
1697     ide->channels[1].irq = SEC_DEFAULT_IRQ ;
1698
1699
1700     return 0;
1701 }
1702
1703
1704
1705
/* Device-manager free callback: releases the IDE controller state.
 * NOTE(review): the PCI device registered in ide_init is not explicitly
 * deregistered here (see the question below); presumably the device
 * manager / v3_remove_device path handles hook cleanup — confirm before
 * relying on repeated add/remove cycles. */
static int ide_free(struct ide_internal * ide) {

    // deregister from PCI?

    V3_Free(ide);

    return 0;
}
1714
1715 #ifdef V3_CONFIG_CHECKPOINT
1716
1717 #include <palacios/vmm_sprintf.h>
1718
/* Save extended checkpoint state for the whole IDE controller.
 *
 * Layout: one (currently empty) top-level context named <id>, then one
 * context per channel ("<id>-<ch>") and one per drive ("<id>-<ch>-<drive>").
 * Drive-type-specific (ATAPI/HD) fields are packed at the end of each
 * drive context, followed by the LBA48 latch state.
 *
 * Returns 0 on success, -1 on failure (the checkpoint may then contain
 * partially written state).  The V3_CHKPT_SAVE macros jump to
 * savefailout on error. */
static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    struct v3_chkpt_ctx *ctx=0;
    int ch_num = 0;
    int drive_num = 0;
    char buf[128];   /* scratch for per-channel / per-drive context names */
    

    ctx=v3_chkpt_open_ctx(chkpt,id);
    
    if (!ctx) { 
      PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
      goto savefailout;
    }

    // nothing saved yet
    
    v3_chkpt_close_ctx(ctx);ctx=0;
   

    for (ch_num = 0; ch_num < 2; ch_num++) {
        struct ide_channel * ch = &(ide->channels[ch_num]);

        snprintf(buf, 128, "%s-%d", id, ch_num);

        ctx = v3_chkpt_open_ctx(chkpt, buf);
        
        if (!ctx) { 
          PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
          goto savefailout;
        }

        /* Channel task-file registers and bus-master DMA engine state */
        V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
        V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
        V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);



        v3_chkpt_close_ctx(ctx); ctx=0;

        for (drive_num = 0; drive_num < 2; drive_num++) {
            struct ide_drive * drive = &(ch->drives[drive_num]);
            
            snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);

            ctx = v3_chkpt_open_ctx(chkpt, buf);
            
            if (!ctx) { 
              PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
              goto savefailout;
            }

            /* Generic per-drive state (shared by ATA and ATAPI drives) */
            V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
            V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);

            V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);

            V3_CHKPT_SAVE(ctx, "DATA_BUF",  drive->data_buf, savefailout);


            /* For now we'll just pack the type specific data at the end... */
            /* We should probably add a new context here in the future... */
            if (drive->drive_type == BLOCK_CDROM) {
              V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
            } else if (drive->drive_type == BLOCK_DISK) {
              V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
              V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
              V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
            } else if (drive->drive_type == BLOCK_NONE) { 
              // no drive connected, so no data
            } else {
              PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
              goto savefailout;
            }

            /* LBA48 register latch state (two writes per register pair) */
            V3_CHKPT_SAVE(ctx, "LBA48_LBA", drive->lba48.lba, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, savefailout);
            
            v3_chkpt_close_ctx(ctx); ctx=0;
        }
    }

// goodout:
    return 0;

 savefailout:
    PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
    if (ctx) {v3_chkpt_close_ctx(ctx); }
    return -1;
}
1826
1827
1828
1829 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1830     struct ide_internal * ide = (struct ide_internal *)private_data;
1831     struct v3_chkpt_ctx *ctx=0;
1832     int ch_num = 0;
1833     int drive_num = 0;
1834     char buf[128];
1835     
1836     ctx=v3_chkpt_open_ctx(chkpt,id);
1837     
1838     if (!ctx) { 
1839       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1840       goto loadfailout;
1841     }
1842
1843     // nothing saved yet
1844     
1845     v3_chkpt_close_ctx(ctx);ctx=0;
1846    
1847
1848     for (ch_num = 0; ch_num < 2; ch_num++) {
1849         struct ide_channel * ch = &(ide->channels[ch_num]);
1850
1851         snprintf(buf, 128, "%s-%d", id, ch_num);
1852
1853         ctx = v3_chkpt_open_ctx(chkpt, buf);
1854         
1855         if (!ctx) { 
1856           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1857           goto loadfailout;
1858         }
1859
1860         V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1861         V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1862         V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1863         V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1864         V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1865         V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1866         V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1867         V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1868         V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1869         V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1870
1871         v3_chkpt_close_ctx(ctx); ctx=0;
1872
1873         for (drive_num = 0; drive_num < 2; drive_num++) {
1874             struct ide_drive * drive = &(ch->drives[drive_num]);
1875             
1876             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1877
1878             ctx = v3_chkpt_open_ctx(chkpt, buf);
1879             
1880             if (!ctx) { 
1881               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1882               goto loadfailout;
1883             }
1884
1885             V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1886             V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1887             V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1888             V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1889
1890             V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1891             V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1892             V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1893
1894             V3_CHKPT_LOAD(ctx, "DATA_BUF",  drive->data_buf, loadfailout);
1895
1896             
1897             /* For now we'll just pack the type specific data at the end... */
1898             /* We should probably add a new context here in the future... */
1899             if (drive->drive_type == BLOCK_CDROM) {
1900               V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1901               V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1902               V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1903             } else if (drive->drive_type == BLOCK_DISK) {
1904               V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1905               V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1906               V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1907             } else if (drive->drive_type == BLOCK_NONE) { 
1908               // no drive connected, so no data
1909             } else {
1910               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1911               goto loadfailout;
1912             }
1913
1914             V3_CHKPT_LOAD(ctx, "LBA48_LBA", drive->lba48.lba, loadfailout);
1915             V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, loadfailout);
1916             V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, loadfailout);
1917             V3_CHKPT_LOAD(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, loadfailout);
1918             V3_CHKPT_LOAD(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, loadfailout);
1919             V3_CHKPT_LOAD(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, loadfailout);
1920             
1921         }
1922     }
1923 // goodout:
1924     return 0;
1925
1926  loadfailout:
1927     PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1928     if (ctx) {v3_chkpt_close_ctx(ctx); }
1929     return -1;
1930
1931 }
1932
1933
1934
1935 #endif
1936
1937
/* Device-manager operations table for the IDE controller.
 * Checkpoint save/load hooks are only compiled in when checkpointing
 * support is configured. */
static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))ide_free,
#ifdef V3_CONFIG_CHECKPOINT
    .save_extended = ide_save_extended,
    .load_extended = ide_load_extended
#endif
};
1945
1946
1947
1948
1949 static int connect_fn(struct v3_vm_info * vm, 
1950                       void * frontend_data, 
1951                       struct v3_dev_blk_ops * ops, 
1952                       v3_cfg_tree_t * cfg, 
1953                       void * private_data) {
1954     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1955     struct ide_channel * channel = NULL;
1956     struct ide_drive * drive = NULL;
1957
1958     char * bus_str = v3_cfg_val(cfg, "bus_num");
1959     char * drive_str = v3_cfg_val(cfg, "drive_num");
1960     char * type_str = v3_cfg_val(cfg, "type");
1961     char * model_str = v3_cfg_val(cfg, "model");
1962     uint_t bus_num = 0;
1963     uint_t drive_num = 0;
1964
1965
1966     if ((!type_str) || (!drive_str) || (!bus_str)) {
1967         PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1968         return -1;
1969     }
1970
1971     bus_num = atoi(bus_str);
1972     drive_num = atoi(drive_str);
1973
1974     channel = &(ide->channels[bus_num]);
1975     drive = &(channel->drives[drive_num]);
1976
1977     if (drive->drive_type != BLOCK_NONE) {
1978         PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1979         return -1;
1980     }
1981
1982     if (model_str != NULL) {
1983         strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1984     }
1985
1986     if (strcasecmp(type_str, "cdrom") == 0) {
1987         drive->drive_type = BLOCK_CDROM;
1988
1989         while (strlen((char *)(drive->model)) < 40) {
1990             strcat((char*)(drive->model), " ");
1991         }
1992
1993     } else if (strcasecmp(type_str, "hd") == 0) {
1994         drive->drive_type = BLOCK_DISK;
1995
1996         drive->hd_state.accessed = 0;
1997         drive->hd_state.mult_sector_num = 1;
1998
1999         drive->num_sectors = 63;
2000         drive->num_heads = 16;
2001         drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
2002     } else {
2003         PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
2004         return -1;
2005     }
2006  
2007     drive->ops = ops;
2008
2009     if (ide->ide_pci) {
2010         // Hardcode this for now, but its not a good idea....
2011         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
2012     }
2013  
2014     drive->private_data = private_data;
2015
2016     return 0;
2017 }
2018
2019
2020
2021
2022 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
2023     struct ide_internal * ide  = NULL;
2024     char * dev_id = v3_cfg_val(cfg, "ID");
2025     int ret = 0;
2026
2027     PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
2028
2029     ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
2030
2031     if (ide == NULL) {
2032         PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
2033         return -1;
2034     }
2035
2036     memset(ide, 0, sizeof(struct ide_internal));
2037
2038     ide->vm = vm;
2039     ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
2040
2041     if (ide->pci_bus != NULL) {
2042         struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
2043
2044         if (!southbridge) {
2045             PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
2046             V3_Free(ide);
2047             return -1;
2048         }
2049
2050         ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
2051     }
2052
2053     PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
2054
2055     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
2056
2057     if (dev == NULL) {
2058         PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
2059         V3_Free(ide);
2060         return -1;
2061     }
2062
2063     if (init_ide_state(ide) == -1) {
2064         PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
2065         v3_remove_device(dev);
2066         return -1;
2067     }
2068
2069     PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
2070
2071     ret |= v3_dev_hook_io(dev, PRI_DATA_PORT, 
2072                           &read_data_port, &write_data_port);
2073     ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
2074                           &read_port_std, &write_port_std);
2075     ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
2076                           &read_port_std, &write_port_std);
2077     ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
2078                           &read_port_std, &write_port_std);
2079     ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
2080                           &read_port_std, &write_port_std);
2081     ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
2082                           &read_port_std, &write_port_std);
2083     ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
2084                           &read_port_std, &write_port_std);
2085     ret |= v3_dev_hook_io(dev, PRI_CMD_PORT, 
2086                           &read_port_std, &write_cmd_port);
2087
2088     ret |= v3_dev_hook_io(dev, SEC_DATA_PORT, 
2089                           &read_data_port, &write_data_port);
2090     ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
2091                           &read_port_std, &write_port_std);
2092     ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
2093                           &read_port_std, &write_port_std);
2094     ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
2095                           &read_port_std, &write_port_std);
2096     ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
2097                           &read_port_std, &write_port_std);
2098     ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
2099                           &read_port_std, &write_port_std);
2100     ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
2101                           &read_port_std, &write_port_std);
2102     ret |= v3_dev_hook_io(dev, SEC_CMD_PORT, 
2103                           &read_port_std, &write_cmd_port);
2104   
2105
2106     ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT, 
2107                           &read_port_std, &write_port_std);
2108
2109     ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT, 
2110                           &read_port_std, &write_port_std);
2111   
2112
2113     ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
2114                           &read_port_std, &write_port_std);
2115
2116     ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
2117                           &read_port_std, &write_port_std);
2118
2119
2120     if (ret != 0) {
2121         PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
2122         v3_remove_device(dev);
2123         return -1;
2124     }
2125
2126
2127     if (ide->pci_bus) {
2128         struct v3_pci_bar bars[6];
2129         struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
2130         struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
2131         struct pci_device * pci_dev = NULL;
2132         int i;
2133
2134         PrintDebug(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
2135
2136         for (i = 0; i < 6; i++) {
2137             bars[i].type = PCI_BAR_NONE;
2138         }
2139
2140         bars[4].type = PCI_BAR_IO;
2141         //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
2142         bars[4].default_base_port = -1;
2143         bars[4].num_ports = 16;
2144
2145         bars[4].io_read = read_dma_port;
2146         bars[4].io_write = write_dma_port;
2147         bars[4].private_data = ide;
2148
2149         pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
2150                                          "PIIX3_IDE", bars,
2151                                          pci_config_update, NULL, NULL, NULL, ide);
2152
2153         if (pci_dev == NULL) {
2154             PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i); 
2155             v3_remove_device(dev);
2156             return -1;
2157         }
2158
2159         /* This is for CMD646 devices 
2160            pci_dev->config_header.vendor_id = 0x1095;
2161            pci_dev->config_header.device_id = 0x0646;
2162            pci_dev->config_header.revision = 0x8f07;
2163         */
2164
2165         pci_dev->config_header.vendor_id = 0x8086;
2166         pci_dev->config_header.device_id = 0x7010;
2167         pci_dev->config_header.revision = 0x00;
2168
2169         pci_dev->config_header.prog_if = 0x80; // Master IDE device
2170         pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
2171         pci_dev->config_header.class = PCI_CLASS_STORAGE;
2172
2173         pci_dev->config_header.command = 0;
2174         pci_dev->config_header.status = 0x0280;
2175
2176         ide->ide_pci = pci_dev;
2177
2178
2179     }
2180
2181     if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
2182         PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
2183         v3_remove_device(dev);
2184         return -1;
2185     }
2186     
2187
2188     PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
2189
2190     return 0;
2191 }
2192
2193
/* Register the "IDE" device class with the device manager; ide_init is
 * presumably invoked for each matching device entry in the VM config —
 * confirm against the device-manager registration macro. */
device_register("IDE", ide_init)
2195
2196
2197
2198
2199 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num, 
2200                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2201
2202     struct ide_internal * ide  = ide_data;  
2203     struct ide_channel * channel = &(ide->channels[channel_num]);
2204     struct ide_drive * drive = &(channel->drives[drive_num]);
2205     
2206     if (drive->drive_type == BLOCK_NONE) {
2207         return -1;
2208     }
2209
2210     *cylinders = drive->num_cylinders;
2211     *heads = drive->num_heads;
2212     *sectors = drive->num_sectors;
2213
2214     return 0;
2215 }