Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
Checking out the other branches works the same way.
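
For example, to list the available branches and track a release branch instead (the branch name below is only an illustration; use one reported by the first command), you could run

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2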


Commit: checkpoint changes to get "reset to BIOS" working.
File: palacios/src/devices/ide.c
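
The listing below shows ide.c as it stands at this commit. If you have cloned the repository as described above, this file's history can be reviewed with standard git commands (nothing here is specific to Palacios), for example

  git log -p -- palacios/src/devices/ide.c
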
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef V3_CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
36
37
38 #define PRI_DATA_PORT         0x1f0
39 #define PRI_FEATURES_PORT     0x1f1
40 #define PRI_SECT_CNT_PORT     0x1f2
41 #define PRI_SECT_NUM_PORT     0x1f3
42 #define PRI_CYL_LOW_PORT      0x1f4
43 #define PRI_CYL_HIGH_PORT     0x1f5
44 #define PRI_DRV_SEL_PORT      0x1f6
45 #define PRI_CMD_PORT          0x1f7
46 #define PRI_CTRL_PORT         0x3f6
47 #define PRI_ADDR_REG_PORT     0x3f7
48
49 #define SEC_DATA_PORT         0x170
50 #define SEC_FEATURES_PORT     0x171
51 #define SEC_SECT_CNT_PORT     0x172
52 #define SEC_SECT_NUM_PORT     0x173
53 #define SEC_CYL_LOW_PORT      0x174
54 #define SEC_CYL_HIGH_PORT     0x175
55 #define SEC_DRV_SEL_PORT      0x176
56 #define SEC_CMD_PORT          0x177
57 #define SEC_CTRL_PORT         0x376
58 #define SEC_ADDR_REG_PORT     0x377
59
60
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
63
64 #define DATA_BUFFER_SIZE 2048
65
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
68
69
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
71                                           "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72                                            "PRI_CTRL", "PRI_ADDR_REG"};
73
74
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
76                                           "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77                                            "SEC_CTRL", "SEC_ADDR_REG"};
78
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80                                            "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
81
82
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
105 struct ide_cd_state {
106     struct atapi_sense_data sense;
107
108     uint8_t atapi_cmd;
109     struct atapi_error_recovery err_recovery;
110 };
111
112 struct ide_hd_state {
113     uint32_t accessed;
114
115     /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116     uint32_t mult_sector_num;
117
118     /* This is the current op sector size:
119      * for multiple sector ops this equals mult_sector_num
120      * for standard ops this equals 1
121      */
122     uint32_t cur_sector_num;
123 };
124
125 struct ide_drive {
126     // Command Registers
127
128     v3_block_type_t drive_type;
129
130     struct v3_dev_blk_ops * ops;
131
132     union {
133         struct ide_cd_state cd_state;
134         struct ide_hd_state hd_state;
135     };
136
137     char model[41];
138
139     // Where we are in the data transfer
140     uint32_t transfer_index;
141
142     // the length of a transfer
143     // calculated for easy access
144     uint32_t transfer_length;
145
146     uint64_t current_lba;
147
148     // We have a local data buffer that we use for IO port accesses
149     uint8_t data_buf[DATA_BUFFER_SIZE];
150
151
152     uint32_t num_cylinders;
153     uint32_t num_heads;
154     uint32_t num_sectors;
155
156     void * private_data;
157     
158     union {
159         uint8_t sector_count;             // 0x1f2,0x172
160         struct atapi_irq_flags irq_flags;
161     } __attribute__((packed));
162
163     union {
164         uint8_t sector_num;               // 0x1f3,0x173
165         uint8_t lba0;
166     } __attribute__((packed));
167
168     union {
169         uint16_t cylinder;
170         uint16_t lba12;
171         
172         struct {
173             uint8_t cylinder_low;       // 0x1f4,0x174
174             uint8_t cylinder_high;      // 0x1f5,0x175
175         } __attribute__((packed));
176         
177         struct {
178             uint8_t lba1;
179             uint8_t lba2;
180         } __attribute__((packed));
181         
182         
183         // The transfer length requested by the CPU 
184         uint16_t req_len;
185     } __attribute__((packed));
186
187 };
188
189
190
191 struct ide_channel {
192     struct ide_drive drives[2];
193
194     // Command Registers
195     struct ide_error_reg error_reg;     // [read] 0x1f1,0x171
196
197     struct ide_features_reg features;
198
199     struct ide_drive_head_reg drive_head; // 0x1f6,0x176
200
201     struct ide_status_reg status;       // [read] 0x1f7,0x177
202     uint8_t cmd_reg;                // [write] 0x1f7,0x177
203
204     int irq; // this is temporary until we add PCI support
205
206     // Control Registers
207     struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
208
209     struct ide_dma_cmd_reg dma_cmd;
210     struct ide_dma_status_reg dma_status;
211     uint32_t dma_prd_addr;
212     uint32_t dma_tbl_index;
213 };
214
215
216
217 struct ide_internal {
218     struct ide_channel channels[2];
219
220     struct v3_southbridge * southbridge;
221     struct vm_device * pci_bus;
222
223     struct pci_device * ide_pci;
224
225     struct v3_vm_info * vm;
226 };
227
228
229
230
231
232 /* Utility functions */
233
234 static inline uint16_t be_to_le_16(const uint16_t val) {
235     uint8_t * buf = (uint8_t *)&val;
236     return (buf[0] << 8) | (buf[1]) ;
237 }
238
239 static inline uint16_t le_to_be_16(const uint16_t val) {
240     return be_to_le_16(val);
241 }
242
243
244 static inline uint32_t be_to_le_32(const uint32_t val) {
245     uint8_t * buf = (uint8_t *)&val;
246     return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
247 }
248
249 static inline uint32_t le_to_be_32(const uint32_t val) {
250     return be_to_le_32(val);
251 }
252
253
254 static inline int get_channel_index(ushort_t port) {
255     if (((port & 0xfff8) == 0x1f0) ||
256         ((port & 0xfffe) == 0x3f6) || 
257         ((port & 0xfff8) == 0xc000)) {
258         return 0;
259     } else if (((port & 0xfff8) == 0x170) ||
260                ((port & 0xfffe) == 0x376) ||
261                ((port & 0xfff8) == 0xc008)) {
262         return 1;
263     }
264
265     return -1;
266 }
267
268 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
269     int channel_idx = get_channel_index(port);    
270     return &(ide->channels[channel_idx]);
271 }
272
273 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
274     return &(channel->drives[channel->drive_head.drive_sel]);
275 }
276
277
278 static inline int is_lba_enabled(struct ide_channel * channel) {
279     return channel->drive_head.lba_mode;
280 }
281
282
283 /* Drive Commands */
284 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
285     if (channel->ctrl_reg.irq_disable == 0) {
286         //        PrintError("Raising IDE Interrupt %d\n", channel->irq);
287         channel->dma_status.int_gen = 1;
288         v3_raise_irq(ide->vm, channel->irq);
289     }
290 }
291
292
293 static void drive_reset(struct ide_drive * drive) {
294     drive->sector_count = 0x01;
295     drive->sector_num = 0x01;
296
297     PrintDebug("Resetting drive %s\n", drive->model);
298     
299     if (drive->drive_type == BLOCK_CDROM) {
300         drive->cylinder = 0xeb14;    // ATAPI signature: 0x14 in cyl low, 0xEB in cyl high
301     } else {
302         drive->cylinder = 0x0000;
303         //drive->hd_state.accessed = 0;
304     }
305
306
307     memset(drive->data_buf, 0, sizeof(drive->data_buf));
308     drive->transfer_index = 0;
309
310     // Send the reset signal to the connected device callbacks
311     //     channel->drives[0].reset();
312     //    channel->drives[1].reset();
313 }
314
315 static void channel_reset(struct ide_channel * channel) {
316     
317     // set busy and seek complete flags
318     channel->status.val = 0x90;
319
320     // Clear errors
321     channel->error_reg.val = 0x01;
322
323     // clear commands
324     channel->cmd_reg = 0x00;
325
326     channel->ctrl_reg.irq_disable = 0;
327 }
328
329 static void channel_reset_complete(struct ide_channel * channel) {
330     channel->status.busy = 0;
331     channel->status.ready = 1;
332
333     channel->drive_head.head_num = 0;    
334     
335     drive_reset(&(channel->drives[0]));
336     drive_reset(&(channel->drives[1]));
337 }
338
339
340 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
341     channel->status.val = 0x41; // Error + ready
342     channel->error_reg.val = 0x04; // ABRT (command aborted)
343
344     ide_raise_irq(ide, channel);
345 }
346
347
348 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
349 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
350
351
352 /* ATAPI functions */
353 #include "atapi.h"
354
355 /* ATA functions */
356 #include "ata.h"
357
358
359 #ifdef V3_CONFIG_DEBUG_IDE
360 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
361     struct ide_dma_prd prd_entry;
362     int index = 0;
363
364     PrintDebug("Dumping PRD table\n");
365
366     while (1) {
367         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
368         int ret;
369
370         ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
371         
372         if (ret != sizeof(struct ide_dma_prd)) {
373             PrintError("Could not read PRD\n");
374             return;
375         }
376
377         PrintDebug("\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
378                    prd_entry.base_addr, 
379                    (prd_entry.size == 0) ? 0x10000 : prd_entry.size, 
380                    prd_entry.end_of_table);
381
382         if (prd_entry.end_of_table) {
383             break;
384         }
385
386         index++;
387     }
388
389     return;
390 }
391 #endif
392
393 /* IO Operations */
394 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
395     struct ide_drive * drive = get_selected_drive(channel);
396     // This is at top level scope to do the EOT test at the end
397     struct ide_dma_prd prd_entry = {};
398     uint_t bytes_left = drive->transfer_length;
399
400     // Read in the data buffer....
401     // Read a sector/block at a time until the prd entry is full.
402
403 #ifdef V3_CONFIG_DEBUG_IDE
404     print_prd_table(ide, channel);
405 #endif
406
407     PrintDebug("DMA read for %d bytes\n", bytes_left);
408
409     // Loop through the disk data
410     while (bytes_left > 0) {
411         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
412         uint_t prd_bytes_left = 0;
413         uint_t prd_offset = 0;
414         int ret;
415
416         PrintDebug("PRD table address = %x\n", channel->dma_prd_addr);
417
418         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
419
420         if (ret != sizeof(struct ide_dma_prd)) {
421             PrintError("Could not read PRD\n");
422             return -1;
423         }
424
425         PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
426                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
427
428         // loop through the PRD data....
429
430         if (prd_entry.size == 0) {
431             // a size of 0 means 64k
432             prd_bytes_left = 0x10000;
433         } else {
434             prd_bytes_left = prd_entry.size;
435         }
436
437
438         while (prd_bytes_left > 0) {
439             uint_t bytes_to_write = 0;
440
441             if (drive->drive_type == BLOCK_DISK) {
442                 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
443
444
445                 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
446                     PrintError("Failed to read next disk sector\n");
447                     return -1;
448                 }
449             } else if (drive->drive_type == BLOCK_CDROM) {
450                 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
451                     bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
452
453                     if (atapi_read_chunk(ide, channel) == -1) {
454                         PrintError("Failed to read next disk sector\n");
455                         return -1;
456                     }
457                 } else {
458                     /*
459                     PrintError("DMA of command packet\n");
460                     PrintError("How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
461                     return -1;
462                     */
463                     int cmd_ret = 0;
464
465                     bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
466                     prd_bytes_left = bytes_to_write;
467
468                     cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, 
469                                                   bytes_to_write, drive->data_buf); 
470
471                     if (cmd_ret != bytes_to_write) {
472                         PrintError("Failed to copy ATAPI data into guest memory (ret=%d)\n", cmd_ret);
473                         return -1;
474                     }
475                     drive->transfer_index += bytes_to_write;
476                     prd_bytes_left = 0;
477
478                     channel->status.busy = 0;
479                     channel->status.ready = 1;
480                     channel->status.data_req = 0;
481                     channel->status.error = 0;
482                     channel->status.seek_complete = 1;
483
484                     channel->dma_status.active = 0;
485                     channel->dma_status.err = 0;
486
487                     ide_raise_irq(ide, channel);
488                     
489                     return 0;
490                 }
491             }
492
493             PrintDebug("Writing DMA data to guest Memory ptr=%p, len=%d\n", 
494                        (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
495
496             drive->current_lba++;
497
498             ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 
499
500             if (ret != bytes_to_write) {
501                 PrintError("Failed to copy data into guest memory... (ret=%d)\n", ret);
502                 return -1;
503             }
504
505             PrintDebug("\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
506
507             drive->transfer_index += ret;
508             prd_bytes_left -= ret;
509             prd_offset += ret;
510             bytes_left -= ret;
511         }
512
513         channel->dma_tbl_index++;
514
515         if (drive->drive_type == BLOCK_DISK) {
516             if (drive->transfer_index % HD_SECTOR_SIZE) {
517                 PrintError("We currently don't handle sectors that span PRD descriptors\n");
518                 return -1;
519             }
520         } else if (drive->drive_type == BLOCK_CDROM) {
521             if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
522                 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
523                     PrintError("We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
524                     PrintError("transfer_index=%d, transfer_length=%d\n", 
525                                drive->transfer_index, drive->transfer_length);
526                     return -1;
527                 }
528             }
529         }
530
531
532         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
533             PrintError("DMA table not large enough for data transfer...\n");
534             return -1;
535         }
536     }
537
538     /*
539       drive->irq_flags.io_dir = 1;
540       drive->irq_flags.c_d = 1;
541       drive->irq_flags.rel = 0;
542     */
543
544
545     // Update to the next PRD entry
546
547     // set DMA status
548
549     if (prd_entry.end_of_table) {
550         channel->status.busy = 0;
551         channel->status.ready = 1;
552         channel->status.data_req = 0;
553         channel->status.error = 0;
554         channel->status.seek_complete = 1;
555
556         channel->dma_status.active = 0;
557         channel->dma_status.err = 0;
558     }
559
560     ide_raise_irq(ide, channel);
561
562     return 0;
563 }
564
565
566 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
567     struct ide_drive * drive = get_selected_drive(channel);
568     // This is at top level scope to do the EOT test at the end
569     struct ide_dma_prd prd_entry = {};
570     uint_t bytes_left = drive->transfer_length;
571
572
573     PrintDebug("DMA write from %d bytes\n", bytes_left);
574
575     // Loop through disk data
576     while (bytes_left > 0) {
577         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
578         uint_t prd_bytes_left = 0;
579         uint_t prd_offset = 0;
580         int ret;
581         
582         PrintDebug("PRD Table address = %x\n", channel->dma_prd_addr);
583
584         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
585
586         if (ret != sizeof(struct ide_dma_prd)) {
587             PrintError("Could not read PRD\n");
588             return -1;
589         }
590
591         PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
592                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
593
594         prd_bytes_left = prd_entry.size;
595
596         while (prd_bytes_left > 0) {
597             uint_t bytes_to_write = 0;
598
599
600             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
601
602
603             ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
604
605             if (ret != bytes_to_write) {
606                 PrintError("Faild to copy data from guest memory... (ret=%d)\n", ret);
607                 return -1;
608             }
609
610             PrintDebug("\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
611
612
613             if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
614                 PrintError("Failed to write data to disk\n");
615                 return -1;
616             }
617             
618             drive->current_lba++;
619
620             drive->transfer_index += ret;
621             prd_bytes_left -= ret;
622             prd_offset += ret;
623             bytes_left -= ret;
624         }
625
626         channel->dma_tbl_index++;
627
628         if (drive->transfer_index % HD_SECTOR_SIZE) {
629             PrintError("We currently don't handle sectors that span PRD descriptors\n");
630             return -1;
631         }
632
633         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
634             PrintError("DMA table not large enough for data transfer...\n");
635             return -1;
636         }
637     }
638
639     if (prd_entry.end_of_table) {
640         channel->status.busy = 0;
641         channel->status.ready = 1;
642         channel->status.data_req = 0;
643         channel->status.error = 0;
644         channel->status.seek_complete = 1;
645
646         channel->dma_status.active = 0;
647         channel->dma_status.err = 0;
648     }
649
650     ide_raise_irq(ide, channel);
651
652     return 0;
653 }
654
655
656
657 #define DMA_CMD_PORT      0x00
658 #define DMA_STATUS_PORT   0x02
659 #define DMA_PRD_PORT0     0x04
660 #define DMA_PRD_PORT1     0x05
661 #define DMA_PRD_PORT2     0x06
662 #define DMA_PRD_PORT3     0x07
663
664 #define DMA_CHANNEL_FLAG  0x08
665
666 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
667     struct ide_internal * ide = (struct ide_internal *)private_data;
668     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
669     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
670     struct ide_channel * channel = &(ide->channels[channel_flag]);
671
672     PrintDebug("IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
673                port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
674
675     switch (port_offset) {
676         case DMA_CMD_PORT:
677             channel->dma_cmd.val = *(uint8_t *)src;
678
679             if (channel->dma_cmd.start == 0) {
680                 channel->dma_tbl_index = 0;
681             } else {
682                 channel->dma_status.active = 1;
683
684                 if (channel->dma_cmd.read == 1) {
685                     // DMA Read
686                     if (dma_read(core, ide, channel) == -1) {
687                         PrintError("Failed DMA Read\n");
688                         return -1;
689                     }
690                 } else {
691                     // DMA write
692                     if (dma_write(core, ide, channel) == -1) {
693                         PrintError("Failed DMA Write\n");
694                         return -1;
695                     }
696                 }
697
698                 channel->dma_cmd.val &= 0x09;
699             }
700
701             break;
702             
703         case DMA_STATUS_PORT: {
704             uint8_t val = *(uint8_t *)src;
705
706             if (length != 1) {
707                 PrintError("Invalid read length for DMA status port\n");
708                 return -1;
709             }
710
711             // BMIDE status semantics: bits 5-6 follow the written value, bit 0 (active) is preserved, bits 1-2 (err/int) are write-1-to-clear
712             channel->dma_status.val = ((val & 0x60) | 
713                                        (channel->dma_status.val & 0x01) |
714                                        (channel->dma_status.val & ~val & 0x06));
715
716             break;
717         }           
718         case DMA_PRD_PORT0:
719         case DMA_PRD_PORT1:
720         case DMA_PRD_PORT2:
721         case DMA_PRD_PORT3: {
722             uint_t addr_index = port_offset & 0x3;
723             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
724             int i = 0;
725
726             if (addr_index + length > 4) {
727                 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
728                 return -1;
729             }
730
731             for (i = 0; i < length; i++) {
732                 addr_buf[addr_index + i] = *((uint8_t *)src + i);
733             }
734
735             PrintDebug("Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
736
737             break;
738         }
739         default:
740             PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
741             return -1;
742     }
743
744     return length;
745 }
746
747
748 static int read_dma_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * private_data) {
749     struct ide_internal * ide = (struct ide_internal *)private_data;
750     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
751     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
752     struct ide_channel * channel = &(ide->channels[channel_flag]);
753
754     PrintDebug("Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
755
756     switch (port_offset) {
757         case DMA_CMD_PORT:
758             *(uint8_t *)dst = channel->dma_cmd.val;
759             break;
760
761         case DMA_STATUS_PORT:
762             if (length != 1) {
763                 PrintError("Invalid read length for DMA status port\n");
764                 return -1;
765             }
766
767             *(uint8_t *)dst = channel->dma_status.val;
768             break;
769
770         case DMA_PRD_PORT0:
771         case DMA_PRD_PORT1:
772         case DMA_PRD_PORT2:
773         case DMA_PRD_PORT3: {
774             uint_t addr_index = port_offset & 0x3;
775             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
776             int i = 0;
777
778             if (addr_index + length > 4) {
779                 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
780                 return -1;
781             }
782
783             for (i = 0; i < length; i++) {
784                 *((uint8_t *)dst + i) = addr_buf[addr_index + i];
785             }
786
787             break;
788         }
789         default:
790             PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
791             return -1;
792     }
793
794     PrintDebug("\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
795
796     return length;
797 }
798
799
800
801 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
802     struct ide_internal * ide = priv_data;
803     struct ide_channel * channel = get_selected_channel(ide, port);
804     struct ide_drive * drive = get_selected_drive(channel);
805
806     if (length != 1) {
807         PrintError("Invalid Write Length on IDE command Port %x\n", port);
808         return -1;
809     }
810
811     PrintDebug("IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
812     
813     channel->cmd_reg = *(uint8_t *)src;
814     
815     switch (channel->cmd_reg) {
816
817         case 0xa1: // ATAPI Identify Device Packet
818             if (drive->drive_type != BLOCK_CDROM) {
819                 drive_reset(drive);
820
821                 // JRL: Should we abort here?
822                 ide_abort_command(ide, channel);
823             } else {
824                 
825                 atapi_identify_device(drive);
826                 
827                 channel->error_reg.val = 0;
828                 channel->status.val = 0x58; // ready, data_req, seek_complete
829             
830                 ide_raise_irq(ide, channel);
831             }
832             break;
833         case 0xec: // Identify Device
834             if (drive->drive_type != BLOCK_DISK) {
835                 drive_reset(drive);
836
837                 // JRL: Should we abort here?
838                 ide_abort_command(ide, channel);
839             } else {
840                 ata_identify_device(drive);
841
842                 channel->error_reg.val = 0;
843                 channel->status.val = 0x58;
844
845                 ide_raise_irq(ide, channel);
846             }
847             break;
848
849         case 0xa0: // ATAPI Command Packet
850             if (drive->drive_type != BLOCK_CDROM) {
851                 ide_abort_command(ide, channel);
852             }
853             
854             drive->sector_count = 1;
855
856             channel->status.busy = 0;
857             channel->status.write_fault = 0;
858             channel->status.data_req = 1;
859             channel->status.error = 0;
860
861             // reset the data buffer...
862             drive->transfer_length = ATAPI_PACKET_SIZE;
863             drive->transfer_index = 0;
864
865             break;
866
867         case 0x20: // Read Sectors with Retry
868         case 0x21: // Read Sectors without Retry
869             drive->hd_state.cur_sector_num = 1;
870
871             if (ata_read_sectors(ide, channel) == -1) {
872                 PrintError("Error reading sectors\n");
873                 return -1;
874             }
875             break;
876
877         case 0x24: // Read Sectors Extended
878             drive->hd_state.cur_sector_num = 1;
879
880             if (ata_read_sectors_ext(ide, channel) == -1) {
881                 PrintError("Error reading extended sectors\n");
882                 return -1;
883             }
884             break;
885
886         case 0xc8: // Read DMA with retry
887         case 0xc9: { // Read DMA
888             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
889
890             if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
891                 ide_abort_command(ide, channel);
892                 return 0;
893             }
894             
895             drive->hd_state.cur_sector_num = 1;
896             
897             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
898             drive->transfer_index = 0;
899
900             if (channel->dma_status.active == 1) {
901                 // DMA Read
902                 if (dma_read(core, ide, channel) == -1) {
903                     PrintError("Failed DMA Read\n");
904                     return -1;
905                 }
906             }
907             break;
908         }
909
910         case 0xca: { // Write DMA
911             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
912
913             if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
914                 ide_abort_command(ide, channel);
915                 return 0;
916             }
917
918             drive->hd_state.cur_sector_num = 1;
919
920             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
921             drive->transfer_index = 0;
922
923             if (channel->dma_status.active == 1) {
924                 // DMA Write
925                 if (dma_write(core, ide, channel) == -1) {
926                     PrintError("Failed DMA Write\n");
927                     return -1;
928                 }
929             }
930             break;
931         }
932         case 0xe0: // Standby Now 1
933         case 0xe1: // Set Idle Immediate
934         case 0xe2: // Standby
935         case 0xe3: // Set Idle 1
936         case 0xe6: // Sleep Now 1
937         case 0x94: // Standby Now 2
938         case 0x95: // Idle Immediate (CFA)
939         case 0x96: // Standby 2
940         case 0x97: // Set idle 2
941         case 0x99: // Sleep Now 2
942             channel->status.val = 0;
943             channel->status.ready = 1;
944             ide_raise_irq(ide, channel);
945             break;
946
947         case 0xef: // Set Features
948             // Prior to this the features register has been written to. 
949             // This command tells the drive to check if the new value is supported (the value is drive specific)
950             // A common use is enabling DMA (bit 0 of the features value)
951             // If the value is valid the drive raises an interrupt; if not, it aborts the command.
952
953             // Do some checking here...
954
955             channel->status.busy = 0;
956             channel->status.write_fault = 0;
957             channel->status.error = 0;
958             channel->status.ready = 1;
959             channel->status.seek_complete = 1;
960             
961             ide_raise_irq(ide, channel);
962             break;
963
964         case 0x91:  // Initialize Drive Parameters
965         case 0x10:  // recalibrate?
966             channel->status.error = 0;
967             channel->status.ready = 1;
968             channel->status.seek_complete = 1;
969             ide_raise_irq(ide, channel);
970             break;
971         case 0xc6: { // Set multiple mode (IDE Block mode) 
972             // This makes the drive transfer multiple sectors before generating an interrupt
973             uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
974
975             if (tmp_sect_num > MAX_MULT_SECTORS) {
976                 ide_abort_command(ide, channel);
977                 break;
978             }
979
980             if (drive->sector_count == 0) {
981                 drive->hd_state.mult_sector_num = 1;
982             } else {
983                 drive->hd_state.mult_sector_num = drive->sector_count;
984             }
985
986             channel->status.ready = 1;
987             channel->status.error = 0;
988
989             ide_raise_irq(ide, channel);
990
991             break;
992         }
993
994         case 0x08: // Reset Device
995             drive_reset(drive);
996             channel->error_reg.val = 0x01;
997             channel->status.busy = 0;
998             channel->status.ready = 1;
999             channel->status.seek_complete = 1;
1000             channel->status.write_fault = 0;
1001             channel->status.error = 0;
1002             break;
1003
1004         case 0xe5: // Check power mode
1005             drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1006             channel->status.busy = 0;
1007             channel->status.ready = 1;
1008             channel->status.write_fault = 0;
1009             channel->status.data_req = 0;
1010             channel->status.error = 0;
1011             break;
1012
1013         case 0xc4:  // read multiple sectors
1014             drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num; // NOTE: falls through to the default (unimplemented command) case below
1015         default:
1016             PrintError("Unimplemented IDE command (%x)\n", channel->cmd_reg);
1017             return -1;
1018     }
1019
1020     return length;
1021 }
1022
1023
1024 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1025     struct ide_internal * ide = priv_data;
1026     struct ide_channel * channel = get_selected_channel(ide, port);
1027     struct ide_drive * drive = get_selected_drive(channel);
1028
1029     //    PrintDebug("IDE: Writing Data Port %x (val=%x, len=%d)\n", 
1030     //         port, *(uint32_t *)src, length);
1031     
1032     memcpy(drive->data_buf + drive->transfer_index, src, length);    
1033     drive->transfer_index += length;
1034
1035     // Transfer is complete, dispatch the command
1036     if (drive->transfer_index >= drive->transfer_length) {
1037         switch (channel->cmd_reg) {
1038             case 0x30: // Write Sectors
1039                 PrintError("Writing Data not yet implemented\n");
1040                 return -1;
1041                 
1042             case 0xa0: // ATAPI packet command
1043                 if (atapi_handle_packet(core, ide, channel) == -1) {
1044                     PrintError("Error handling ATAPI packet\n");
1045                     return -1;
1046                 }
1047                 break;
1048             default:
1049                 PrintError("Unhandld IDE Command %x\n", channel->cmd_reg);
1050                 return -1;
1051         }
1052     }
1053
1054     return length;
1055 }
1056
1057
1058 static int read_hd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1059     struct ide_drive * drive = get_selected_drive(channel);
1060     int data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1061
1062
1063
1064     if (drive->transfer_index >= drive->transfer_length) {
1065         PrintError("Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
1066                    drive->transfer_length, drive->transfer_index,
1067                    drive->transfer_index + length);
1068         return -1;
1069     }
1070
1071     
1072     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1073         drive->current_lba++;
1074
1075         if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1076             PrintError("Could not read next disk sector\n");
1077             return -1;
1078         }
1079     }
1080
1081     /*
1082       PrintDebug("Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
1083       *(uint32_t *)(drive->data_buf + data_offset), 
1084       length, data_offset);
1085     */
1086     memcpy(dst, drive->data_buf + data_offset, length);
1087
1088     drive->transfer_index += length;
1089
1090
1091     /* This is the trigger for interrupt injection.
1092      * For read single sector commands we interrupt after every sector
1093      * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1094      * cur_sector_num is configured depending on the operation we are currently running
1095      * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1096      */
1097     if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
1098         (drive->transfer_index == drive->transfer_length)) {
1099         if (drive->transfer_index < drive->transfer_length) {
1100             // An increment is complete, but there is still more data to be transferred...
1101             PrintDebug("Integral Complete, still transferring more sectors\n");
1102             channel->status.data_req = 1;
1103
1104             drive->irq_flags.c_d = 0;
1105         } else {
1106             PrintDebug("Final Sector Transferred\n");
1107             // This was the final read of the request
1108             channel->status.data_req = 0;
1109
1110             
1111             drive->irq_flags.c_d = 1;
1112             drive->irq_flags.rel = 0;
1113         }
1114
1115         channel->status.ready = 1;
1116         drive->irq_flags.io_dir = 1;
1117         channel->status.busy = 0;
1118
1119         ide_raise_irq(ide, channel);
1120     }
1121
1122
1123     return length;
1124 }
1125
1126
1127
1128 static int read_cd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1129     struct ide_drive * drive = get_selected_drive(channel);
1130     int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1131     //  int req_offset = drive->transfer_index % drive->req_len;
1132     
1133     if (drive->cd_state.atapi_cmd != 0x28) {
1134         PrintDebug("IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
1135         PrintDebug("IDE: transfer len=%d, transfer idx=%d\n", drive->transfer_length, drive->transfer_index);
1136     }
1137
1138     
1139
1140     if (drive->transfer_index >= drive->transfer_length) {
1141         PrintError("Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n", 
1142                    drive->transfer_length, drive->transfer_index, 
1143                    drive->transfer_index + length);
1144         return -1;
1145     }
1146
1147     
1148     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1149         if (atapi_update_data_buf(ide, channel) == -1) {
1150             PrintError("Could not update CDROM data buffer\n");
1151             return -1;
1152         }
1153     }
1154
1155     memcpy(dst, drive->data_buf + data_offset, length);
1156     
1157     drive->transfer_index += length;
1158
1159
1160     // Should the req_offset be recalculated here?????
1161     if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1162         if (drive->transfer_index < drive->transfer_length) {
1163             // An increment is complete, but there is still more data to be transferred...
1164             
1165             channel->status.data_req = 1;
1166
1167             drive->irq_flags.c_d = 0;
1168
1169             // Update the request length in the cylinder regs
1170             if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1171                 PrintError("Could not update request length after completed increment\n");
1172                 return -1;
1173             }
1174         } else {
1175             // This was the final read of the request
1176
1177             drive->req_len = 0;
1178             channel->status.data_req = 0;
1179             channel->status.ready = 1;
1180             
1181             drive->irq_flags.c_d = 1;
1182             drive->irq_flags.rel = 0;
1183         }
1184
1185         drive->irq_flags.io_dir = 1;
1186         channel->status.busy = 0;
1187
1188         ide_raise_irq(ide, channel);
1189     }
1190
1191     return length;
1192 }
1193
1194
1195 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1196     struct ide_drive * drive = get_selected_drive(channel);
1197
1198     channel->status.busy = 0;
1199     channel->status.ready = 1;
1200     channel->status.write_fault = 0;
1201     channel->status.seek_complete = 1;
1202     channel->status.corrected = 0;
1203     channel->status.error = 0;
1204                 
1205     
1206     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1207     drive->transfer_index += length;
1208     
1209     if (drive->transfer_index >= drive->transfer_length) {
1210         channel->status.data_req = 0;
1211     }
1212     
1213     return length;
1214 }
1215
1216
1217 static int ide_read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1218     struct ide_internal * ide = priv_data;
1219     struct ide_channel * channel = get_selected_channel(ide, port);
1220     struct ide_drive * drive = get_selected_drive(channel);
1221
1222     //       PrintDebug("IDE: Reading Data Port %x (len=%d)\n", port, length);
1223
1224     if ((channel->cmd_reg == 0xec) ||
1225         (channel->cmd_reg == 0xa1)) {
1226         return read_drive_id((uint8_t *)dst, length, ide, channel);
1227     }
1228
1229     if (drive->drive_type == BLOCK_CDROM) {
1230         if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1231             PrintError("IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1232             return -1;
1233         }
1234     } else if (drive->drive_type == BLOCK_DISK) {
1235         if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1236             PrintError("IDE: Could not read HD Data\n");
1237             return -1;
1238         }
1239     } else {
1240         memset((uint8_t *)dst, 0, length);
1241     }
1242
1243     return length;
1244 }
1245
1246 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1247     struct ide_internal * ide = priv_data;
1248     struct ide_channel * channel = get_selected_channel(ide, port);
1249     struct ide_drive * drive = get_selected_drive(channel);
1250             
1251     if (length != 1) {
1252         PrintError("Invalid Write length on IDE port %x\n", port);
1253         return -1;
1254     }
1255
1256     PrintDebug("IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1257
1258     switch (port) {
1259         // reset and interrupt enable
1260         case PRI_CTRL_PORT:
1261         case SEC_CTRL_PORT: {
1262             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1263
1264             // only reset channel on a 0->1 reset bit transition
1265             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1266                 channel_reset(channel);
1267             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1268                 channel_reset_complete(channel);
1269             }
1270
1271             channel->ctrl_reg.val = tmp_ctrl->val;          
1272             break;
1273         }
1274         case PRI_FEATURES_PORT:
1275         case SEC_FEATURES_PORT:
1276             channel->features.val = *(uint8_t *)src;
1277             break;
1278
1279         case PRI_SECT_CNT_PORT:
1280         case SEC_SECT_CNT_PORT:
1281             channel->drives[0].sector_count = *(uint8_t *)src;
1282             channel->drives[1].sector_count = *(uint8_t *)src;
1283             break;
1284
1285         case PRI_SECT_NUM_PORT:
1286         case SEC_SECT_NUM_PORT:
1287             channel->drives[0].sector_num = *(uint8_t *)src;
1288             channel->drives[1].sector_num = *(uint8_t *)src;
1289             break;
1290         case PRI_CYL_LOW_PORT:
1291         case SEC_CYL_LOW_PORT:
1292             channel->drives[0].cylinder_low = *(uint8_t *)src;
1293             channel->drives[1].cylinder_low = *(uint8_t *)src;
1294             break;
1295
1296         case PRI_CYL_HIGH_PORT:
1297         case SEC_CYL_HIGH_PORT:
1298             channel->drives[0].cylinder_high = *(uint8_t *)src;
1299             channel->drives[1].cylinder_high = *(uint8_t *)src;
1300             break;
1301
1302         case PRI_DRV_SEL_PORT:
1303         case SEC_DRV_SEL_PORT: {
1304             channel->drive_head.val = *(uint8_t *)src;
1305             
1306             // make sure the reserved bits are ok..
1307             // JRL TODO: check with new ramdisk to make sure this is right...
1308             channel->drive_head.val |= 0xa0;
1309
1310             drive = get_selected_drive(channel);
1311
1312             // Selecting a non-present device is a no-no
1313             if (drive->drive_type == BLOCK_NONE) {
1314                 PrintDebug("Attempting to select a non-present drive\n");
1315                 channel->error_reg.abort = 1;
1316                 channel->status.error = 1;
1317             } else {
1318                 channel->status.busy = 0;
1319                 channel->status.ready = 1;
1320                 channel->status.data_req = 0;
1321                 channel->status.error = 0;
1322                 channel->status.seek_complete = 1;
1323                 
1324                 channel->dma_status.active = 0;
1325                 channel->dma_status.err = 0;
1326             }
1327
1328             break;
1329         }
1330         default:
1331             PrintError("IDE: Write to unknown Port %x\n", port);
1332             return -1;
1333     }
1334     return length;
1335 }
1336
1337
1338 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1339     struct ide_internal * ide = priv_data;
1340     struct ide_channel * channel = get_selected_channel(ide, port);
1341     struct ide_drive * drive = get_selected_drive(channel);
1342     
1343     if (length != 1) {
1344         PrintError("Invalid Read length on IDE port %x\n", port);
1345         return -1;
1346     }
1347     
1348     PrintDebug("IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1349
1350     if ((port == PRI_ADDR_REG_PORT) ||
1351         (port == SEC_ADDR_REG_PORT)) {
1352         // unused, return 0xff
1353         *(uint8_t *)dst = 0xff;
1354         return length;
1355     }
1356
1357
1358     // if no drive is present just return 0 + reserved bits
1359     if (drive->drive_type == BLOCK_NONE) {
1360         if ((port == PRI_DRV_SEL_PORT) ||
1361             (port == SEC_DRV_SEL_PORT)) {
1362             *(uint8_t *)dst = 0xa0;
1363         } else {
1364             *(uint8_t *)dst = 0;
1365         }
1366
1367         return length;
1368     }
1369
1370     switch (port) {
1371
1372         // This is really the error register.
1373         case PRI_FEATURES_PORT:
1374         case SEC_FEATURES_PORT:
1375             *(uint8_t *)dst = channel->error_reg.val;
1376             break;
1377             
1378         case PRI_SECT_CNT_PORT:
1379         case SEC_SECT_CNT_PORT:
1380             *(uint8_t *)dst = drive->sector_count;
1381             break;
1382
1383         case PRI_SECT_NUM_PORT:
1384         case SEC_SECT_NUM_PORT:
1385             *(uint8_t *)dst = drive->sector_num;
1386             break;
1387
1388         case PRI_CYL_LOW_PORT:
1389         case SEC_CYL_LOW_PORT:
1390             *(uint8_t *)dst = drive->cylinder_low;
1391             break;
1392
1393
1394         case PRI_CYL_HIGH_PORT:
1395         case SEC_CYL_HIGH_PORT:
1396             *(uint8_t *)dst = drive->cylinder_high;
1397             break;
1398
1399         case PRI_DRV_SEL_PORT:
1400         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1401             *(uint8_t *)dst = channel->drive_head.val;
1402             break;
1403
1404         case PRI_CTRL_PORT:
1405         case SEC_CTRL_PORT:
1406         case PRI_CMD_PORT:
1407         case SEC_CMD_PORT:
1408             // Something about lowering interrupts here....
1409             *(uint8_t *)dst = channel->status.val;
1410             break;
1411
1412         default:
1413             PrintError("Invalid Port: %x\n", port);
1414             return -1;
1415     }
1416
1417     PrintDebug("\tVal=%x\n", *(uint8_t *)dst);
1418
1419     return length;
1420 }
1421
1422
1423
1424 static void init_drive(struct ide_drive * drive) {
1425
1426     drive->sector_count = 0x01;
1427     drive->sector_num = 0x01;
1428     drive->cylinder = 0x0000;
1429
1430     drive->drive_type = BLOCK_NONE;
1431
1432     memset(drive->model, 0, sizeof(drive->model));
1433
1434     drive->transfer_index = 0;
1435     drive->transfer_length = 0;
1436     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1437
1438     drive->num_cylinders = 0;
1439     drive->num_heads = 0;
1440     drive->num_sectors = 0;
1441     
1442
1443     drive->private_data = NULL;
1444     drive->ops = NULL;
1445 }
1446
1447 static void init_channel(struct ide_channel * channel) {
1448     int i = 0;
1449
1450     channel->error_reg.val = 0x01;
1451     channel->drive_head.val = 0x00;
1452     channel->status.val = 0x00;
1453     channel->cmd_reg = 0x00;
1454     channel->ctrl_reg.val = 0x08;
1455
1456
1457     channel->dma_cmd.val = 0;
1458     channel->dma_status.val = 0;
1459     channel->dma_prd_addr = 0;
1460     channel->dma_tbl_index = 0;
1461
1462     for (i = 0; i < 2; i++) {
1463         init_drive(&(channel->drives[i]));
1464     }
1465
1466 }
1467
1468
1469 static int pci_config_update(uint_t reg_num, void * src, uint_t length, void * private_data) {
1470     PrintDebug("PCI Config Update\n");
1471     /*
1472     struct ide_internal * ide = (struct ide_internal *)(private_data);
1473
1474     PrintDebug("\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1475     */
1476
1477     return 0;
1478 }
1479
1480 static int init_ide_state(struct ide_internal * ide) {
1481     int i;
1482
1483     /* 
1484      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1485      */
1486
1487     for (i = 0; i < 2; i++) {
1488         init_channel(&(ide->channels[i]));
1489
1490         // JRL: this is a terrible hack...
1491         ide->channels[i].irq = PRI_DEFAULT_IRQ + i;
1492     }
1493
1494
1495     return 0;
1496 }
1497
1498
1499
1500
1501 static int ide_free(struct ide_internal * ide) {
1502
1503     // deregister from PCI?
1504
1505     V3_Free(ide);
1506
1507     return 0;
1508 }
1509
1510 #ifdef V3_CONFIG_CHECKPOINT
1511
1512 #include <palacios/vmm_sprintf.h>
1513 static int ide_save(struct v3_chkpt_ctx * ctx, void * private_data) {
1514     struct ide_internal * ide = (struct ide_internal *)private_data;
1515     int ch_num = 0;
1516     int drive_num = 0;
1517     char buf[128];
1518     
1519
1520     for (ch_num = 0; ch_num < 2; ch_num++) {
1521         struct v3_chkpt_ctx * ch_ctx = NULL;
1522         struct ide_channel * ch = &(ide->channels[ch_num]);
1523
1524         snprintf(buf, 128, "channel-%d", ch_num);
1525         ch_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
1526
1527         v3_chkpt_save_8(ch_ctx, "ERROR", &(ch->error_reg.val));
1528         v3_chkpt_save_8(ch_ctx, "FEATURES", &(ch->features.val));
1529         v3_chkpt_save_8(ch_ctx, "DRIVE_HEAD", &(ch->drive_head.val));
1530         v3_chkpt_save_8(ch_ctx, "STATUS", &(ch->status.val));
1531         v3_chkpt_save_8(ch_ctx, "CMD_REG", &(ch->cmd_reg));
1532         v3_chkpt_save_8(ch_ctx, "CTRL_REG", &(ch->ctrl_reg.val));
1533         v3_chkpt_save_8(ch_ctx, "DMA_CMD", &(ch->dma_cmd.val));
1534         v3_chkpt_save_8(ch_ctx, "DMA_STATUS", &(ch->dma_status.val));
1535         v3_chkpt_save_32(ch_ctx, "PRD_ADDR", &(ch->dma_prd_addr));
1536         v3_chkpt_save_32(ch_ctx, "DMA_TBL_IDX", &(ch->dma_tbl_index));
1537
1538
1539         for (drive_num = 0; drive_num < 2; drive_num++) {
1540             struct v3_chkpt_ctx * drive_ctx = NULL;
1541             struct ide_drive * drive = &(ch->drives[drive_num]);
1542             
1543             snprintf(buf, 128, "drive-%d-%d", ch_num, drive_num);
1544             drive_ctx = v3_chkpt_open_ctx(ctx->chkpt, ch_ctx, buf);
1545             
1546             v3_chkpt_save_8(drive_ctx, "DRIVE_TYPE", &(drive->drive_type));
1547             v3_chkpt_save_8(drive_ctx, "SECTOR_COUNT", &(drive->sector_count));
1548             v3_chkpt_save_8(drive_ctx, "SECTOR_NUM", &(drive->sector_num));
1549             v3_chkpt_save_16(drive_ctx, "CYLINDER", &(drive->cylinder));
1550
1551             v3_chkpt_save_64(drive_ctx, "CURRENT_LBA", &(drive->current_lba));
1552             v3_chkpt_save_32(drive_ctx, "TRANSFER_LENGTH", &(drive->transfer_length));
1553             v3_chkpt_save_32(drive_ctx, "TRANSFER_INDEX", &(drive->transfer_index));
1554
1555             v3_chkpt_save(drive_ctx, "DATA_BUF", DATA_BUFFER_SIZE, drive->data_buf);
1556
1557
1558             /* For now we'll just pack the type specific data at the end... */
1559             /* We should probably add a new context here in the future... */
1560             if (drive->drive_type == BLOCK_CDROM) {
1561                 v3_chkpt_save(drive_ctx, "ATAPI_SENSE_DATA", 18, drive->cd_state.sense.buf);
1562                 v3_chkpt_save_8(drive_ctx, "ATAPI_CMD", &(drive->cd_state.atapi_cmd));
1563                 v3_chkpt_save(drive_ctx, "ATAPI_ERR_RECOVERY", 12, drive->cd_state.err_recovery.buf);
1564             } else if (drive->drive_type == BLOCK_DISK) {
1565                 v3_chkpt_save_32(drive_ctx, "ACCESSED", &(drive->hd_state.accessed));
1566                 v3_chkpt_save_32(drive_ctx, "MULT_SECT_NUM", &(drive->hd_state.mult_sector_num));
1567                 v3_chkpt_save_32(drive_ctx, "CUR_SECT_NUM", &(drive->hd_state.cur_sector_num));
1568             }
1569         }
1570     }
1571
1572     return 0;
1573 }
1574
1575
1576
1577 static int ide_load(struct v3_chkpt_ctx * ctx, void * private_data) {
1578     struct ide_internal * ide = (struct ide_internal *)private_data;
1579     int ch_num = 0;
1580     int drive_num = 0;
1581     char buf[128];
1582     
1583
1584     for (ch_num = 0; ch_num < 2; ch_num++) {
1585         struct v3_chkpt_ctx * ch_ctx = NULL;
1586         struct ide_channel * ch = &(ide->channels[ch_num]);
1587
1588         snprintf(buf, 128, "channel-%d", ch_num);
1589         ch_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
1590
1591         v3_chkpt_load_8(ch_ctx, "ERROR", &(ch->error_reg.val));
1592         v3_chkpt_load_8(ch_ctx, "FEATURES", &(ch->features.val));
1593         v3_chkpt_load_8(ch_ctx, "DRIVE_HEAD", &(ch->drive_head.val));
1594         v3_chkpt_load_8(ch_ctx, "STATUS", &(ch->status.val));
1595         v3_chkpt_load_8(ch_ctx, "CMD_REG", &(ch->cmd_reg));
1596         v3_chkpt_load_8(ch_ctx, "CTRL_REG", &(ch->ctrl_reg.val));
1597         v3_chkpt_load_8(ch_ctx, "DMA_CMD", &(ch->dma_cmd.val));
1598         v3_chkpt_load_8(ch_ctx, "DMA_STATUS", &(ch->dma_status.val));
1599         v3_chkpt_load_32(ch_ctx, "PRD_ADDR", &(ch->dma_prd_addr));
1600         v3_chkpt_load_32(ch_ctx, "DMA_TBL_IDX", &(ch->dma_tbl_index));
1601
1602
1603         for (drive_num = 0; drive_num < 2; drive_num++) {
1604             struct v3_chkpt_ctx * drive_ctx = NULL;
1605             struct ide_drive * drive = &(ch->drives[drive_num]);
1606             
1607             snprintf(buf, 128, "drive-%d-%d", ch_num, drive_num);
1608             drive_ctx = v3_chkpt_open_ctx(ctx->chkpt, ch_ctx, buf);
1609             
1610             v3_chkpt_load_8(drive_ctx, "DRIVE_TYPE", &(drive->drive_type));
1611             v3_chkpt_load_8(drive_ctx, "SECTOR_COUNT", &(drive->sector_count));
1612             v3_chkpt_load_8(drive_ctx, "SECTOR_NUM", &(drive->sector_num));
1613             v3_chkpt_load_16(drive_ctx, "CYLINDER", &(drive->cylinder));
1614
1615             v3_chkpt_load_64(drive_ctx, "CURRENT_LBA", &(drive->current_lba));
1616             v3_chkpt_load_32(drive_ctx, "TRANSFER_LENGTH", &(drive->transfer_length));
1617             v3_chkpt_load_32(drive_ctx, "TRANSFER_INDEX", &(drive->transfer_index));
1618
1619             v3_chkpt_load(drive_ctx, "DATA_BUF", DATA_BUFFER_SIZE, drive->data_buf);
1620
1621
1622             /* For now we'll just pack the type specific data at the end... */
1623             /* We should probably add a new context here in the future... */
1624             if (drive->drive_type == BLOCK_CDROM) {
1625                 v3_chkpt_load(drive_ctx, "ATAPI_SENSE_DATA", 18, drive->cd_state.sense.buf);
1626                 v3_chkpt_load_8(drive_ctx, "ATAPI_CMD", &(drive->cd_state.atapi_cmd));
1627                 v3_chkpt_load(drive_ctx, "ATAPI_ERR_RECOVERY", 12, drive->cd_state.err_recovery.buf);
1628             } else if (drive->drive_type == BLOCK_DISK) {
1629                 v3_chkpt_load_32(drive_ctx, "ACCESSED", &(drive->hd_state.accessed));
1630                 v3_chkpt_load_32(drive_ctx, "MULT_SECT_NUM", &(drive->hd_state.mult_sector_num));
1631                 v3_chkpt_load_32(drive_ctx, "CUR_SECT_NUM", &(drive->hd_state.cur_sector_num));
1632             }
1633         }
1634     }
1635
1636     return 0;
1637 }
1638
1639
1640
1641 #endif
1642
1643
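     /* Device operations exported to the device manager.  The checkpoint
      * save/load handlers are only wired in when V3_CONFIG_CHECKPOINT is set. */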
1644 static struct v3_device_ops dev_ops = {
1645     .free = (int (*)(void *))ide_free,
1646 #ifdef V3_CONFIG_CHECKPOINT
1647     .save = ide_save,
1648     .load = ide_load
1649 #endif
1650
1651 };
1652
1653
1654
1655
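     /* Block-frontend connect callback: invoked when a block backend is attached
      * to this controller.  The configuration tree selects the channel ("bus_num"),
      * the drive slot ("drive_num"), the drive type ("cdrom" or "hd"), and an
      * optional model string. */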
1656 static int connect_fn(struct v3_vm_info * vm, 
1657                       void * frontend_data, 
1658                       struct v3_dev_blk_ops * ops, 
1659                       v3_cfg_tree_t * cfg, 
1660                       void * private_data) {
1661     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1662     struct ide_channel * channel = NULL;
1663     struct ide_drive * drive = NULL;
1664
1665     char * bus_str = v3_cfg_val(cfg, "bus_num");
1666     char * drive_str = v3_cfg_val(cfg, "drive_num");
1667     char * type_str = v3_cfg_val(cfg, "type");
1668     char * model_str = v3_cfg_val(cfg, "model");
1669     uint_t bus_num = 0;
1670     uint_t drive_num = 0;
1671
1672
1673     if ((!type_str) || (!drive_str) || (!bus_str)) {
1674         PrintError("Incomplete IDE Configuration\n");
1675         return -1;
1676     }
1677
1678     bus_num = atoi(bus_str);
1679     drive_num = atoi(drive_str);
1680
         /* Guard against out-of-range channel/drive indices in the config */
         if ((bus_num >= 2) || (drive_num >= 2)) {
             PrintError("Invalid IDE bus/drive number (bus=%d, drive=%d)\n", bus_num, drive_num);
             return -1;
         }
1681     channel = &(ide->channels[bus_num]);
1682     drive = &(channel->drives[drive_num]);
1683
1684     if (drive->drive_type != BLOCK_NONE) {
1685         PrintError("Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1686         return -1;
1687     }
1688
1689     if (model_str != NULL) {
1690         strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1691     }
1692
1693     if (strcasecmp(type_str, "cdrom") == 0) {
1694         drive->drive_type = BLOCK_CDROM;
1695
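             /* The ATA/ATAPI IDENTIFY model field is a fixed 40-character,
              * space-padded string, so pad the configured name out to 40 bytes. */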
1696         while (strlen((char *)(drive->model)) < 40) {
1697             strcat((char*)(drive->model), " ");
1698         }
1699
1700     } else if (strcasecmp(type_str, "hd") == 0) {
1701         drive->drive_type = BLOCK_DISK;
1702
1703         drive->hd_state.accessed = 0;
1704         drive->hd_state.mult_sector_num = 1;
1705
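             /* Report a conventional translated geometry: 63 sectors per track and
              * 16 heads, with the cylinder count derived from the backend capacity
              * in 512-byte sectors. */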
1706         drive->num_sectors = 63;
1707         drive->num_heads = 16;
1708         drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
1709     } else {
1710         PrintError("Invalid IDE drive type \"%s\"\n", type_str);
1711         return -1;
1712     }
1713  
1714     drive->ops = ops;
1715
1716     if (ide->ide_pci) {
1717         // Hardcode this for now, but it's not a good idea....
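             // Presumably this sets the IDE Decode Enable bit in the high byte of
             // the PIIX IDETIM register for this channel (0x40-0x41 primary,
             // 0x42-0x43 secondary) so the legacy port range is decoded.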
1718         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1719     }
1720  
1721     drive->private_data = private_data;
1722
1723     return 0;
1724 }
1725
1726
1727
1728
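     /* Device initialization: allocate and zero the controller state, locate the
      * PCI bus and southbridge if one is configured, register the device, hook
      * the legacy IDE I/O ports, optionally attach to PCI as a PIIX3 IDE
      * function, and register as a block frontend so backends can connect. */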
1729 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1730     struct ide_internal * ide  = NULL;
1731     char * dev_id = v3_cfg_val(cfg, "ID");
1732     int ret = 0;
1733
1734     PrintDebug("IDE: Initializing IDE\n");
1735
1736     ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1737
1738     if (ide == NULL) {
1739         PrintError("Error allocating IDE state\n");
1740         return -1;
1741     }
1742
1743     memset(ide, 0, sizeof(struct ide_internal));
1744
1745     ide->vm = vm;
1746     ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1747
1748     if (ide->pci_bus != NULL) {
1749         struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1750
1751         if (!southbridge) {
1752             PrintError("Could not find southbridge\n");
1753             V3_Free(ide);
1754             return -1;
1755         }
1756
1757         ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1758     }
1759
1760     PrintDebug("IDE: Creating IDE bus x 2\n");
1761
1762     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
1763
1764     if (dev == NULL) {
1765         PrintError("Could not attach device %s\n", dev_id);
1766         V3_Free(ide);
1767         return -1;
1768     }
1769
1770     if (init_ide_state(ide) == -1) {
1771         PrintError("Failed to initialize IDE state\n");
1772         v3_remove_device(dev);
1773         return -1;
1774     }
1775
1776     PrintDebug("Connecting to IDE IO ports\n");
1777
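         /* The 16-bit data ports get dedicated handlers; the remaining task-file
          * and control registers share the generic handlers, with command writes
          * routed through write_cmd_port. */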
1778     ret |= v3_dev_hook_io(dev, PRI_DATA_PORT, 
1779                           &ide_read_data_port, &write_data_port);
1780     ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
1781                           &read_port_std, &write_port_std);
1782     ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
1783                           &read_port_std, &write_port_std);
1784     ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
1785                           &read_port_std, &write_port_std);
1786     ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
1787                           &read_port_std, &write_port_std);
1788     ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
1789                           &read_port_std, &write_port_std);
1790     ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
1791                           &read_port_std, &write_port_std);
1792     ret |= v3_dev_hook_io(dev, PRI_CMD_PORT, 
1793                           &read_port_std, &write_cmd_port);
1794
1795     ret |= v3_dev_hook_io(dev, SEC_DATA_PORT, 
1796                           &ide_read_data_port, &write_data_port);
1797     ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
1798                           &read_port_std, &write_port_std);
1799     ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
1800                           &read_port_std, &write_port_std);
1801     ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
1802                           &read_port_std, &write_port_std);
1803     ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
1804                           &read_port_std, &write_port_std);
1805     ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
1806                           &read_port_std, &write_port_std);
1807     ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
1808                           &read_port_std, &write_port_std);
1809     ret |= v3_dev_hook_io(dev, SEC_CMD_PORT, 
1810                           &read_port_std, &write_cmd_port);
1811   
1812
1813     ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT, 
1814                           &read_port_std, &write_port_std);
1815
1816     ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT, 
1817                           &read_port_std, &write_port_std);
1818   
1819
1820     ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
1821                           &read_port_std, &write_port_std);
1822
1823     ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
1824                           &read_port_std, &write_port_std);
1825
1826
1827     if (ret != 0) {
1828         PrintError("Error hooking IDE IO port\n");
1829         v3_remove_device(dev);
1830         return -1;
1831     }
1832
1833
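         /* When a PCI bus is present, expose the controller as a PCI IDE function.
          * BAR4 is the 16-port bus-master (BMIDE) DMA register block; it is left
          * unmapped (default_base_port = -1) until a base address is assigned. */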
1834     if (ide->pci_bus) {
1835         struct v3_pci_bar bars[6];
1836         struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1837         struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1838         struct pci_device * pci_dev = NULL;
1839         int i;
1840
1841         PrintDebug("Connecting IDE to PCI bus\n");
1842
1843         for (i = 0; i < 6; i++) {
1844             bars[i].type = PCI_BAR_NONE;
1845         }
1846
1847         bars[4].type = PCI_BAR_IO;
1848         //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
1849         bars[4].default_base_port = -1;
1850         bars[4].num_ports = 16;
1851
1852         bars[4].io_read = read_dma_port;
1853         bars[4].io_write = write_dma_port;
1854         bars[4].private_data = ide;
1855
1856         pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
1857                                          "PIIX3_IDE", bars,
1858                                          pci_config_update, NULL, NULL, ide);
1859
1860         if (pci_dev == NULL) {
1861             PrintError("Failed to register IDE controller with PCI\n");
1862             v3_remove_device(dev);
1863             return -1;
1864         }
1865
1866         /* This is for CMD646 devices 
1867            pci_dev->config_header.vendor_id = 0x1095;
1868            pci_dev->config_header.device_id = 0x0646;
1869            pci_dev->config_header.revision = 0x8f07;
1870         */
1871
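             /* Identify as the Intel 82371SB (PIIX3) IDE function */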
1872         pci_dev->config_header.vendor_id = 0x8086;
1873         pci_dev->config_header.device_id = 0x7010;
1874         pci_dev->config_header.revision = 0x00;
1875
1876         pci_dev->config_header.prog_if = 0x80; // Bus-master capable IDE programming interface
1877         pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1878         pci_dev->config_header.class = PCI_CLASS_STORAGE;
1879
1880         pci_dev->config_header.command = 0;
1881         pci_dev->config_header.status = 0x0280;
1882
1883         ide->ide_pci = pci_dev;
1884
1885
1886     }
1887
1888     if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
1889         PrintError("Could not register %s as frontend\n", dev_id);
1890         v3_remove_device(dev);
1891         return -1;
1892     }
1893     
1894
1895     PrintDebug("IDE Initialized\n");
1896
1897     return 0;
1898 }
1899
1900
1901 device_register("IDE", ide_init)
1902
1903
1904
1905
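     /* Exported helper to query the CHS geometry of the drive at
      * (channel_num, drive_num); returns -1 if no drive is present in that slot. */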
1906 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num, 
1907                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
1908
1909     struct ide_internal * ide  = ide_data;  
1910     struct ide_channel * channel = &(ide->channels[channel_num]);
1911     struct ide_drive * drive = &(channel->drives[drive_num]);
1912     
1913     if (drive->drive_type == BLOCK_NONE) {
1914         return -1;
1915     }
1916
1917     *cylinders = drive->num_cylinders;
1918     *heads = drive->num_heads;
1919     *sectors = drive->num_sectors;
1920
1921     return 0;
1922 }