Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


IDE/ATAPI bug fix - allow sense and other requests to read past
[palacios.git] / palacios / src / devices / ide.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef V3_CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
/* Legacy ISA IRQ lines for the primary and secondary ATA channels */
#define PRI_DEFAULT_IRQ 14
#define SEC_DEFAULT_IRQ 15


/* Primary channel: command block (0x1f0-0x1f7) and control block (0x3f6-0x3f7) */
#define PRI_DATA_PORT         0x1f0
#define PRI_FEATURES_PORT     0x1f1
#define PRI_SECT_CNT_PORT     0x1f2
#define PRI_SECT_NUM_PORT     0x1f3
#define PRI_CYL_LOW_PORT      0x1f4
#define PRI_CYL_HIGH_PORT     0x1f5
#define PRI_DRV_SEL_PORT      0x1f6
#define PRI_CMD_PORT          0x1f7
#define PRI_CTRL_PORT         0x3f6
#define PRI_ADDR_REG_PORT     0x3f7

/* Secondary channel: command block (0x170-0x177) and control block (0x376-0x377) */
#define SEC_DATA_PORT         0x170
#define SEC_FEATURES_PORT     0x171
#define SEC_SECT_CNT_PORT     0x172
#define SEC_SECT_NUM_PORT     0x173
#define SEC_CYL_LOW_PORT      0x174
#define SEC_CYL_HIGH_PORT     0x175
#define SEC_DRV_SEL_PORT      0x176
#define SEC_CMD_PORT          0x177
#define SEC_CTRL_PORT         0x376
#define SEC_ADDR_REG_PORT     0x377


/* Default I/O bases for the bus-master DMA register files (8 bytes per channel) */
#define PRI_DEFAULT_DMA_PORT 0xc000
#define SEC_DEFAULT_DMA_PORT 0xc008

/* Size of the per-drive staging buffer used for PIO/DMA transfers */
#define DATA_BUFFER_SIZE 2048

#define ATAPI_BLOCK_SIZE 2048   // CD-ROM logical block size
#define HD_SECTOR_SIZE 512      // ATA disk sector size
69
/* Debug names for the primary-channel ports: indices 0-7 are the command
 * block (0x1f0-0x1f7), indices 8-9 the control block (0x3f6-0x3f7). */
static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
                                          "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
                                           "PRI_CTRL", "PRI_ADDR_REG"};


/* Same layout for the secondary channel (0x170-0x177, 0x376-0x377) */
static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
                                          "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
                                           "SEC_CTRL", "SEC_ADDR_REG"};

/* Bus-master DMA register names, indexed by byte offset; NULL = reserved byte */
static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
                                           "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};


// What kind of backend (if any) is attached to a drive slot
typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
/* Per-drive state that only applies to ATAPI (CD-ROM) drives */
struct ide_cd_state {
    struct atapi_sense_data sense;    // sense data returned for REQUEST SENSE

    uint8_t atapi_cmd;                // opcode of the ATAPI packet being serviced
    struct atapi_error_recovery err_recovery;
};
111
/* Per-drive state that only applies to ATA hard disks */
struct ide_hd_state {
    uint32_t accessed;    // nonzero once the guest has touched the drive

    /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
    uint32_t mult_sector_num;

    /* This is the current op sector size:
     * for multiple sector ops this equals mult_sector_num
     * for standard ops this equals 1
     */
    uint64_t cur_sector_num;
};
124
/* Per-drive state: backend hookup, transfer bookkeeping, and the shadow
 * copies of the taskfile registers that are per-drive rather than
 * per-channel. */
struct ide_drive {
    // Command Registers

    v3_block_type_t drive_type;

    struct v3_dev_blk_ops * ops;    // backend block-device callbacks

    // Drive-type specific state: a drive is either a disk or a cdrom
    union {
        struct ide_cd_state cd_state;
        struct ide_hd_state hd_state;
    };

    char model[41];    // model string reported to the guest

    // Where we are in the data transfer
    uint64_t transfer_index;

    // the length of a transfer
    // calculated for easy access
    uint64_t transfer_length;

    uint64_t current_lba;    // next logical block to read/write

    // We have a local data buffer that we use for IO port accesses
    uint8_t data_buf[DATA_BUFFER_SIZE];


    // Geometry reported to the guest
    uint32_t num_cylinders;
    uint32_t num_heads;
    uint32_t num_sectors;


    /* LBA48 taskfile shadow: the high/low halves of each register arrive
     * as two successive writes to the same port. */
    struct lba48_state {
        // all start at zero
        uint64_t lba;                  
        uint16_t sector_count;            // for LBA48
        uint8_t  sector_count_state;      // two step write to 1f2/172 (high first)
        uint8_t  lba41_state;             // two step write to 1f3
        uint8_t  lba52_state;             // two step write to 1f4
        uint8_t  lba63_state;             // two step write to 1f5
    } lba48;

    void * private_data;    // opaque handle passed to the ops callbacks
    
    union {
        uint8_t sector_count;             // 0x1f2,0x172  (ATA)
        struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
    } __attribute__((packed));


    union {
        uint8_t sector_num;               // 0x1f3,0x173
        uint8_t lba0;                     // LBA28 bits 0-7
    } __attribute__((packed));

    union {
        uint16_t cylinder;
        uint16_t lba12;                   // LBA28 bits 8-23
        
        struct {
            uint8_t cylinder_low;       // 0x1f4,0x174
            uint8_t cylinder_high;      // 0x1f5,0x175
        } __attribute__((packed));
        
        struct {
            uint8_t lba1;
            uint8_t lba2;
        } __attribute__((packed));
        
        
        // The transfer length requested by the CPU 
        uint16_t req_len;
    } __attribute__((packed));

};
200
201
202
/* Per-channel state: the two attached drives plus the channel-wide
 * taskfile, control, and bus-master DMA registers. */
struct ide_channel {
    struct ide_drive drives[2];    // indexed by drive_head.drive_sel

    // Command Registers
    struct ide_error_reg error_reg;     // [read] 0x1f1,0x171

    struct ide_features_reg features;

    struct ide_drive_head_reg drive_head; // 0x1f6,0x176

    struct ide_status_reg status;       // [read] 0x1f7,0x177
    uint8_t cmd_reg;                // [write] 0x1f7,0x177

    int irq; // this is temporary until we add PCI support

    // Control Registers
    struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376

    // Bus-master DMA register file: 8 bytes, byte-addressable via dma_ports
    union {
        uint8_t dma_ports[8];
        struct {
            struct ide_dma_cmd_reg dma_cmd;
            uint8_t rsvd1;
            struct ide_dma_status_reg dma_status;
            uint8_t rsvd2;
            uint32_t dma_prd_addr;    // guest-physical base of the PRD table
        } __attribute__((packed));
    } __attribute__((packed));

    uint32_t dma_tbl_index;    // index of the next PRD entry to process
};
234
235
236
/* Top-level device state: both channels plus PCI attachment handles */
struct ide_internal {
    struct ide_channel channels[2];    // [0] = primary, [1] = secondary

    struct v3_southbridge * southbridge;
    struct vm_device * pci_bus;

    struct pci_device * ide_pci;

    struct v3_vm_info * vm;    // used for IRQ delivery and guest memory access
};
247
248
249
250
251
252 /* Utility functions */
253
/* Reverse the two bytes of val as they sit in memory.
 * (Despite the name this is a plain byte swap; on a little-endian host it
 * converts big-endian wire values to host order.) */
static inline uint16_t be_to_le_16(const uint16_t val) {
    uint8_t bytes[2];

    memcpy(bytes, &val, sizeof(bytes));

    return (uint16_t)((bytes[0] << 8) | bytes[1]);
}
258
/* Convert little-endian to big-endian; a 16-bit byte swap is its own
 * inverse, so this performs the identical operation as be_to_le_16. */
static inline uint16_t le_to_be_16(const uint16_t val) {
    const uint8_t * b = (const uint8_t *)&val;
    return (uint16_t)((b[0] << 8) | b[1]);
}
262
263
/* Reverse the four bytes of val as they sit in memory.
 *
 * Each byte is widened to uint32_t BEFORE shifting: the original code
 * shifted the (int-promoted) uint8_t left by 24, which is undefined
 * behavior whenever the byte's top bit is set (shift into the sign bit
 * of a signed int, C11 6.5.7). The cast keeps the arithmetic unsigned. */
static inline uint32_t be_to_le_32(const uint32_t val) {
    const uint8_t * buf = (const uint8_t *)&val;
    return ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
           ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
}
268
/* Convert little-endian to big-endian; a 32-bit byte reversal is its own
 * inverse, so this is the same memory-order byte swap as be_to_le_32. */
static inline uint32_t le_to_be_32(const uint32_t val) {
    const uint8_t * src = (const uint8_t *)&val;
    uint32_t out = 0;
    int i;

    for (i = 0; i < 4; i++) {
        out = (out << 8) | src[i];
    }

    return out;
}
272
273
/* Addressing-mode predicates decoded from the drive/head register.
 * rsvd1/rsvd2 appear to be the historical "always 1" bits of that
 * register: legacy LBA28 software sets them, while LBA48-aware software
 * writes them as zero -- which is how the two LBA modes are told apart
 * here. NOTE(review): the exact bit positions live in ide-types.h;
 * confirm there. */
static inline int is_lba28(struct ide_channel * channel) {
    return channel->drive_head.lba_mode && channel->drive_head.rsvd1 && channel->drive_head.rsvd2;
}

static inline int is_lba48(struct ide_channel * channel) {
    return channel->drive_head.lba_mode && !channel->drive_head.rsvd1 && !channel->drive_head.rsvd2;
}

// CHS addressing whenever LBA mode is not selected
static inline int is_chs(struct ide_channel * channel) {
    return !channel->drive_head.lba_mode;
}
285
286 static inline int get_channel_index(ushort_t port) {
287     if (((port & 0xfff8) == 0x1f0) ||
288         ((port & 0xfffe) == 0x3f6) || 
289         ((port & 0xfff8) == 0xc000)) {
290         return 0;
291     } else if (((port & 0xfff8) == 0x170) ||
292                ((port & 0xfffe) == 0x376) ||
293                ((port & 0xfff8) == 0xc008)) {
294         return 1;
295     }
296
297     return -1;
298 }
299
/* Look up the channel that owns the given I/O port.
 * NOTE(review): get_channel_index() can return -1 for an unrecognized
 * port, which would index channels[-1] here; callers must only pass
 * ports that were actually registered for this device. */
static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
    int channel_idx = get_channel_index(port);    
    return &(ide->channels[channel_idx]);
}

/* Return the drive currently selected by the channel's drive/head register */
static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
    return &(channel->drives[channel->drive_head.drive_sel]);
}
308
309
310
311
312 /* Drive Commands */
/* Raise the channel's IRQ in the guest, unless the guest has masked it
 * via the device-control register. Also latches the bus-master status
 * "interrupt generated" bit, which the guest's ISR reads and clears. */
static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
    if (channel->ctrl_reg.irq_disable == 0) {

        PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);

        channel->dma_status.int_gen = 1;
        v3_raise_irq(ide->vm, channel->irq);
    } else {
        PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
    }
}
324
325
/* Reset one drive's taskfile shadow registers and staging buffer.
 * The cylinder registers are loaded with the device signature the guest
 * uses to detect the device class: 0xeb14 for an ATAPI (packet) device,
 * 0x0000 for an ATA disk. */
static void drive_reset(struct ide_drive * drive) {
    drive->sector_count = 0x01;
    drive->sector_num = 0x01;

    PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
    
    if (drive->drive_type == BLOCK_CDROM) {
        drive->cylinder = 0xeb14;    // ATAPI signature (cyl low/high = 0x14/0xeb)
    } else {
        drive->cylinder = 0x0000;
        //drive->hd_state.accessed = 0;
    }


    memset(drive->data_buf, 0, sizeof(drive->data_buf));
    drive->transfer_index = 0;

    // Send the reset signal to the connected device callbacks
    //     channel->drives[0].reset();
    //    channel->drives[1].reset();
}
347
/* Begin a soft reset of the channel; the guest sees BSY until
 * channel_reset_complete() runs. */
static void channel_reset(struct ide_channel * channel) {
    
    // set busy and seek complete flags
    channel->status.val = 0x90;

    // Clear errors (0x01 is the "no error" diagnostic code)
    channel->error_reg.val = 0x01;

    // clear commands
    channel->cmd_reg = 0;  // NOP

    channel->ctrl_reg.irq_disable = 0;
}
361
/* Finish a soft reset: clear BSY, assert DRDY, and reset both drives */
static void channel_reset_complete(struct ide_channel * channel) {
    channel->status.busy = 0;
    channel->status.ready = 1;

    channel->drive_head.head_num = 0;    
    
    drive_reset(&(channel->drives[0]));
    drive_reset(&(channel->drives[1]));
}
371
372
/* Fail the current command: status = ERR|DRDY, error = ABRT, then
 * interrupt the guest so it notices the failure. */
static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {

    PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");

    channel->status.val = 0x41; // Error + ready
    channel->error_reg.val = 0x04; // ABRT bit of the ATA error register

    ide_raise_irq(ide, channel);
}
382
383
384 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
385 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
386
387
388 /* ATAPI functions */
389 #include "atapi.h"
390
391 /* ATA functions */
392 #include "ata.h"
393
394
395
/* Debug helper: dump the guest's PRD (Physical Region Descriptor) table,
 * read entry by entry from guest physical memory via core 0.
 * NOTE(review): the loop only terminates on an EOT entry or a failed
 * read -- a malformed guest table with no EOT would spin here forever. */
static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_dma_prd prd_entry;
    int index = 0;

    V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");

    while (1) {
        uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
        int ret = 0;

        ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
        
        if (ret != sizeof(struct ide_dma_prd)) {
            PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
            return;
        }

        // A size field of 0 encodes a 64KB region
        V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
                   prd_entry.base_addr, 
                   (prd_entry.size == 0) ? 0x10000 : prd_entry.size, 
                   prd_entry.end_of_table);

        if (prd_entry.end_of_table) {
            break;
        }

        index++;
    }

    return;
}
427
428
429 /* IO Operations */
430 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
431     struct ide_drive * drive = get_selected_drive(channel);
432     // This is at top level scope to do the EOT test at the end
433     struct ide_dma_prd prd_entry = {};
434     uint_t bytes_left = drive->transfer_length;
435
436     // Read in the data buffer....
437     // Read a sector/block at a time until the prd entry is full.
438
439 #ifdef V3_CONFIG_DEBUG_IDE
440     print_prd_table(ide, channel);
441 #endif
442
443     PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
444
445     // Loop through the disk data
446     while (bytes_left > 0) {
447         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
448         uint_t prd_bytes_left = 0;
449         uint_t prd_offset = 0;
450         int ret;
451
452         PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
453
454         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
455
456         if (ret != sizeof(struct ide_dma_prd)) {
457             PrintError(core->vm_info, core, "Could not read PRD\n");
458             return -1;
459         }
460
461         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
462                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
463
464         // loop through the PRD data....
465
466         if (prd_entry.size == 0) {
467             // a size of 0 means 64k
468             prd_bytes_left = 0x10000;
469         } else {
470             prd_bytes_left = prd_entry.size;
471         }
472
473
474         while (prd_bytes_left > 0) {
475             uint_t bytes_to_write = 0;
476
477             if (drive->drive_type == BLOCK_DISK) {
478                 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
479
480
481                 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
482                     PrintError(core->vm_info, core, "Failed to read next disk sector\n");
483                     return -1;
484                 }
485             } else if (drive->drive_type == BLOCK_CDROM) {
486                 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
487                     bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
488
489                     if (atapi_read_chunk(ide, channel) == -1) {
490                         PrintError(core->vm_info, core, "Failed to read next disk sector\n");
491                         return -1;
492                     }
493                 } else {
494                     /*
495                     PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
496                     return -1;
497                     */
498                     int cmd_ret = 0;
499
500                     //V3_Print(core->vm_info, core, "DMA of command packet\n");
501
502                     bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
503                     prd_bytes_left = bytes_to_write;
504
505
506                     // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
507                     cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, 
508                                                   bytes_to_write, drive->data_buf); 
509
510                     if (cmd_ret!=bytes_to_write) { 
511                         PrintError(core->vm_info, core, "Failed to write data to memory\n");
512                         return -1;
513                     }
514
515
516
517                     bytes_to_write = 0;
518                     prd_bytes_left = 0;
519                     drive->transfer_index += bytes_to_write;
520
521                     channel->status.busy = 0;
522                     channel->status.ready = 1;
523                     channel->status.data_req = 0;
524                     channel->status.error = 0;
525                     channel->status.seek_complete = 1;
526
527                     channel->dma_status.active = 0;
528                     channel->dma_status.err = 0;
529
530                     ide_raise_irq(ide, channel);
531                     
532                     return 0;
533                 }
534             }
535
536             PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n", 
537                        (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
538
539             drive->current_lba++;
540
541             ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 
542
543             if (ret != bytes_to_write) {
544                 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
545                 return -1;
546             }
547
548             PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
549
550             drive->transfer_index += ret;
551             prd_bytes_left -= ret;
552             prd_offset += ret;
553             bytes_left -= ret;
554         }
555
556         channel->dma_tbl_index++;
557
558         if (drive->drive_type == BLOCK_DISK) {
559             if (drive->transfer_index % HD_SECTOR_SIZE) {
560                 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
561                 return -1;
562             }
563         } else if (drive->drive_type == BLOCK_CDROM) {
564             if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
565                 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
566                     PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
567                     PrintError(core->vm_info, core, "transfer_index=%llu, transfer_length=%llu\n", 
568                                drive->transfer_index, drive->transfer_length);
569                     return -1;
570                 }
571             }
572         }
573
574
575         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
576             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
577             return -1;
578         }
579     }
580
581     /*
582       drive->irq_flags.io_dir = 1;
583       drive->irq_flags.c_d = 1;
584       drive->irq_flags.rel = 0;
585     */
586
587
588     // Update to the next PRD entry
589
590     // set DMA status
591
592     if (prd_entry.end_of_table) {
593         channel->status.busy = 0;
594         channel->status.ready = 1;
595         channel->status.data_req = 0;
596         channel->status.error = 0;
597         channel->status.seek_complete = 1;
598
599         channel->dma_status.active = 0;
600         channel->dma_status.err = 0;
601     }
602
603     ide_raise_irq(ide, channel);
604
605     return 0;
606 }
607
608
609 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
610     struct ide_drive * drive = get_selected_drive(channel);
611     // This is at top level scope to do the EOT test at the end
612     struct ide_dma_prd prd_entry = {};
613     uint_t bytes_left = drive->transfer_length;
614
615
616     PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
617
618     // Loop through disk data
619     while (bytes_left > 0) {
620         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
621         uint_t prd_bytes_left = 0;
622         uint_t prd_offset = 0;
623         int ret;
624         
625         PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
626
627         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
628
629         if (ret != sizeof(struct ide_dma_prd)) {
630             PrintError(core->vm_info, core, "Could not read PRD\n");
631             return -1;
632         }
633
634         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
635                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
636
637
638         if (prd_entry.size == 0) {
639             // a size of 0 means 64k
640             prd_bytes_left = 0x10000;
641         } else {
642             prd_bytes_left = prd_entry.size;
643         }
644
645         while (prd_bytes_left > 0) {
646             uint_t bytes_to_write = 0;
647
648
649             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
650
651
652             ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
653
654             if (ret != bytes_to_write) {
655                 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
656                 return -1;
657             }
658
659             PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
660
661
662             if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
663                 PrintError(core->vm_info, core, "Failed to write data to disk\n");
664                 return -1;
665             }
666             
667             drive->current_lba++;
668
669             drive->transfer_index += ret;
670             prd_bytes_left -= ret;
671             prd_offset += ret;
672             bytes_left -= ret;
673         }
674
675         channel->dma_tbl_index++;
676
677         if (drive->transfer_index % HD_SECTOR_SIZE) {
678             PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
679             return -1;
680         }
681
682         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
683             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
684             PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%llu)...\n", 
685                        bytes_left, drive->transfer_length);
686             PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
687                        prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
688
689             print_prd_table(ide, channel);
690             return -1;
691         }
692     }
693
694     if (prd_entry.end_of_table) {
695         channel->status.busy = 0;
696         channel->status.ready = 1;
697         channel->status.data_req = 0;
698         channel->status.error = 0;
699         channel->status.seek_complete = 1;
700
701         channel->dma_status.active = 0;
702         channel->dma_status.err = 0;
703     }
704
705     ide_raise_irq(ide, channel);
706
707     return 0;
708 }
709
710
711
/* Bus-master (DMA engine) register offsets within each channel's 8-byte block */
#define DMA_CMD_PORT      0x00
#define DMA_STATUS_PORT   0x02
#define DMA_PRD_PORT0     0x04
#define DMA_PRD_PORT1     0x05
#define DMA_PRD_PORT2     0x06
#define DMA_PRD_PORT3     0x07

#define DMA_CHANNEL_FLAG  0x08   // bit selecting primary (0) vs secondary (1) channel

/*
  Note that DMA model is as follows:

    1. Write the PRD pointer to the busmaster (DMA engine)
    2. Start the transfer on the device
    3. Tell the busmaster to start shoveling data (active DMA)
*/
728
/* Handle guest writes to the bus-master DMA register file (command,
 * status, PRD table pointer) for either channel.
 *
 * Returns the number of bytes consumed, or -1 on error.
 */
static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);   // register index within the 8-byte block
    uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;   // 0 = primary, 1 = secondary
    struct ide_channel * channel = &(ide->channels[channel_flag]);

    PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
               port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);

    switch (port_offset) {
        case DMA_CMD_PORT:
            channel->dma_cmd.val = *(uint8_t *)src;
            
            PrintDebug(core->vm_info, core, "IDE: dma command write:  0x%x\n", channel->dma_cmd.val);

            if (channel->dma_cmd.start == 0) {
                // Stopping the engine rewinds the PRD walk
                channel->dma_tbl_index = 0;
            } else {
                // Launch DMA operation, interrupt at end

                channel->dma_status.active = 1;

                if (channel->dma_cmd.read == 1) {
                    // DMA Read the whole thing - dma_read will raise irq
                    if (dma_read(core, ide, channel) == -1) {
                        PrintError(core->vm_info, core, "Failed DMA Read\n");
                        return -1;
                    }
                } else {
                    // DMA write the whole thing - dma_write will raise irq
                    if (dma_write(core, ide, channel) == -1) {
                        PrintError(core->vm_info, core, "Failed DMA Write\n");
                        return -1;
                    }
                }
                
                // DMA complete
                // Note that guest cannot abort a DMA transfer
                channel->dma_cmd.start = 0;
            }

            break;
            
        case DMA_STATUS_PORT: {
            // This is intended to clear status

            uint8_t val = *(uint8_t *)src;

            if (length != 1) {
                PrintError(core->vm_info, core, "Invalid write length for DMA status port\n");
                return -1;
            }

            // but preserve certain bits:
            // bits 5-6 are read/write, bit 0 (active) is read-only and kept,
            // and bits 1-2 (error / interrupt) are write-1-to-clear.
            channel->dma_status.val = ((val & 0x60) | 
                                       (channel->dma_status.val & 0x01) |
                                       (channel->dma_status.val & ~val & 0x06));

            break;
        }           
        case DMA_PRD_PORT0:
        case DMA_PRD_PORT1:
        case DMA_PRD_PORT2:
        case DMA_PRD_PORT3: {
            // Byte-wise update of the 32-bit PRD table base address
            uint_t addr_index = port_offset & 0x3;
            uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
            int i = 0;

            if (addr_index + length > 4) {
                PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
                return -1;
            }

            for (i = 0; i < length; i++) {
                addr_buf[addr_index + i] = *((uint8_t *)src + i);
            }

            PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);

            break;
        }
        default:
            PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
            break;
    }

    return length;
}
817
818
819 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
820     struct ide_internal * ide = (struct ide_internal *)private_data;
821     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
822     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
823     struct ide_channel * channel = &(ide->channels[channel_flag]);
824
825     PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
826
827     if (port_offset + length > 16) {
828         PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
829         return -1;
830     }
831
832     memcpy(dst, channel->dma_ports + port_offset, length);
833     
834     PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
835
836     return length;
837 }
838
839
840
841 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
842     struct ide_internal * ide = priv_data;
843     struct ide_channel * channel = get_selected_channel(ide, port);
844     struct ide_drive * drive = get_selected_drive(channel);
845
846     if (length != 1) {
847         PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
848         return -1;
849     }
850
851     PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
852     
853     channel->cmd_reg = *(uint8_t *)src;
854     
855     switch (channel->cmd_reg) {
856
857         case ATA_PIDENTIFY: // ATAPI Identify Device Packet (CDROM)
858             if (drive->drive_type != BLOCK_CDROM) {
859                 drive_reset(drive);
860
861                 // JRL: Should we abort here?
862                 ide_abort_command(ide, channel);
863             } else {
864                 
865                 atapi_identify_device(drive);
866                 
867                 channel->error_reg.val = 0;
868                 channel->status.val = 0x58; // ready, data_req, seek_complete
869             
870                 ide_raise_irq(ide, channel);
871             }
872             break;
873
874         case ATA_IDENTIFY: // Identify Device
875             if (drive->drive_type != BLOCK_DISK) {
876                 drive_reset(drive);
877
878                 // JRL: Should we abort here?
879                 ide_abort_command(ide, channel);
880             } else {
881                 ata_identify_device(drive);
882
883                 channel->error_reg.val = 0;
884                 channel->status.val = 0x58;
885
886                 ide_raise_irq(ide, channel);
887             }
888             break;
889
890         case ATA_PACKETCMD: // ATAPI Command Packet (CDROM)
891             if (drive->drive_type != BLOCK_CDROM) {
892                 ide_abort_command(ide, channel);
893             }
894             
895             drive->sector_count = 1;
896
897             channel->status.busy = 0;
898             channel->status.write_fault = 0;
899             channel->status.data_req = 1;
900             channel->status.error = 0;
901
902             // reset the data buffer...
903             drive->transfer_length = ATAPI_PACKET_SIZE;
904             drive->transfer_index = 0;
905
906             break;
907
908         case ATA_READ:      // Read Sectors with Retry
909         case ATA_READ_ONCE: // Read Sectors without Retry
910         case ATA_MULTREAD:  // Read multiple sectors per ire
911         case ATA_READ_EXT:  // Read Sectors Extended (LBA48)
912
913             if (channel->cmd_reg==ATA_MULTREAD) { 
914                 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
915             } else {
916                 drive->hd_state.cur_sector_num = 1;
917             }
918
919             if (ata_read_sectors(ide, channel) == -1) {
920                 PrintError(core->vm_info, core, "Error reading sectors\n");
921                 ide_abort_command(ide,channel);
922             }
923             break;
924
925         case ATA_WRITE:            // Write Sector with retry
926         case ATA_WRITE_ONCE:       // Write Sector without retry
927         case ATA_MULTWRITE:        // Write multiple sectors per irq
928         case ATA_WRITE_EXT:        // Write Sectors Extended (LBA48)
929
930             if (channel->cmd_reg==ATA_MULTWRITE) { 
931                 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
932             } else {
933                 drive->hd_state.cur_sector_num = 1;
934             }
935
936             if (ata_write_sectors(ide, channel) == -1) {
937                 PrintError(core->vm_info, core, "Error writing sectors\n");
938                 ide_abort_command(ide,channel);
939             }
940             break;
941
942         case ATA_READDMA:            // Read DMA with retry
943         case ATA_READDMA_ONCE:       // Read DMA without retry
944         case ATA_READDMA_EXT:      { // Read DMA (LBA48)
945             uint64_t sect_cnt;
946
947             if (ata_get_lba_and_size(ide, channel, &(drive->current_lba), &sect_cnt) == -1) {
948                 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
949                 ide_abort_command(ide, channel);
950                 return length;
951             }
952             
953             drive->hd_state.cur_sector_num = 1;  // Not used for DMA
954             
955             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
956             drive->transfer_index = 0;
957
958             // Now we wait for the transfer to be intiated by flipping the 
959             // bus-master start bit
960             break;
961         }
962
963         case ATA_WRITEDMA:        // Write DMA with retry
964         case ATA_WRITEDMA_ONCE:   // Write DMA without retry
965         case ATA_WRITEDMA_EXT:  { // Write DMA (LBA48)
966
967             uint64_t sect_cnt;
968
969             if (ata_get_lba_and_size(ide, channel, &(drive->current_lba),&sect_cnt) == -1) {
970                 PrintError(core->vm_info,core,"Cannot get lba\n");
971                 ide_abort_command(ide, channel);
972                 return length;
973             }
974
975             drive->hd_state.cur_sector_num = 1;  // Not used for DMA
976
977             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
978             drive->transfer_index = 0;
979
980             // Now we wait for the transfer to be intiated by flipping the 
981             // bus-master start bit
982             break;
983         }
984
985         case ATA_STANDBYNOW1: // Standby Now 1
986         case ATA_IDLEIMMEDIATE: // Set Idle Immediate
987         case ATA_STANDBY: // Standby
988         case ATA_SETIDLE1: // Set Idle 1
989         case ATA_SLEEPNOW1: // Sleep Now 1
990         case ATA_STANDBYNOW2: // Standby Now 2
991         case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
992         case ATA_STANDBY2: // Standby 2
993         case ATA_SETIDLE2: // Set idle 2
994         case ATA_SLEEPNOW2: // Sleep Now 2
995             channel->status.val = 0;
996             channel->status.ready = 1;
997             ide_raise_irq(ide, channel);
998             break;
999
1000         case ATA_SETFEATURES: // Set Features
1001             // Prior to this the features register has been written to. 
1002             // This command tells the drive to check if the new value is supported (the value is drive specific)
1003             // Common is that bit0=DMA enable
1004             // If valid the drive raises an interrupt, if not it aborts.
1005
1006             // Do some checking here...
1007
1008             channel->status.busy = 0;
1009             channel->status.write_fault = 0;
1010             channel->status.error = 0;
1011             channel->status.ready = 1;
1012             channel->status.seek_complete = 1;
1013             
1014             ide_raise_irq(ide, channel);
1015             break;
1016
1017         case ATA_SPECIFY:  // Initialize Drive Parameters
1018         case ATA_RECAL:  // recalibrate?
1019             channel->status.error = 0;
1020             channel->status.ready = 1;
1021             channel->status.seek_complete = 1;
1022             ide_raise_irq(ide, channel);
1023             break;
1024
1025         case ATA_SETMULT: { // Set multiple mode (IDE Block mode) 
1026             // This makes the drive transfer multiple sectors before generating an interrupt
1027
1028             if (drive->sector_count == 0) {
1029                 PrintError(core->vm_info,core,"Attempt to set multiple to zero\n");
1030                 drive->hd_state.mult_sector_num= 1;
1031                 ide_abort_command(ide,channel);
1032                 break;
1033             } else {
1034                 drive->hd_state.mult_sector_num = drive->sector_count;
1035             }
1036
1037             channel->status.ready = 1;
1038             channel->status.error = 0;
1039
1040             ide_raise_irq(ide, channel);
1041
1042             break;
1043         }
1044
1045         case ATA_DEVICE_RESET: // Reset Device
1046             drive_reset(drive);
1047             channel->error_reg.val = 0x01;
1048             channel->status.busy = 0;
1049             channel->status.ready = 1;
1050             channel->status.seek_complete = 1;
1051             channel->status.write_fault = 0;
1052             channel->status.error = 0;
1053             break;
1054
1055         case ATA_CHECKPOWERMODE1: // Check power mode
1056             drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1057             channel->status.busy = 0;
1058             channel->status.ready = 1;
1059             channel->status.write_fault = 0;
1060             channel->status.data_req = 0;
1061             channel->status.error = 0;
1062             break;
1063
1064         default:
1065             PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1066             ide_abort_command(ide, channel);
1067             break;
1068     }
1069
1070     return length;
1071 }
1072
1073
1074
1075
/* Service a PIO data-port read from a hard disk.
 * Copies up to 'length' bytes from the one-sector staging buffer
 * (drive->data_buf), refilling the buffer from the backing store at each
 * sector boundary, and raises an IRQ at the end of each transfer
 * increment (cur_sector_num sectors, or the final byte of the request).
 * Returns length on success, -1 on error.
 */
static int read_hd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Byte position within the current sector's staging buffer
    uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;


    PrintDebug(VM_NONE,VCORE_NONE, "Read HD data:  transfer_index %llu transfer length %llu current sector numer %llu\n",
               drive->transfer_index, drive->transfer_length, 
               drive->hd_state.cur_sector_num);

    // Reads are allowed to run past transfer_length as long as they stay
    // inside the staging buffer (this permits sense and other requests to
    // over-read); only fail once BOTH limits are exceeded.
    if (drive->transfer_index >= drive->transfer_length && drive->transfer_index>=DATA_BUFFER_SIZE) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
                   drive->transfer_length, drive->transfer_index,
                   drive->transfer_index + length);
        return -1;
    }


    // Diagnostic only: a sector-straddling read is logged but not rejected
    // (the staging buffer is larger than one sector, so the memcpy below
    // stays in bounds -- see the matching note in write_hd_data)
    if (data_offset + length > HD_SECTOR_SIZE) { 
       PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
    }
   
    // For index==0, the read has been done in ata_read_sectors
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        // advance to next sector and read it
        
        drive->current_lba++;

        if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
            return -1;
        }
    }

    /*
      PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
      *(uint32_t *)(drive->data_buf + data_offset), 
      length, data_offset);
    */
    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;


    /* This is the trigger for interrupt injection.
     * For read single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
        (drive->transfer_index == drive->transfer_length)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
            channel->status.data_req = 1;
        } else {
            PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
            // This was the final read of the request
            channel->status.data_req = 0;
        }

        channel->status.ready = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }


    return length;
}
1146
/* Service a PIO data-port write to a hard disk.
 * Accumulates bytes into the one-sector staging buffer (drive->data_buf),
 * flushes the buffer to the backing store each time a full sector has
 * been received, and raises an IRQ at the end of each transfer increment
 * (cur_sector_num sectors, or the final byte of the request).
 * Returns length on success, -1 on error.
 */
static int write_hd_data(uint8_t * src, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Byte position within the current sector's staging buffer
    uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;


    PrintDebug(VM_NONE,VCORE_NONE, "Write HD data:  transfer_index %llu transfer length %llu current sector numer %llu\n",
               drive->transfer_index, drive->transfer_length, 
               drive->hd_state.cur_sector_num);

    // Unlike the read path, writes must never exceed the requested length
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
                   drive->transfer_length, drive->transfer_index,
                   drive->transfer_index + length);
        return -1;
    }

    // Diagnostic only: a sector-straddling write is logged but not rejected
    if (data_offset + length > HD_SECTOR_SIZE) { 
       PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
    }

    // Copy data into our buffer - there will be room due to
    // (a) the ata_write test below is flushing sectors
    // (b) if we somehow get a sector-stradling write (an error), this will
    //     be OK since the buffer itself is >1 sector in memory
    memcpy(drive->data_buf + data_offset, src, length);

    drive->transfer_index += length;

    if ((data_offset+length) >= HD_SECTOR_SIZE) {
        // Write out the sector we just finished
        if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
            return -1;
        }

        // go onto next sector
        drive->current_lba++;
    }

    /* This is the trigger for interrupt injection.
     * For write single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
        (drive->transfer_index == drive->transfer_length)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
            channel->status.data_req = 1;
        } else {
            PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
            // This was the final read of the request
            channel->status.data_req = 0;
        }

        channel->status.ready = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1212
1213
1214
/* Service a PIO data-port read from an ATAPI CDROM.
 * Copies up to 'length' bytes from the staging buffer, refilling it at
 * each ATAPI block boundary, and maintains the ATAPI interrupt-reason
 * flags and request-length registers as the transfer progresses.
 * Returns length on success, -1 on error.
 */
static int read_cd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Byte position within the current ATAPI block's staging buffer
    uint64_t data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
    //  int req_offset = drive->transfer_index % drive->req_len;
    
    // 0x28 is the ATAPI READ(10) opcode; suppress debug spam for bulk reads
    if (drive->cd_state.atapi_cmd != 0x28) {
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%llu) (req_len=%u)\n", length, drive->req_len);
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%llu, transfer idx=%llu\n", drive->transfer_length, drive->transfer_index);
    }

    

    // Reads may run past transfer_length (e.g. sense requests over-read)
    // as long as they stay inside the staging buffer; only fail once BOTH
    // limits are exceeded.
    if (drive->transfer_index >= drive->transfer_length && drive->transfer_index>=DATA_BUFFER_SIZE) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n", 
                   drive->transfer_length, drive->transfer_index, 
                   drive->transfer_index + length);
        return -1;
    }

    
    // At a block boundary (but not before the first byte): refill the buffer
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        if (atapi_update_data_buf(ide, channel) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
            return -1;
        }
    }

    memcpy(dst, drive->data_buf + data_offset, length);
    
    drive->transfer_index += length;


    // Should the req_offset be recalculated here?????
    // NOTE(review): with the req_offset test commented out, this branch is
    // taken on every read once any data has moved -- confirm the resulting
    // IRQ rate is intended
    if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            
            channel->status.data_req = 1;

            drive->irq_flags.c_d = 0;

            // Update the request length in the cylinder regs
            if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
                PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
                return -1;
            }
        } else {
            // This was the final read of the request

            drive->req_len = 0;
            channel->status.data_req = 0;
            channel->status.ready = 1;
            
            drive->irq_flags.c_d = 1;
            drive->irq_flags.rel = 0;
        }

        drive->irq_flags.io_dir = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1280
1281
1282 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1283     struct ide_drive * drive = get_selected_drive(channel);
1284
1285     channel->status.busy = 0;
1286     channel->status.ready = 1;
1287     channel->status.write_fault = 0;
1288     channel->status.seek_complete = 1;
1289     channel->status.corrected = 0;
1290     channel->status.error = 0;
1291                 
1292     
1293     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1294     drive->transfer_index += length;
1295     
1296     if (drive->transfer_index >= drive->transfer_length) {
1297         channel->status.data_req = 0;
1298     }
1299     
1300     return length;
1301 }
1302
1303
1304
1305 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1306     struct ide_internal * ide = priv_data;
1307     struct ide_channel * channel = get_selected_channel(ide, port);
1308     struct ide_drive * drive = get_selected_drive(channel);
1309
1310     //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
1311
1312     if ((channel->cmd_reg == ATA_IDENTIFY) ||
1313         (channel->cmd_reg == ATA_PIDENTIFY)) {
1314         return read_drive_id((uint8_t *)dst, length, ide, channel);
1315     }
1316
1317     if (drive->drive_type == BLOCK_CDROM) {
1318         if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1319             PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1320             return -1;
1321         }
1322     } else if (drive->drive_type == BLOCK_DISK) {
1323         if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1324             PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
1325             return -1;
1326         }
1327     } else {
1328         memset((uint8_t *)dst, 0, length);
1329     }
1330
1331     return length;
1332 }
1333
1334 // For the write side, we care both about
1335 // direct PIO writes to a drive as well as 
1336 // writes that pass a packet through to an CD
1337 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1338     struct ide_internal * ide = priv_data;
1339     struct ide_channel * channel = get_selected_channel(ide, port);
1340     struct ide_drive * drive = get_selected_drive(channel);
1341
1342     PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n", 
1343             port, *(uint32_t *)src, length);
1344
1345     if (drive->drive_type == BLOCK_CDROM) {
1346         if (channel->cmd_reg == ATA_PACKETCMD) { 
1347             // short command packet - no check for space... 
1348             memcpy(drive->data_buf + drive->transfer_index, src, length);
1349             drive->transfer_index += length;
1350             if (drive->transfer_index >= drive->transfer_length) {
1351                 if (atapi_handle_packet(core, ide, channel) == -1) {
1352                     PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1353                     return -1;
1354                 }
1355             }
1356         } else {
1357             PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1358             return -1;
1359         }
1360     } else if (drive->drive_type == BLOCK_DISK) {
1361         if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1362             PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1363             return -1;
1364         }
1365     } else {
1366         // nothing ... do not support writable cd
1367     }
1368
1369     return length;
1370 }
1371
1372 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1373     struct ide_internal * ide = priv_data;
1374     struct ide_channel * channel = get_selected_channel(ide, port);
1375     struct ide_drive * drive = get_selected_drive(channel);
1376             
1377     if (length != 1) {
1378         PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1379         return -1;
1380     }
1381
1382     PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1383
1384     switch (port) {
1385         // reset and interrupt enable
1386         case PRI_CTRL_PORT:
1387         case SEC_CTRL_PORT: {
1388             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1389
1390             // only reset channel on a 0->1 reset bit transition
1391             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1392                 channel_reset(channel);
1393             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1394                 channel_reset_complete(channel);
1395             }
1396
1397             channel->ctrl_reg.val = tmp_ctrl->val;          
1398             break;
1399         }
1400         case PRI_FEATURES_PORT:
1401         case SEC_FEATURES_PORT:
1402             channel->features.val = *(uint8_t *)src;
1403             break;
1404
1405         case PRI_SECT_CNT_PORT:
1406         case SEC_SECT_CNT_PORT:
1407             // update CHS and LBA28 state
1408             channel->drives[0].sector_count = *(uint8_t *)src;
1409             channel->drives[1].sector_count = *(uint8_t *)src;
1410
1411             // update LBA48 state
1412             if (is_lba48(channel)) {
1413                 uint16_t val = *(uint8_t*)src; // top bits zero;
1414                 if (!channel->drives[0].lba48.sector_count_state) { 
1415                     channel->drives[0].lba48.sector_count = val<<8;
1416                 } else {
1417                     channel->drives[0].lba48.sector_count |= val;
1418                 }
1419                 channel->drives[0].lba48.sector_count_state ^= 1;
1420                 if (!channel->drives[1].lba48.sector_count_state) { 
1421                     channel->drives[1].lba48.sector_count = val<<8;
1422                 } else {
1423                     channel->drives[1].lba48.sector_count |= val;
1424                 }
1425                 channel->drives[0].lba48.sector_count_state ^= 1;
1426             }
1427             
1428             break;
1429
1430         case PRI_SECT_NUM_PORT:
1431         case SEC_SECT_NUM_PORT:
1432             // update CHS and LBA28 state
1433             channel->drives[0].sector_num = *(uint8_t *)src;
1434             channel->drives[1].sector_num = *(uint8_t *)src;
1435
1436             // update LBA48 state
1437             if (is_lba48(channel)) {
1438                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1439                 if (!channel->drives[0].lba48.lba41_state) { 
1440                     channel->drives[0].lba48.lba |= val<<24; 
1441                 } else {
1442                     channel->drives[0].lba48.lba |= val;
1443                 }
1444                 channel->drives[0].lba48.lba41_state ^= 1;
1445                 if (!channel->drives[1].lba48.lba41_state) { 
1446                     channel->drives[1].lba48.lba |= val<<24; 
1447                 } else {
1448                     channel->drives[1].lba48.lba |= val;
1449                 }
1450                 channel->drives[1].lba48.lba41_state ^= 1;
1451             }
1452
1453             break;
1454         case PRI_CYL_LOW_PORT:
1455         case SEC_CYL_LOW_PORT:
1456             // update CHS and LBA28 state
1457             channel->drives[0].cylinder_low = *(uint8_t *)src;
1458             channel->drives[1].cylinder_low = *(uint8_t *)src;
1459
1460             // update LBA48 state
1461             if (is_lba48(channel)) {
1462                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1463                 if (!channel->drives[0].lba48.lba52_state) { 
1464                     channel->drives[0].lba48.lba |= val<<32; 
1465                 } else {
1466                     channel->drives[0].lba48.lba |= val<<8;
1467                 }
1468                 channel->drives[0].lba48.lba52_state ^= 1;
1469                 if (!channel->drives[1].lba48.lba52_state) { 
1470                     channel->drives[1].lba48.lba |= val<<32; 
1471                 } else {
1472                     channel->drives[1].lba48.lba |= val<<8;
1473                 }
1474                 channel->drives[1].lba48.lba52_state ^= 1;
1475             }
1476
1477             break;
1478
1479         case PRI_CYL_HIGH_PORT:
1480         case SEC_CYL_HIGH_PORT:
1481             // update CHS and LBA28 state
1482             channel->drives[0].cylinder_high = *(uint8_t *)src;
1483             channel->drives[1].cylinder_high = *(uint8_t *)src;
1484
1485             // update LBA48 state
1486             if (is_lba48(channel)) {
1487                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1488                 if (!channel->drives[0].lba48.lba63_state) { 
1489                     channel->drives[0].lba48.lba |= val<<40; 
1490                 } else {
1491                     channel->drives[0].lba48.lba |= val<<16;
1492                 }
1493                 channel->drives[0].lba48.lba63_state ^= 1;
1494                 if (!channel->drives[1].lba48.lba63_state) { 
1495                     channel->drives[1].lba48.lba |= val<<40; 
1496                 } else {
1497                     channel->drives[1].lba48.lba |= val<<16;
1498                 }
1499                 channel->drives[1].lba48.lba63_state ^= 1;
1500             }
1501
1502             break;
1503
1504         case PRI_DRV_SEL_PORT:
1505         case SEC_DRV_SEL_PORT: {
1506             struct ide_drive_head_reg nh, oh;
1507
1508             oh.val = channel->drive_head.val;
1509             channel->drive_head.val = nh.val = *(uint8_t *)src;
1510
1511             // has LBA flipped?
1512             if ((oh.val & 0xe0) != (nh.val & 0xe0)) {
1513                 // reset LBA48 state
1514                 channel->drives[0].lba48.sector_count_state=0;
1515                 channel->drives[0].lba48.lba41_state=0;
1516                 channel->drives[0].lba48.lba52_state=0;
1517                 channel->drives[0].lba48.lba63_state=0;
1518                 channel->drives[1].lba48.sector_count_state=0;
1519                 channel->drives[1].lba48.lba41_state=0;
1520                 channel->drives[1].lba48.lba52_state=0;
1521                 channel->drives[1].lba48.lba63_state=0;
1522             }
1523             
1524
1525             drive = get_selected_drive(channel);
1526
1527             // Selecting a non-present device is a no-no
1528             if (drive->drive_type == BLOCK_NONE) {
1529                 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1530                 channel->error_reg.abort = 1;
1531                 channel->status.error = 1;
1532             } else {
1533                 channel->status.busy = 0;
1534                 channel->status.ready = 1;
1535                 channel->status.data_req = 0;
1536                 channel->status.error = 0;
1537                 channel->status.seek_complete = 1;
1538                 
1539                 channel->dma_status.active = 0;
1540                 channel->dma_status.err = 0;
1541             }
1542
1543             break;
1544         }
1545         default:
1546             PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
1547             return -1;
1548     }
1549     return length;
1550 }
1551
1552
1553 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1554     struct ide_internal * ide = priv_data;
1555     struct ide_channel * channel = get_selected_channel(ide, port);
1556     struct ide_drive * drive = get_selected_drive(channel);
1557     
1558     if (length != 1) {
1559         PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1560         return -1;
1561     }
1562     
1563     PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1564
1565     if ((port == PRI_ADDR_REG_PORT) ||
1566         (port == SEC_ADDR_REG_PORT)) {
1567         // unused, return 0xff
1568         *(uint8_t *)dst = 0xff;
1569         return length;
1570     }
1571
1572
1573     // if no drive is present just return 0 + reserved bits
1574     if (drive->drive_type == BLOCK_NONE) {
1575         if ((port == PRI_DRV_SEL_PORT) ||
1576             (port == SEC_DRV_SEL_PORT)) {
1577             *(uint8_t *)dst = 0xa0;
1578         } else {
1579             *(uint8_t *)dst = 0;
1580         }
1581
1582         return length;
1583     }
1584
1585     switch (port) {
1586
1587         // This is really the error register.
1588         case PRI_FEATURES_PORT:
1589         case SEC_FEATURES_PORT:
1590             *(uint8_t *)dst = channel->error_reg.val;
1591             break;
1592             
1593         case PRI_SECT_CNT_PORT:
1594         case SEC_SECT_CNT_PORT:
1595             *(uint8_t *)dst = drive->sector_count;
1596             break;
1597
1598         case PRI_SECT_NUM_PORT:
1599         case SEC_SECT_NUM_PORT:
1600             *(uint8_t *)dst = drive->sector_num;
1601             break;
1602
1603         case PRI_CYL_LOW_PORT:
1604         case SEC_CYL_LOW_PORT:
1605             *(uint8_t *)dst = drive->cylinder_low;
1606             break;
1607
1608
1609         case PRI_CYL_HIGH_PORT:
1610         case SEC_CYL_HIGH_PORT:
1611             *(uint8_t *)dst = drive->cylinder_high;
1612             break;
1613
1614         case PRI_DRV_SEL_PORT:
1615         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1616             *(uint8_t *)dst = channel->drive_head.val;
1617             break;
1618
1619         case PRI_CTRL_PORT:
1620         case SEC_CTRL_PORT:
1621         case PRI_CMD_PORT:
1622         case SEC_CMD_PORT:
1623             // Something about lowering interrupts here....
1624             *(uint8_t *)dst = channel->status.val;
1625             break;
1626
1627         default:
1628             PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1629             return -1;
1630     }
1631
1632     PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
1633
1634     return length;
1635 }
1636
1637
1638
1639 static void init_drive(struct ide_drive * drive) {
1640
1641     drive->sector_count = 0x01;
1642     drive->sector_num = 0x01;
1643     drive->cylinder = 0x0000;
1644
1645     drive->drive_type = BLOCK_NONE;
1646
1647     memset(drive->model, 0, sizeof(drive->model));
1648
1649     drive->transfer_index = 0;
1650     drive->transfer_length = 0;
1651     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1652
1653     drive->num_cylinders = 0;
1654     drive->num_heads = 0;
1655     drive->num_sectors = 0;
1656     
1657
1658     drive->private_data = NULL;
1659     drive->ops = NULL;
1660 }
1661
1662 static void init_channel(struct ide_channel * channel) {
1663     int i = 0;
1664
1665     channel->error_reg.val = 0x01;
1666
1667     //** channel->features = 0x0;
1668
1669     channel->drive_head.val = 0x00;
1670     channel->status.val = 0x00;
1671     channel->cmd_reg = 0x00;
1672     channel->ctrl_reg.val = 0x08;
1673
1674     channel->dma_cmd.val = 0;
1675     channel->dma_status.val = 0;
1676     channel->dma_prd_addr = 0;
1677     channel->dma_tbl_index = 0;
1678
1679     for (i = 0; i < 2; i++) {
1680         init_drive(&(channel->drives[i]));
1681     }
1682
1683 }
1684
1685
1686 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1687     PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1688     /*
1689     struct ide_internal * ide = (struct ide_internal *)(private_data);
1690
1691     PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1692     */
1693
1694     return 0;
1695 }
1696
1697 static int init_ide_state(struct ide_internal * ide) {
1698
1699     /* 
1700      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1701      */
1702
1703     init_channel(&(ide->channels[0]));
1704     ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1705
1706     init_channel(&(ide->channels[1]));
1707     ide->channels[1].irq = SEC_DEFAULT_IRQ ;
1708
1709
1710     return 0;
1711 }
1712
1713
1714
1715
/* Device-manager free callback: release the controller state.
 * TODO(review): should this also deregister from the PCI bus? */
static int ide_free(struct ide_internal * ide) {
    V3_Free(ide);
    return 0;
}
1724
1725 #ifdef V3_CONFIG_CHECKPOINT
1726
1727 #include <palacios/vmm_sprintf.h>
1728
/* 
 * Checkpoint save: serialize the full IDE controller state.
 *
 * Context layout: one (currently empty) top-level context named <id>,
 * then one context per channel named "<id>-<ch>" and one per drive
 * named "<id>-<ch>-<drv>".  Must stay in lock-step with
 * ide_load_extended(), which reads the same keys in the same order.
 *
 * Returns 0 on success, -1 on any failure (partial state may have
 * been written).
 */
static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    struct v3_chkpt_ctx *ctx=0;
    int ch_num = 0;
    int drive_num = 0;
    char buf[128];
    

    /* Open/close the top-level context even though it is empty, so the
       checkpoint layout stays stable if fields are added later */
    ctx=v3_chkpt_open_ctx(chkpt,id);
    
    if (!ctx) { 
      PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
      goto savefailout;
    }

    // nothing saved yet
    
    v3_chkpt_close_ctx(ctx);ctx=0;
   

    for (ch_num = 0; ch_num < 2; ch_num++) {
        struct ide_channel * ch = &(ide->channels[ch_num]);

        /* Per-channel context: "<id>-<ch_num>" */
        snprintf(buf, 128, "%s-%d", id, ch_num);

        ctx = v3_chkpt_open_ctx(chkpt, buf);
        
        if (!ctx) { 
          PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
          goto savefailout;
        }

        /* Channel register state (task-file + bus-master DMA engine) */
        V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
        V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
        V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);



        v3_chkpt_close_ctx(ctx); ctx=0;

        for (drive_num = 0; drive_num < 2; drive_num++) {
            struct ide_drive * drive = &(ch->drives[drive_num]);
            
            /* Per-drive context: "<id>-<ch_num>-<drive_num>" */
            snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);

            ctx = v3_chkpt_open_ctx(chkpt, buf);
            
            if (!ctx) { 
              PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
              goto savefailout;
            }

            V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
            V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);

            V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);

            V3_CHKPT_SAVE(ctx, "DATA_BUF",  drive->data_buf, savefailout);


            /* For now we'll just pack the type specific data at the end... */
            /* We should probably add a new context here in the future... */
            if (drive->drive_type == BLOCK_CDROM) {
              V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
            } else if (drive->drive_type == BLOCK_DISK) {
              V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
              V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
              V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
            } else if (drive->drive_type == BLOCK_NONE) { 
              // no drive connected, so no data
            } else {
              PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
              goto savefailout;
            }

            /* LBA48 state is saved for all drive types */
            V3_CHKPT_SAVE(ctx, "LBA48_LBA", drive->lba48.lba, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, savefailout);
            
            v3_chkpt_close_ctx(ctx); ctx=0;
        }
    }

// goodout:
    return 0;

 savefailout:
    PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
    if (ctx) {v3_chkpt_close_ctx(ctx); }
    return -1;
}
1836
1837
1838
1839 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1840     struct ide_internal * ide = (struct ide_internal *)private_data;
1841     struct v3_chkpt_ctx *ctx=0;
1842     int ch_num = 0;
1843     int drive_num = 0;
1844     char buf[128];
1845     
1846     ctx=v3_chkpt_open_ctx(chkpt,id);
1847     
1848     if (!ctx) { 
1849       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1850       goto loadfailout;
1851     }
1852
1853     // nothing saved yet
1854     
1855     v3_chkpt_close_ctx(ctx);ctx=0;
1856    
1857
1858     for (ch_num = 0; ch_num < 2; ch_num++) {
1859         struct ide_channel * ch = &(ide->channels[ch_num]);
1860
1861         snprintf(buf, 128, "%s-%d", id, ch_num);
1862
1863         ctx = v3_chkpt_open_ctx(chkpt, buf);
1864         
1865         if (!ctx) { 
1866           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1867           goto loadfailout;
1868         }
1869
1870         V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1871         V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1872         V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1873         V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1874         V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1875         V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1876         V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1877         V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1878         V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1879         V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1880
1881         v3_chkpt_close_ctx(ctx); ctx=0;
1882
1883         for (drive_num = 0; drive_num < 2; drive_num++) {
1884             struct ide_drive * drive = &(ch->drives[drive_num]);
1885             
1886             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1887
1888             ctx = v3_chkpt_open_ctx(chkpt, buf);
1889             
1890             if (!ctx) { 
1891               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1892               goto loadfailout;
1893             }
1894
1895             V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1896             V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1897             V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1898             V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1899
1900             V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1901             V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1902             V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1903
1904             V3_CHKPT_LOAD(ctx, "DATA_BUF",  drive->data_buf, loadfailout);
1905
1906             
1907             /* For now we'll just pack the type specific data at the end... */
1908             /* We should probably add a new context here in the future... */
1909             if (drive->drive_type == BLOCK_CDROM) {
1910               V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1911               V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1912               V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1913             } else if (drive->drive_type == BLOCK_DISK) {
1914               V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1915               V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1916               V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1917             } else if (drive->drive_type == BLOCK_NONE) { 
1918               // no drive connected, so no data
1919             } else {
1920               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1921               goto loadfailout;
1922             }
1923
1924             V3_CHKPT_LOAD(ctx, "LBA48_LBA", drive->lba48.lba, loadfailout);
1925             V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, loadfailout);
1926             V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, loadfailout);
1927             V3_CHKPT_LOAD(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, loadfailout);
1928             V3_CHKPT_LOAD(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, loadfailout);
1929             V3_CHKPT_LOAD(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, loadfailout);
1930             
1931         }
1932     }
1933 // goodout:
1934     return 0;
1935
1936  loadfailout:
1937     PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1938     if (ctx) {v3_chkpt_close_ctx(ctx); }
1939     return -1;
1940
1941 }
1942
1943
1944
1945 #endif
1946
1947
/* Device-manager callback table for the IDE controller.  Checkpoint
 * save/load hooks are compiled in only when checkpointing is enabled. */
static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))ide_free,
#ifdef V3_CONFIG_CHECKPOINT
    .save_extended = ide_save_extended,
    .load_extended = ide_load_extended
#endif
};
1955
1956
1957
1958
1959 static int connect_fn(struct v3_vm_info * vm, 
1960                       void * frontend_data, 
1961                       struct v3_dev_blk_ops * ops, 
1962                       v3_cfg_tree_t * cfg, 
1963                       void * private_data) {
1964     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1965     struct ide_channel * channel = NULL;
1966     struct ide_drive * drive = NULL;
1967
1968     char * bus_str = v3_cfg_val(cfg, "bus_num");
1969     char * drive_str = v3_cfg_val(cfg, "drive_num");
1970     char * type_str = v3_cfg_val(cfg, "type");
1971     char * model_str = v3_cfg_val(cfg, "model");
1972     uint_t bus_num = 0;
1973     uint_t drive_num = 0;
1974
1975
1976     if ((!type_str) || (!drive_str) || (!bus_str)) {
1977         PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1978         return -1;
1979     }
1980
1981     bus_num = atoi(bus_str);
1982     drive_num = atoi(drive_str);
1983
1984     channel = &(ide->channels[bus_num]);
1985     drive = &(channel->drives[drive_num]);
1986
1987     if (drive->drive_type != BLOCK_NONE) {
1988         PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1989         return -1;
1990     }
1991
1992     if (model_str != NULL) {
1993         strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1994     }
1995
1996     if (strcasecmp(type_str, "cdrom") == 0) {
1997         drive->drive_type = BLOCK_CDROM;
1998
1999         while (strlen((char *)(drive->model)) < 40) {
2000             strcat((char*)(drive->model), " ");
2001         }
2002
2003     } else if (strcasecmp(type_str, "hd") == 0) {
2004         drive->drive_type = BLOCK_DISK;
2005
2006         drive->hd_state.accessed = 0;
2007         drive->hd_state.mult_sector_num = 1;
2008
2009         drive->num_sectors = 63;
2010         drive->num_heads = 16;
2011         drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
2012     } else {
2013         PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
2014         return -1;
2015     }
2016  
2017     drive->ops = ops;
2018
2019     if (ide->ide_pci) {
2020         // Hardcode this for now, but its not a good idea....
2021         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
2022     }
2023  
2024     drive->private_data = private_data;
2025
2026     return 0;
2027 }
2028
2029
2030
2031
2032 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
2033     struct ide_internal * ide  = NULL;
2034     char * dev_id = v3_cfg_val(cfg, "ID");
2035     int ret = 0;
2036
2037     PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
2038
2039     ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
2040
2041     if (ide == NULL) {
2042         PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
2043         return -1;
2044     }
2045
2046     memset(ide, 0, sizeof(struct ide_internal));
2047
2048     ide->vm = vm;
2049     ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
2050
2051     if (ide->pci_bus != NULL) {
2052         struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
2053
2054         if (!southbridge) {
2055             PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
2056             V3_Free(ide);
2057             return -1;
2058         }
2059
2060         ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
2061     } else {
2062         PrintError(vm,VCORE_NONE,"Strange - you don't have a PCI bus\n");
2063     }
2064
2065     PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
2066
2067     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
2068
2069     if (dev == NULL) {
2070         PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
2071         V3_Free(ide);
2072         return -1;
2073     }
2074
2075     if (init_ide_state(ide) == -1) {
2076         PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
2077         v3_remove_device(dev);
2078         return -1;
2079     }
2080
2081     PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
2082
2083     ret |= v3_dev_hook_io(dev, PRI_DATA_PORT, 
2084                           &read_data_port, &write_data_port);
2085     ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
2086                           &read_port_std, &write_port_std);
2087     ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
2088                           &read_port_std, &write_port_std);
2089     ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
2090                           &read_port_std, &write_port_std);
2091     ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
2092                           &read_port_std, &write_port_std);
2093     ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
2094                           &read_port_std, &write_port_std);
2095     ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
2096                           &read_port_std, &write_port_std);
2097     ret |= v3_dev_hook_io(dev, PRI_CMD_PORT, 
2098                           &read_port_std, &write_cmd_port);
2099
2100     ret |= v3_dev_hook_io(dev, SEC_DATA_PORT, 
2101                           &read_data_port, &write_data_port);
2102     ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
2103                           &read_port_std, &write_port_std);
2104     ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
2105                           &read_port_std, &write_port_std);
2106     ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
2107                           &read_port_std, &write_port_std);
2108     ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
2109                           &read_port_std, &write_port_std);
2110     ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
2111                           &read_port_std, &write_port_std);
2112     ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
2113                           &read_port_std, &write_port_std);
2114     ret |= v3_dev_hook_io(dev, SEC_CMD_PORT, 
2115                           &read_port_std, &write_cmd_port);
2116   
2117
2118     ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT, 
2119                           &read_port_std, &write_port_std);
2120
2121     ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT, 
2122                           &read_port_std, &write_port_std);
2123   
2124
2125     ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
2126                           &read_port_std, &write_port_std);
2127
2128     ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
2129                           &read_port_std, &write_port_std);
2130
2131
2132     if (ret != 0) {
2133         PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
2134         v3_remove_device(dev);
2135         return -1;
2136     }
2137
2138
2139     if (ide->pci_bus) {
2140         struct v3_pci_bar bars[6];
2141         struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
2142         struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
2143         struct pci_device * pci_dev = NULL;
2144         int i;
2145
2146         V3_Print(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
2147
2148         for (i = 0; i < 6; i++) {
2149             bars[i].type = PCI_BAR_NONE;
2150         }
2151
2152         bars[4].type = PCI_BAR_IO;
2153         //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
2154         bars[4].default_base_port = -1;
2155         bars[4].num_ports = 16;
2156
2157         bars[4].io_read = read_dma_port;
2158         bars[4].io_write = write_dma_port;
2159         bars[4].private_data = ide;
2160
2161         pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
2162                                          "PIIX3_IDE", bars,
2163                                          pci_config_update, NULL, NULL, NULL, ide);
2164
2165         if (pci_dev == NULL) {
2166             PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i); 
2167             v3_remove_device(dev);
2168             return -1;
2169         }
2170
2171         /* This is for CMD646 devices 
2172            pci_dev->config_header.vendor_id = 0x1095;
2173            pci_dev->config_header.device_id = 0x0646;
2174            pci_dev->config_header.revision = 0x8f07;
2175         */
2176
2177         pci_dev->config_header.vendor_id = 0x8086;
2178         pci_dev->config_header.device_id = 0x7010;
2179         pci_dev->config_header.revision = 0x00;
2180
2181         pci_dev->config_header.prog_if = 0x80; // Master IDE device
2182         pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
2183         pci_dev->config_header.class = PCI_CLASS_STORAGE;
2184
2185         pci_dev->config_header.command = 0;
2186         pci_dev->config_header.status = 0x0280;
2187
2188         ide->ide_pci = pci_dev;
2189
2190
2191     }
2192
2193     if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
2194         PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
2195         v3_remove_device(dev);
2196         return -1;
2197     }
2198     
2199
2200     PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
2201
2202     return 0;
2203 }
2204
2205
2206 device_register("IDE", ide_init)
2207
2208
2209
2210
2211 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num, 
2212                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2213
2214     struct ide_internal * ide  = ide_data;  
2215     struct ide_channel * channel = &(ide->channels[channel_num]);
2216     struct ide_drive * drive = &(channel->drives[drive_num]);
2217     
2218     if (drive->drive_type == BLOCK_NONE) {
2219         return -1;
2220     }
2221
2222     *cylinders = drive->num_cylinders;
2223     *heads = drive->num_heads;
2224     *sectors = drive->num_sectors;
2225
2226     return 0;
2227 }