Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Cleanup and sanity-checking of OOB accesses and pointer-to-local issues (Coverity...
[palacios.git] / palacios / src / devices / ide.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef V3_CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
36
37
38 #define PRI_DATA_PORT         0x1f0
39 #define PRI_FEATURES_PORT     0x1f1
40 #define PRI_SECT_CNT_PORT     0x1f2
41 #define PRI_SECT_NUM_PORT     0x1f3
42 #define PRI_CYL_LOW_PORT      0x1f4
43 #define PRI_CYL_HIGH_PORT     0x1f5
44 #define PRI_DRV_SEL_PORT      0x1f6
45 #define PRI_CMD_PORT          0x1f7
46 #define PRI_CTRL_PORT         0x3f6
47 #define PRI_ADDR_REG_PORT     0x3f7
48
49 #define SEC_DATA_PORT         0x170
50 #define SEC_FEATURES_PORT     0x171
51 #define SEC_SECT_CNT_PORT     0x172
52 #define SEC_SECT_NUM_PORT     0x173
53 #define SEC_CYL_LOW_PORT      0x174
54 #define SEC_CYL_HIGH_PORT     0x175
55 #define SEC_DRV_SEL_PORT      0x176
56 #define SEC_CMD_PORT          0x177
57 #define SEC_CTRL_PORT         0x376
58 #define SEC_ADDR_REG_PORT     0x377
59
60
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
63
64 #define DATA_BUFFER_SIZE 2048
65
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
68
69
/* Debug names for the primary channel's I/O ports: slots 0-7 are the
 * command block (0x1f0-0x1f7), slots 8-9 are the control block
 * (0x3f6-0x3f7).  Indexed by io_port_to_str(). */
static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
                                          "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
                                           "PRI_CTRL", "PRI_ADDR_REG"};

/* Same layout for the secondary channel (0x170-0x177 / 0x376-0x377). */
static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
                                          "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
                                           "SEC_CTRL", "SEC_ADDR_REG"};

/* Debug names for the 8-byte busmaster (DMA) register block; offsets
 * 1 and 3 are reserved, hence the NULL entries. */
static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
                                           "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
81
82
/* Kind of backing device attached to a drive slot (none, hard disk, or CDROM). */
typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
/* Per-drive state used only when the drive is a CDROM (ATAPI). */
struct ide_cd_state {
    struct atapi_sense_data sense;            // sense data reported to the guest

    uint8_t atapi_cmd;                        // opcode of the ATAPI packet in progress
    struct atapi_error_recovery err_recovery;
};
111
/* Per-drive state used only when the drive is a hard disk (ATA). */
struct ide_hd_state {
    uint32_t accessed;      // whether the drive has been touched since reset

    /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
    uint32_t mult_sector_num;

    /* This is the current op sector size:
     * for multiple sector ops this equals mult_sector_num
     * for standard ops this equals 1
     */
    uint64_t cur_sector_num;
};
124
/* State for one drive slot (master or slave) on an IDE channel.
 * The trailing anonymous unions shadow the taskfile registers so the
 * same storage can be viewed as CHS, LBA, or ATAPI fields. */
struct ide_drive {
    // Command Registers

    v3_block_type_t drive_type;

    struct v3_dev_blk_ops * ops;    // backend block-device callbacks

    union {
        struct ide_cd_state cd_state;   // valid when drive_type == BLOCK_CDROM
        struct ide_hd_state hd_state;   // valid when drive_type == BLOCK_DISK
    };

    char model[41];     // model string reported by IDENTIFY (40 chars + NUL)

    // Where we are in the data transfer
    uint64_t transfer_index;

    // the length of a transfer
    // calculated for easy access
    uint64_t transfer_length;

    uint64_t current_lba;   // next sector to read/write on the backend

    // We have a local data buffer that we use for IO port accesses
    uint8_t data_buf[DATA_BUFFER_SIZE];


    // CHS geometry reported to the guest
    uint32_t num_cylinders;
    uint32_t num_heads;
    uint32_t num_sectors;


    /* LBA48 shadow registers: each taskfile register is written twice
     * (high byte first), so a *_state flag tracks which half comes next. */
    struct lba48_state {
        // all start at zero
        uint64_t lba;                  
        uint16_t sector_count;            // for LBA48
        uint8_t  sector_count_state;      // two step write to 1f2/172 (high first)
        uint8_t  lba41_state;             // two step write to 1f3
        uint8_t  lba52_state;             // two step write to 1f4
        uint8_t  lba63_state;             // two step write to 1f5 (original comment said "15"; presumably 0x1f5, following the 1f3/1f4 pattern -- TODO confirm)
    } lba48;

    void * private_data;    // backend-specific context passed to ops
    
    union {
        uint8_t sector_count;             // 0x1f2,0x172  (ATA)
        struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
    } __attribute__((packed));


    union {
        uint8_t sector_num;               // 0x1f3,0x173
        uint8_t lba0;                     // LBA bits 0-7
    } __attribute__((packed));

    union {
        uint16_t cylinder;
        uint16_t lba12;                   // LBA bits 8-23
        
        struct {
            uint8_t cylinder_low;       // 0x1f4,0x174
            uint8_t cylinder_high;      // 0x1f5,0x175
        } __attribute__((packed));
        
        struct {
            uint8_t lba1;
            uint8_t lba2;
        } __attribute__((packed));
        
        
        // The transfer length requested by the CPU 
        uint16_t req_len;
    } __attribute__((packed));

};
200
201
202
/* One IDE channel (primary or secondary): two drive slots plus the
 * shared command, control, and busmaster (DMA) register sets. */
struct ide_channel {
    struct ide_drive drives[2];     // [0] = master, [1] = slave

    // Command Registers
    struct ide_error_reg error_reg;     // [read] 0x1f1,0x171

    struct ide_features_reg features;

    struct ide_drive_head_reg drive_head; // 0x1f6,0x176

    struct ide_status_reg status;       // [read] 0x1f7,0x177
    uint8_t cmd_reg;                // [write] 0x1f7,0x177

    int irq; // this is temporary until we add PCI support

    // Control Registers
    struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376

    /* Busmaster register block: byte-addressable via dma_ports[] for
     * I/O-port reads, or by named field for direct access. */
    union {
        uint8_t dma_ports[8];
        struct {
            struct ide_dma_cmd_reg dma_cmd;
            uint8_t rsvd1;
            struct ide_dma_status_reg dma_status;
            uint8_t rsvd2;
            uint32_t dma_prd_addr;      // guest-physical address of the PRD table
        } __attribute__((packed));
    } __attribute__((packed));

    uint32_t dma_tbl_index;     // next PRD entry to consume during a transfer
};
234
235
236
/* Top-level device state: both channels plus the PCI/southbridge glue. */
struct ide_internal {
    struct ide_channel channels[2];     // [0] = primary, [1] = secondary

    struct v3_southbridge * southbridge;
    struct vm_device * pci_bus;

    struct pci_device * ide_pci;    // our PCI config-space presence (if attached)

    struct v3_vm_info * vm;
};
247
248
249
250
251
252 /* Utility functions */
253
/* Swap the two bytes of a 16-bit value (big-endian <-> little-endian
 * on this host). */
static inline uint16_t be_to_le_16(const uint16_t val) {
    const uint8_t * bytes = (const uint8_t *)&val;
    uint16_t hi = bytes[0];
    uint16_t lo = bytes[1];

    return (uint16_t)((hi << 8) | lo);
}
258
/* 16-bit byte swap is symmetric, so converting in either direction is
 * the same operation. */
static inline uint16_t le_to_be_16(const uint16_t val) {
    return be_to_le_16(val);
}
262
263
/*
 * Swap the four bytes of a 32-bit value (big-endian <-> little-endian
 * on this host).
 *
 * Each byte is widened to uint32_t before shifting: the previous code
 * left-shifted the int-promoted uint8_t by 24, which is undefined
 * behavior whenever the byte is >= 0x80 (the shift lands in the sign
 * bit of a signed int, C11 6.5.7).
 */
static inline uint32_t be_to_le_32(const uint32_t val) {
    const uint8_t * buf = (const uint8_t *)&val;

    return ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
           ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
}
268
/* 32-bit byte swap is symmetric, so converting in either direction is
 * the same operation. */
static inline uint32_t le_to_be_32(const uint32_t val) {
    return be_to_le_32(val);
}
272
273
/* True when the drive/head register selects LBA mode with both
 * "reserved" bits set -- presumably the legacy always-1 bits of the
 * LBA28 register layout; confirm against ide-types.h. */
static inline int is_lba28(struct ide_channel * channel) {
    return channel->drive_head.lba_mode && channel->drive_head.rsvd1 && channel->drive_head.rsvd2;
}
277
/* True when the drive/head register selects LBA mode with both
 * "reserved" bits clear -- used here to distinguish LBA48 from LBA28. */
static inline int is_lba48(struct ide_channel * channel) {
    return channel->drive_head.lba_mode && !channel->drive_head.rsvd1 && !channel->drive_head.rsvd2;
}
281
/* True when the drive/head register selects cylinder/head/sector
 * addressing (LBA bit clear). */
static inline int is_chs(struct ide_channel * channel) {
    return !channel->drive_head.lba_mode;
}
285
286 static inline int get_channel_index(ushort_t port) {
287     if (((port & 0xfff8) == 0x1f0) ||
288         ((port & 0xfffe) == 0x3f6) || 
289         ((port & 0xfff8) == 0xc000)) {
290         return 0;
291     } else if (((port & 0xfff8) == 0x170) ||
292                ((port & 0xfffe) == 0x376) ||
293                ((port & 0xfff8) == 0xc008)) {
294         return 1;
295     }
296
297     return -1;
298 }
299
300 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
301     int channel_idx = get_channel_index(port);    
302     if (channel_idx >= 0) { 
303         return &(ide->channels[channel_idx]);
304     } else {
305         PrintError(VM_NONE,VCORE_NONE,"ide: Cannot Determine Selected Channel\n");
306         return 0;
307     }
308 }
309
/* Return the drive currently addressed by the channel's drive-select
 * bit (0 = master, 1 = slave). */
static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
    return &(channel->drives[channel->drive_head.drive_sel]);
}
313
314
315
316
317 /* Drive Commands */
318 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
319     if (channel->ctrl_reg.irq_disable == 0) {
320
321         PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
322
323         channel->dma_status.int_gen = 1;
324         v3_raise_irq(ide->vm, channel->irq);
325     } else {
326         PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
327     }
328 }
329
330
331 static void drive_reset(struct ide_drive * drive) {
332     drive->sector_count = 0x01;
333     drive->sector_num = 0x01;
334
335     PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
336     
337     if (drive->drive_type == BLOCK_CDROM) {
338         drive->cylinder = 0xeb14;
339     } else {
340         drive->cylinder = 0x0000;
341         //drive->hd_state.accessed = 0;
342     }
343
344
345     memset(drive->data_buf, 0, sizeof(drive->data_buf));
346     drive->transfer_index = 0;
347
348     // Send the reset signal to the connected device callbacks
349     //     channel->drives[0].reset();
350     //    channel->drives[1].reset();
351 }
352
/* Put a channel's shared registers into their reset state.  The busy
 * bit stays set until channel_reset_complete() runs. */
static void channel_reset(struct ide_channel * channel) {
    
    // set busy and seek complete flags
    channel->status.val = 0x90;

    // Clear errors (0x01 is the post-reset "no error" diagnostic value)
    channel->error_reg.val = 0x01;

    // clear commands
    channel->cmd_reg = 0;  // NOP

    // reset leaves interrupts enabled
    channel->ctrl_reg.irq_disable = 0;
}
366
367 static void channel_reset_complete(struct ide_channel * channel) {
368     channel->status.busy = 0;
369     channel->status.ready = 1;
370
371     channel->drive_head.head_num = 0;    
372     
373     drive_reset(&(channel->drives[0]));
374     drive_reset(&(channel->drives[1]));
375 }
376
377
/*
 * Fail the current command: set the error + ready status bits, record
 * the abort error code, and interrupt the guest.
 */
static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {

    PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");

    channel->status.val = 0x41; // Error + ready
    channel->error_reg.val = 0x04; // ABRT -- bit 2 of the ATA error register ("command aborted")

    ide_raise_irq(ide, channel);
}
387
388
389 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
390 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
391
392
393 /* ATAPI functions */
394 #include "atapi.h"
395
396 /* ATA functions */
397 #include "ata.h"
398
399
400
401 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
402     struct ide_dma_prd prd_entry;
403     int index = 0;
404
405     V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
406
407     while (1) {
408         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
409         int ret = 0;
410
411         ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
412         
413         if (ret != sizeof(struct ide_dma_prd)) {
414             PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
415             return;
416         }
417
418         V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
419                    prd_entry.base_addr, 
420                    (prd_entry.size == 0) ? 0x10000 : prd_entry.size, 
421                    prd_entry.end_of_table);
422
423         if (prd_entry.end_of_table) {
424             break;
425         }
426
427         index++;
428     }
429
430     return;
431 }
432
433
434 /* IO Operations */
435 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
436     struct ide_drive * drive = get_selected_drive(channel);
437     // This is at top level scope to do the EOT test at the end
438     struct ide_dma_prd prd_entry = {};
439     uint_t bytes_left = drive->transfer_length;
440
441     // Read in the data buffer....
442     // Read a sector/block at a time until the prd entry is full.
443
444 #ifdef V3_CONFIG_DEBUG_IDE
445     print_prd_table(ide, channel);
446 #endif
447
448     PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
449
450     // Loop through the disk data
451     while (bytes_left > 0) {
452         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
453         uint_t prd_bytes_left = 0;
454         uint_t prd_offset = 0;
455         int ret;
456
457         PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
458
459         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
460
461         if (ret != sizeof(struct ide_dma_prd)) {
462             PrintError(core->vm_info, core, "Could not read PRD\n");
463             return -1;
464         }
465
466         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
467                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
468
469         // loop through the PRD data....
470
471         if (prd_entry.size == 0) {
472             // a size of 0 means 64k
473             prd_bytes_left = 0x10000;
474         } else {
475             prd_bytes_left = prd_entry.size;
476         }
477
478
479         while (prd_bytes_left > 0) {
480             uint_t bytes_to_write = 0;
481
482             if (drive->drive_type == BLOCK_DISK) {
483                 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
484
485
486                 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
487                     PrintError(core->vm_info, core, "Failed to read next disk sector\n");
488                     return -1;
489                 }
490             } else if (drive->drive_type == BLOCK_CDROM) {
491                 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
492                     bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
493
494                     if (atapi_read_chunk(ide, channel) == -1) {
495                         PrintError(core->vm_info, core, "Failed to read next disk sector\n");
496                         return -1;
497                     }
498                 } else {
499                     /*
500                     PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
501                     return -1;
502                     */
503                     int cmd_ret = 0;
504
505                     //V3_Print(core->vm_info, core, "DMA of command packet\n");
506
507                     bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
508                     prd_bytes_left = bytes_to_write;
509
510
511                     // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
512                     cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, 
513                                                   bytes_to_write, drive->data_buf); 
514
515                     if (cmd_ret!=bytes_to_write) { 
516                         PrintError(core->vm_info, core, "Failed to write data to memory\n");
517                         return -1;
518                     }
519
520
521
522                     bytes_to_write = 0;
523                     prd_bytes_left = 0;
524                     drive->transfer_index += bytes_to_write;
525
526                     channel->status.busy = 0;
527                     channel->status.ready = 1;
528                     channel->status.data_req = 0;
529                     channel->status.error = 0;
530                     channel->status.seek_complete = 1;
531
532                     channel->dma_status.active = 0;
533                     channel->dma_status.err = 0;
534
535                     ide_raise_irq(ide, channel);
536                     
537                     return 0;
538                 }
539             }
540
541             PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n", 
542                        (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
543
544             drive->current_lba++;
545
546             ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 
547
548             if (ret != bytes_to_write) {
549                 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
550                 return -1;
551             }
552
553             PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
554
555             drive->transfer_index += ret;
556             prd_bytes_left -= ret;
557             prd_offset += ret;
558             bytes_left -= ret;
559         }
560
561         channel->dma_tbl_index++;
562
563         if (drive->drive_type == BLOCK_DISK) {
564             if (drive->transfer_index % HD_SECTOR_SIZE) {
565                 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
566                 return -1;
567             }
568         } else if (drive->drive_type == BLOCK_CDROM) {
569             if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
570                 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
571                     PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
572                     PrintError(core->vm_info, core, "transfer_index=%llu, transfer_length=%llu\n", 
573                                drive->transfer_index, drive->transfer_length);
574                     return -1;
575                 }
576             }
577         }
578
579
580         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
581             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
582             return -1;
583         }
584     }
585
586     /*
587       drive->irq_flags.io_dir = 1;
588       drive->irq_flags.c_d = 1;
589       drive->irq_flags.rel = 0;
590     */
591
592
593     // Update to the next PRD entry
594
595     // set DMA status
596
597     if (prd_entry.end_of_table) {
598         channel->status.busy = 0;
599         channel->status.ready = 1;
600         channel->status.data_req = 0;
601         channel->status.error = 0;
602         channel->status.seek_complete = 1;
603
604         channel->dma_status.active = 0;
605         channel->dma_status.err = 0;
606     }
607
608     ide_raise_irq(ide, channel);
609
610     return 0;
611 }
612
613
614 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
615     struct ide_drive * drive = get_selected_drive(channel);
616     // This is at top level scope to do the EOT test at the end
617     struct ide_dma_prd prd_entry = {};
618     uint_t bytes_left = drive->transfer_length;
619
620
621     PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
622
623     // Loop through disk data
624     while (bytes_left > 0) {
625         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
626         uint_t prd_bytes_left = 0;
627         uint_t prd_offset = 0;
628         int ret;
629         
630         PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
631
632         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
633
634         if (ret != sizeof(struct ide_dma_prd)) {
635             PrintError(core->vm_info, core, "Could not read PRD\n");
636             return -1;
637         }
638
639         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
640                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
641
642
643         if (prd_entry.size == 0) {
644             // a size of 0 means 64k
645             prd_bytes_left = 0x10000;
646         } else {
647             prd_bytes_left = prd_entry.size;
648         }
649
650         while (prd_bytes_left > 0) {
651             uint_t bytes_to_write = 0;
652
653
654             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
655
656
657             ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
658
659             if (ret != bytes_to_write) {
660                 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
661                 return -1;
662             }
663
664             PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
665
666
667             if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
668                 PrintError(core->vm_info, core, "Failed to write data to disk\n");
669                 return -1;
670             }
671             
672             drive->current_lba++;
673
674             drive->transfer_index += ret;
675             prd_bytes_left -= ret;
676             prd_offset += ret;
677             bytes_left -= ret;
678         }
679
680         channel->dma_tbl_index++;
681
682         if (drive->transfer_index % HD_SECTOR_SIZE) {
683             PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
684             return -1;
685         }
686
687         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
688             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
689             PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%llu)...\n", 
690                        bytes_left, drive->transfer_length);
691             PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
692                        prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
693
694             print_prd_table(ide, channel);
695             return -1;
696         }
697     }
698
699     if (prd_entry.end_of_table) {
700         channel->status.busy = 0;
701         channel->status.ready = 1;
702         channel->status.data_req = 0;
703         channel->status.error = 0;
704         channel->status.seek_complete = 1;
705
706         channel->dma_status.active = 0;
707         channel->dma_status.err = 0;
708     }
709
710     ide_raise_irq(ide, channel);
711
712     return 0;
713 }
714
715
716
717 #define DMA_CMD_PORT      0x00
718 #define DMA_STATUS_PORT   0x02
719 #define DMA_PRD_PORT0     0x04
720 #define DMA_PRD_PORT1     0x05
721 #define DMA_PRD_PORT2     0x06
722 #define DMA_PRD_PORT3     0x07
723
724 #define DMA_CHANNEL_FLAG  0x08
725
726 /*
727   Note that DMA model is as follows:
728
729     1. Write the PRD pointer to the busmaster (DMA engine)
730     2. Start the transfer on the device
731     3. Tell the busmaster to start shoveling data (active DMA)
732 */
733
/*
 * Handle a guest write to the busmaster (DMA) register block.
 *
 * Port decode: bit 3 of the port selects the channel, the low 3 bits
 * select the register.  Writing the command register with start=1
 * runs the ENTIRE transfer synchronously (dma_read/dma_write raise the
 * completion IRQ), so the guest never observes an in-flight transfer
 * and cannot abort one.
 *
 * Returns the number of bytes consumed, or -1 on error.
 */
static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
    uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
    struct ide_channel * channel = &(ide->channels[channel_flag]);

    PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
               port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);

    switch (port_offset) {
        case DMA_CMD_PORT:
            channel->dma_cmd.val = *(uint8_t *)src;
            
            PrintDebug(core->vm_info, core, "IDE: dma command write:  0x%x\n", channel->dma_cmd.val);

            if (channel->dma_cmd.start == 0) {
                // Clearing start rewinds the PRD walk
                channel->dma_tbl_index = 0;
            } else {
                // Launch DMA operation, interrupt at end

                channel->dma_status.active = 1;

                if (channel->dma_cmd.read == 1) {
                    // DMA Read the whole thing - dma_read will raise irq
                    if (dma_read(core, ide, channel) == -1) {
                        PrintError(core->vm_info, core, "Failed DMA Read\n");
                        return -1;
                    }
                } else {
                    // DMA write the whole thing - dma_write will raise irq
                    if (dma_write(core, ide, channel) == -1) {
                        PrintError(core->vm_info, core, "Failed DMA Write\n");
                        return -1;
                    }
                }
                
                // DMA complete
                // Note that guest cannot abort a DMA transfer
                channel->dma_cmd.start = 0;
            }

            break;
            
        case DMA_STATUS_PORT: {
            // This is intended to clear status

            uint8_t val = *(uint8_t *)src;

            if (length != 1) {
                PrintError(core->vm_info, core, "Invalid write length for DMA status port\n");
                return -1;
            }

            // but preserve certain bits: keep the r/w bits (0x60), keep
            // the active bit (0x01), and clear the write-1-to-clear
            // error/interrupt bits (0x06) only where the guest wrote 1s
            channel->dma_status.val = ((val & 0x60) | 
                                       (channel->dma_status.val & 0x01) |
                                       (channel->dma_status.val & ~val & 0x06));

            break;
        }           
        case DMA_PRD_PORT0:
        case DMA_PRD_PORT1:
        case DMA_PRD_PORT2:
        case DMA_PRD_PORT3: {
            // Byte-wise update of the 32-bit PRD table base address
            uint_t addr_index = port_offset & 0x3;
            uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
            int i = 0;

            if (addr_index + length > 4) {
                PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
                return -1;
            }

            for (i = 0; i < length; i++) {
                addr_buf[addr_index + i] = *((uint8_t *)src + i);
            }

            PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);

            break;
        }
        default:
            PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
            break;
    }

    return length;
}
822
823
824 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
825     struct ide_internal * ide = (struct ide_internal *)private_data;
826     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
827     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
828     struct ide_channel * channel = &(ide->channels[channel_flag]);
829
830     PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
831
832     if (port_offset + length > 16) {
833         PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
834         return -1;
835     }
836
837     memcpy(dst, channel->dma_ports + port_offset, length);
838     
839     PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
840
841     return length;
842 }
843
844
845
846 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
847     struct ide_internal * ide = priv_data;
848     struct ide_channel * channel = get_selected_channel(ide, port);
849     struct ide_drive * drive = get_selected_drive(channel);
850
851     if (length != 1) {
852         PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
853         return -1;
854     }
855
856     PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
857     
858     channel->cmd_reg = *(uint8_t *)src;
859     
860     switch (channel->cmd_reg) {
861
862         case ATA_PIDENTIFY: // ATAPI Identify Device Packet (CDROM)
863             if (drive->drive_type != BLOCK_CDROM) {
864                 drive_reset(drive);
865
866                 // JRL: Should we abort here?
867                 ide_abort_command(ide, channel);
868             } else {
869                 
870                 atapi_identify_device(drive);
871                 
872                 channel->error_reg.val = 0;
873                 channel->status.val = 0x58; // ready, data_req, seek_complete
874             
875                 ide_raise_irq(ide, channel);
876             }
877             break;
878
879         case ATA_IDENTIFY: // Identify Device
880             if (drive->drive_type != BLOCK_DISK) {
881                 drive_reset(drive);
882
883                 // JRL: Should we abort here?
884                 ide_abort_command(ide, channel);
885             } else {
886                 ata_identify_device(drive);
887
888                 channel->error_reg.val = 0;
889                 channel->status.val = 0x58;
890
891                 ide_raise_irq(ide, channel);
892             }
893             break;
894
895         case ATA_PACKETCMD: // ATAPI Command Packet (CDROM)
896             if (drive->drive_type != BLOCK_CDROM) {
897                 ide_abort_command(ide, channel);
898             }
899             
900             drive->sector_count = 1;
901
902             channel->status.busy = 0;
903             channel->status.write_fault = 0;
904             channel->status.data_req = 1;
905             channel->status.error = 0;
906
907             // reset the data buffer...
908             drive->transfer_length = ATAPI_PACKET_SIZE;
909             drive->transfer_index = 0;
910
911             break;
912
913         case ATA_READ:      // Read Sectors with Retry
914         case ATA_READ_ONCE: // Read Sectors without Retry
915         case ATA_MULTREAD:  // Read multiple sectors per ire
916         case ATA_READ_EXT:  // Read Sectors Extended (LBA48)
917
918             if (channel->cmd_reg==ATA_MULTREAD) { 
919                 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
920             } else {
921                 drive->hd_state.cur_sector_num = 1;
922             }
923
924             if (ata_read_sectors(ide, channel) == -1) {
925                 PrintError(core->vm_info, core, "Error reading sectors\n");
926                 ide_abort_command(ide,channel);
927             }
928             break;
929
930         case ATA_WRITE:            // Write Sector with retry
931         case ATA_WRITE_ONCE:       // Write Sector without retry
932         case ATA_MULTWRITE:        // Write multiple sectors per irq
933         case ATA_WRITE_EXT:        // Write Sectors Extended (LBA48)
934
935             if (channel->cmd_reg==ATA_MULTWRITE) { 
936                 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
937             } else {
938                 drive->hd_state.cur_sector_num = 1;
939             }
940
941             if (ata_write_sectors(ide, channel) == -1) {
942                 PrintError(core->vm_info, core, "Error writing sectors\n");
943                 ide_abort_command(ide,channel);
944             }
945             break;
946
947         case ATA_READDMA:            // Read DMA with retry
948         case ATA_READDMA_ONCE:       // Read DMA without retry
949         case ATA_READDMA_EXT:      { // Read DMA (LBA48)
950             uint64_t sect_cnt;
951
952             if (ata_get_lba_and_size(ide, channel, &(drive->current_lba), &sect_cnt) == -1) {
953                 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
954                 ide_abort_command(ide, channel);
955                 return length;
956             }
957             
958             drive->hd_state.cur_sector_num = 1;  // Not used for DMA
959             
960             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
961             drive->transfer_index = 0;
962
963             // Now we wait for the transfer to be intiated by flipping the 
964             // bus-master start bit
965             break;
966         }
967
968         case ATA_WRITEDMA:        // Write DMA with retry
969         case ATA_WRITEDMA_ONCE:   // Write DMA without retry
970         case ATA_WRITEDMA_EXT:  { // Write DMA (LBA48)
971
972             uint64_t sect_cnt;
973
974             if (ata_get_lba_and_size(ide, channel, &(drive->current_lba),&sect_cnt) == -1) {
975                 PrintError(core->vm_info,core,"Cannot get lba\n");
976                 ide_abort_command(ide, channel);
977                 return length;
978             }
979
980             drive->hd_state.cur_sector_num = 1;  // Not used for DMA
981
982             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
983             drive->transfer_index = 0;
984
985             // Now we wait for the transfer to be intiated by flipping the 
986             // bus-master start bit
987             break;
988         }
989
990         case ATA_STANDBYNOW1: // Standby Now 1
991         case ATA_IDLEIMMEDIATE: // Set Idle Immediate
992         case ATA_STANDBY: // Standby
993         case ATA_SETIDLE1: // Set Idle 1
994         case ATA_SLEEPNOW1: // Sleep Now 1
995         case ATA_STANDBYNOW2: // Standby Now 2
996         case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
997         case ATA_STANDBY2: // Standby 2
998         case ATA_SETIDLE2: // Set idle 2
999         case ATA_SLEEPNOW2: // Sleep Now 2
1000             channel->status.val = 0;
1001             channel->status.ready = 1;
1002             ide_raise_irq(ide, channel);
1003             break;
1004
1005         case ATA_SETFEATURES: // Set Features
1006             // Prior to this the features register has been written to. 
1007             // This command tells the drive to check if the new value is supported (the value is drive specific)
1008             // Common is that bit0=DMA enable
1009             // If valid the drive raises an interrupt, if not it aborts.
1010
1011             // Do some checking here...
1012
1013             channel->status.busy = 0;
1014             channel->status.write_fault = 0;
1015             channel->status.error = 0;
1016             channel->status.ready = 1;
1017             channel->status.seek_complete = 1;
1018             
1019             ide_raise_irq(ide, channel);
1020             break;
1021
1022         case ATA_SPECIFY:  // Initialize Drive Parameters
1023         case ATA_RECAL:  // recalibrate?
1024             channel->status.error = 0;
1025             channel->status.ready = 1;
1026             channel->status.seek_complete = 1;
1027             ide_raise_irq(ide, channel);
1028             break;
1029
1030         case ATA_SETMULT: { // Set multiple mode (IDE Block mode) 
1031             // This makes the drive transfer multiple sectors before generating an interrupt
1032
1033             if (drive->sector_count == 0) {
1034                 PrintError(core->vm_info,core,"Attempt to set multiple to zero\n");
1035                 drive->hd_state.mult_sector_num= 1;
1036                 ide_abort_command(ide,channel);
1037                 break;
1038             } else {
1039                 drive->hd_state.mult_sector_num = drive->sector_count;
1040             }
1041
1042             channel->status.ready = 1;
1043             channel->status.error = 0;
1044
1045             ide_raise_irq(ide, channel);
1046
1047             break;
1048         }
1049
1050         case ATA_DEVICE_RESET: // Reset Device
1051             drive_reset(drive);
1052             channel->error_reg.val = 0x01;
1053             channel->status.busy = 0;
1054             channel->status.ready = 1;
1055             channel->status.seek_complete = 1;
1056             channel->status.write_fault = 0;
1057             channel->status.error = 0;
1058             break;
1059
1060         case ATA_CHECKPOWERMODE1: // Check power mode
1061             drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1062             channel->status.busy = 0;
1063             channel->status.ready = 1;
1064             channel->status.write_fault = 0;
1065             channel->status.data_req = 0;
1066             channel->status.error = 0;
1067             break;
1068
1069         default:
1070             PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1071             ide_abort_command(ide, channel);
1072             break;
1073     }
1074
1075     return length;
1076 }
1077
1078
1079
1080
/*
 * PIO data-port read path for a hard disk.
 * Streams bytes out of drive->data_buf (which caches one sector at a time),
 * reloading the buffer from the backing store whenever the transfer index
 * crosses a sector boundary, and raises the completion IRQ once per
 * "cluster" of cur_sector_num sectors (1 for plain reads, mult_sector_num
 * for READ MULTIPLE).
 * Returns length on success, -1 on overrun or backing-store read failure.
 */
static int read_hd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Offset of the next byte within the currently buffered sector
    uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;


    PrintDebug(VM_NONE,VCORE_NONE, "Read HD data:  transfer_index %llu transfer length %llu current sector numer %llu\n",
               drive->transfer_index, drive->transfer_length, 
               drive->hd_state.cur_sector_num);

    // Reject reads past the end of the request once the buffer is also
    // exhausted (the index may legally pass transfer_length while still
    // inside the last buffered sector)
    if (drive->transfer_index >= drive->transfer_length && drive->transfer_index>=DATA_BUFFER_SIZE) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
                   drive->transfer_length, drive->transfer_index,
                   drive->transfer_index + length);
        return -1;
    }


    // A guest read should never straddle a sector boundary; warn but continue
    // (the buffer is larger than one sector so this will not corrupt memory)
    if (data_offset + length > HD_SECTOR_SIZE) { 
       PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
    }
   
    // For index==0, the read has been done in ata_read_sectors
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        // advance to next sector and read it
        
        drive->current_lba++;

        if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
            return -1;
        }
    }

    /*
      PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
      *(uint32_t *)(drive->data_buf + data_offset), 
      length, data_offset);
    */
    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;


    /* This is the trigger for interrupt injection.
     * For read single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
        (drive->transfer_index == drive->transfer_length)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
            channel->status.data_req = 1;
        } else {
            PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
            // This was the final read of the request
            channel->status.data_req = 0;
        }

        channel->status.ready = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }


    return length;
}
1151
/*
 * PIO data-port write path for a hard disk.
 * Accumulates guest bytes into drive->data_buf and flushes the buffer to
 * the backing store each time a full sector has been collected, then
 * advances current_lba.  IRQ clustering mirrors the read path: one IRQ per
 * cur_sector_num sectors, plus one at end of transfer.
 * Returns length on success, -1 on overrun or backing-store write failure.
 */
static int write_hd_data(uint8_t * src, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Offset of the next byte within the sector currently being assembled
    uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;


    PrintDebug(VM_NONE,VCORE_NONE, "Write HD data:  transfer_index %llu transfer length %llu current sector numer %llu\n",
               drive->transfer_index, drive->transfer_length, 
               drive->hd_state.cur_sector_num);

    // Reject writes past the end of the requested transfer
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
                   drive->transfer_length, drive->transfer_index,
                   drive->transfer_index + length);
        return -1;
    }

    // A guest write should never straddle a sector boundary; warn but continue
    if (data_offset + length > HD_SECTOR_SIZE) { 
       PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
    }

    // Copy data into our buffer - there will be room due to
    // (a) the ata_write test below is flushing sectors
    // (b) if we somehow get a sector-stradling write (an error), this will
    //     be OK since the buffer itself is >1 sector in memory
    memcpy(drive->data_buf + data_offset, src, length);

    drive->transfer_index += length;

    if ((data_offset+length) >= HD_SECTOR_SIZE) {
        // Write out the sector we just finished
        if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
            return -1;
        }

        // go onto next sector
        drive->current_lba++;
    }

    /* This is the trigger for interrupt injection.
     * For write single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
        (drive->transfer_index == drive->transfer_length)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
            channel->status.data_req = 1;
        } else {
            PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
            // This was the final read of the request
            channel->status.data_req = 0;
        }

        channel->status.ready = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1217
1218
1219
/*
 * PIO data-port read path for an ATAPI CDROM.
 * Streams bytes out of drive->data_buf (one ATAPI block cached at a time),
 * asking the ATAPI layer to refill the buffer at block boundaries, and
 * updates the ATAPI interrupt-reason flags and byte-count registers as the
 * transfer progresses.
 * Returns length on success, -1 on overrun or buffer-refill failure.
 */
static int read_cd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Offset of the next byte within the currently buffered ATAPI block
    uint64_t data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
    //  int req_offset = drive->transfer_index % drive->req_len;
    
    // 0x28 is READ(10); suppress the (noisy) per-read debug output for it
    if (drive->cd_state.atapi_cmd != 0x28) {
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%llu) (req_len=%u)\n", length, drive->req_len);
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%llu, transfer idx=%llu\n", drive->transfer_length, drive->transfer_index);
    }

    

    // Reject reads past the end of the request once the buffer is also
    // exhausted (the index may legally pass transfer_length while still
    // inside the last buffered block)
    if (drive->transfer_index >= drive->transfer_length && drive->transfer_index>=DATA_BUFFER_SIZE) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n", 
                   drive->transfer_length, drive->transfer_index, 
                   drive->transfer_index + length);
        return -1;
    }

    // At a block boundary (other than the very first byte) refill the buffer
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        if (atapi_update_data_buf(ide, channel) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
            return -1;
        }
    }

    memcpy(dst, drive->data_buf + data_offset, length);
    
    drive->transfer_index += length;


    // Should the req_offset be recalculated here?????
    if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            
            channel->status.data_req = 1;

            // interrupt reason: data transfer (not command packet) phase
            drive->irq_flags.c_d = 0;

            // Update the request length in the cylinder regs
            if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
                PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
                return -1;
            }
        } else {
            // This was the final read of the request

            drive->req_len = 0;
            channel->status.data_req = 0;
            channel->status.ready = 1;
            
            // interrupt reason: command complete, no bus release
            drive->irq_flags.c_d = 1;
            drive->irq_flags.rel = 0;
        }

        // direction: device-to-host
        drive->irq_flags.io_dir = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1285
1286
1287 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1288     struct ide_drive * drive = get_selected_drive(channel);
1289
1290     channel->status.busy = 0;
1291     channel->status.ready = 1;
1292     channel->status.write_fault = 0;
1293     channel->status.seek_complete = 1;
1294     channel->status.corrected = 0;
1295     channel->status.error = 0;
1296                 
1297     
1298     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1299     drive->transfer_index += length;
1300     
1301     if (drive->transfer_index >= drive->transfer_length) {
1302         channel->status.data_req = 0;
1303     }
1304     
1305     return length;
1306 }
1307
1308
1309
1310 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1311     struct ide_internal * ide = priv_data;
1312     struct ide_channel * channel = get_selected_channel(ide, port);
1313     struct ide_drive * drive = get_selected_drive(channel);
1314
1315     //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
1316
1317     if ((channel->cmd_reg == ATA_IDENTIFY) ||
1318         (channel->cmd_reg == ATA_PIDENTIFY)) {
1319         return read_drive_id((uint8_t *)dst, length, ide, channel);
1320     }
1321
1322     if (drive->drive_type == BLOCK_CDROM) {
1323         if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1324             PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1325             return -1;
1326         }
1327     } else if (drive->drive_type == BLOCK_DISK) {
1328         if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1329             PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
1330             return -1;
1331         }
1332     } else {
1333         memset((uint8_t *)dst, 0, length);
1334     }
1335
1336     return length;
1337 }
1338
1339 // For the write side, we care both about
1340 // direct PIO writes to a drive as well as 
1341 // writes that pass a packet through to an CD
1342 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1343     struct ide_internal * ide = priv_data;
1344     struct ide_channel * channel = get_selected_channel(ide, port);
1345     struct ide_drive * drive = get_selected_drive(channel);
1346
1347     PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n", 
1348             port, *(uint32_t *)src, length);
1349
1350     if (drive->drive_type == BLOCK_CDROM) {
1351         if (channel->cmd_reg == ATA_PACKETCMD) { 
1352             // short command packet - no check for space... 
1353             memcpy(drive->data_buf + drive->transfer_index, src, length);
1354             drive->transfer_index += length;
1355             if (drive->transfer_index >= drive->transfer_length) {
1356                 if (atapi_handle_packet(core, ide, channel) == -1) {
1357                     PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1358                     return -1;
1359                 }
1360             }
1361         } else {
1362             PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1363             return -1;
1364         }
1365     } else if (drive->drive_type == BLOCK_DISK) {
1366         if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1367             PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1368             return -1;
1369         }
1370     } else {
1371         // nothing ... do not support writable cd
1372     }
1373
1374     return length;
1375 }
1376
1377 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1378     struct ide_internal * ide = priv_data;
1379     struct ide_channel * channel = get_selected_channel(ide, port);
1380     struct ide_drive * drive = get_selected_drive(channel);
1381             
1382     if (length != 1) {
1383         PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1384         return -1;
1385     }
1386
1387     PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1388
1389     switch (port) {
1390         // reset and interrupt enable
1391         case PRI_CTRL_PORT:
1392         case SEC_CTRL_PORT: {
1393             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1394
1395             // only reset channel on a 0->1 reset bit transition
1396             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1397                 channel_reset(channel);
1398             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1399                 channel_reset_complete(channel);
1400             }
1401
1402             channel->ctrl_reg.val = tmp_ctrl->val;          
1403             break;
1404         }
1405         case PRI_FEATURES_PORT:
1406         case SEC_FEATURES_PORT:
1407             channel->features.val = *(uint8_t *)src;
1408             break;
1409
1410         case PRI_SECT_CNT_PORT:
1411         case SEC_SECT_CNT_PORT:
1412             // update CHS and LBA28 state
1413             channel->drives[0].sector_count = *(uint8_t *)src;
1414             channel->drives[1].sector_count = *(uint8_t *)src;
1415
1416             // update LBA48 state
1417             if (is_lba48(channel)) {
1418                 uint16_t val = *(uint8_t*)src; // top bits zero;
1419                 if (!channel->drives[0].lba48.sector_count_state) { 
1420                     channel->drives[0].lba48.sector_count = val<<8;
1421                 } else {
1422                     channel->drives[0].lba48.sector_count |= val;
1423                 }
1424                 channel->drives[0].lba48.sector_count_state ^= 1;
1425                 if (!channel->drives[1].lba48.sector_count_state) { 
1426                     channel->drives[1].lba48.sector_count = val<<8;
1427                 } else {
1428                     channel->drives[1].lba48.sector_count |= val;
1429                 }
1430                 channel->drives[0].lba48.sector_count_state ^= 1;
1431             }
1432             
1433             break;
1434
1435         case PRI_SECT_NUM_PORT:
1436         case SEC_SECT_NUM_PORT:
1437             // update CHS and LBA28 state
1438             channel->drives[0].sector_num = *(uint8_t *)src;
1439             channel->drives[1].sector_num = *(uint8_t *)src;
1440
1441             // update LBA48 state
1442             if (is_lba48(channel)) {
1443                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1444                 if (!channel->drives[0].lba48.lba41_state) { 
1445                     channel->drives[0].lba48.lba |= val<<24; 
1446                 } else {
1447                     channel->drives[0].lba48.lba |= val;
1448                 }
1449                 channel->drives[0].lba48.lba41_state ^= 1;
1450                 if (!channel->drives[1].lba48.lba41_state) { 
1451                     channel->drives[1].lba48.lba |= val<<24; 
1452                 } else {
1453                     channel->drives[1].lba48.lba |= val;
1454                 }
1455                 channel->drives[1].lba48.lba41_state ^= 1;
1456             }
1457
1458             break;
1459         case PRI_CYL_LOW_PORT:
1460         case SEC_CYL_LOW_PORT:
1461             // update CHS and LBA28 state
1462             channel->drives[0].cylinder_low = *(uint8_t *)src;
1463             channel->drives[1].cylinder_low = *(uint8_t *)src;
1464
1465             // update LBA48 state
1466             if (is_lba48(channel)) {
1467                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1468                 if (!channel->drives[0].lba48.lba52_state) { 
1469                     channel->drives[0].lba48.lba |= val<<32; 
1470                 } else {
1471                     channel->drives[0].lba48.lba |= val<<8;
1472                 }
1473                 channel->drives[0].lba48.lba52_state ^= 1;
1474                 if (!channel->drives[1].lba48.lba52_state) { 
1475                     channel->drives[1].lba48.lba |= val<<32; 
1476                 } else {
1477                     channel->drives[1].lba48.lba |= val<<8;
1478                 }
1479                 channel->drives[1].lba48.lba52_state ^= 1;
1480             }
1481
1482             break;
1483
1484         case PRI_CYL_HIGH_PORT:
1485         case SEC_CYL_HIGH_PORT:
1486             // update CHS and LBA28 state
1487             channel->drives[0].cylinder_high = *(uint8_t *)src;
1488             channel->drives[1].cylinder_high = *(uint8_t *)src;
1489
1490             // update LBA48 state
1491             if (is_lba48(channel)) {
1492                 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1493                 if (!channel->drives[0].lba48.lba63_state) { 
1494                     channel->drives[0].lba48.lba |= val<<40; 
1495                 } else {
1496                     channel->drives[0].lba48.lba |= val<<16;
1497                 }
1498                 channel->drives[0].lba48.lba63_state ^= 1;
1499                 if (!channel->drives[1].lba48.lba63_state) { 
1500                     channel->drives[1].lba48.lba |= val<<40; 
1501                 } else {
1502                     channel->drives[1].lba48.lba |= val<<16;
1503                 }
1504                 channel->drives[1].lba48.lba63_state ^= 1;
1505             }
1506
1507             break;
1508
1509         case PRI_DRV_SEL_PORT:
1510         case SEC_DRV_SEL_PORT: {
1511             struct ide_drive_head_reg nh, oh;
1512
1513             oh.val = channel->drive_head.val;
1514             channel->drive_head.val = nh.val = *(uint8_t *)src;
1515
1516             // has LBA flipped?
1517             if ((oh.val & 0xe0) != (nh.val & 0xe0)) {
1518                 // reset LBA48 state
1519                 channel->drives[0].lba48.sector_count_state=0;
1520                 channel->drives[0].lba48.lba41_state=0;
1521                 channel->drives[0].lba48.lba52_state=0;
1522                 channel->drives[0].lba48.lba63_state=0;
1523                 channel->drives[1].lba48.sector_count_state=0;
1524                 channel->drives[1].lba48.lba41_state=0;
1525                 channel->drives[1].lba48.lba52_state=0;
1526                 channel->drives[1].lba48.lba63_state=0;
1527             }
1528             
1529
1530             drive = get_selected_drive(channel);
1531
1532             // Selecting a non-present device is a no-no
1533             if (drive->drive_type == BLOCK_NONE) {
1534                 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1535                 channel->error_reg.abort = 1;
1536                 channel->status.error = 1;
1537             } else {
1538                 channel->status.busy = 0;
1539                 channel->status.ready = 1;
1540                 channel->status.data_req = 0;
1541                 channel->status.error = 0;
1542                 channel->status.seek_complete = 1;
1543                 
1544                 channel->dma_status.active = 0;
1545                 channel->dma_status.err = 0;
1546             }
1547
1548             break;
1549         }
1550         default:
1551             PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
1552             return -1;
1553     }
1554     return length;
1555 }
1556
1557
1558 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1559     struct ide_internal * ide = priv_data;
1560     struct ide_channel * channel = get_selected_channel(ide, port);
1561     struct ide_drive * drive = get_selected_drive(channel);
1562     
1563     if (length != 1) {
1564         PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1565         return -1;
1566     }
1567     
1568     PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1569
1570     if ((port == PRI_ADDR_REG_PORT) ||
1571         (port == SEC_ADDR_REG_PORT)) {
1572         // unused, return 0xff
1573         *(uint8_t *)dst = 0xff;
1574         return length;
1575     }
1576
1577
1578     // if no drive is present just return 0 + reserved bits
1579     if (drive->drive_type == BLOCK_NONE) {
1580         if ((port == PRI_DRV_SEL_PORT) ||
1581             (port == SEC_DRV_SEL_PORT)) {
1582             *(uint8_t *)dst = 0xa0;
1583         } else {
1584             *(uint8_t *)dst = 0;
1585         }
1586
1587         return length;
1588     }
1589
1590     switch (port) {
1591
1592         // This is really the error register.
1593         case PRI_FEATURES_PORT:
1594         case SEC_FEATURES_PORT:
1595             *(uint8_t *)dst = channel->error_reg.val;
1596             break;
1597             
1598         case PRI_SECT_CNT_PORT:
1599         case SEC_SECT_CNT_PORT:
1600             *(uint8_t *)dst = drive->sector_count;
1601             break;
1602
1603         case PRI_SECT_NUM_PORT:
1604         case SEC_SECT_NUM_PORT:
1605             *(uint8_t *)dst = drive->sector_num;
1606             break;
1607
1608         case PRI_CYL_LOW_PORT:
1609         case SEC_CYL_LOW_PORT:
1610             *(uint8_t *)dst = drive->cylinder_low;
1611             break;
1612
1613
1614         case PRI_CYL_HIGH_PORT:
1615         case SEC_CYL_HIGH_PORT:
1616             *(uint8_t *)dst = drive->cylinder_high;
1617             break;
1618
1619         case PRI_DRV_SEL_PORT:
1620         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1621             *(uint8_t *)dst = channel->drive_head.val;
1622             break;
1623
1624         case PRI_CTRL_PORT:
1625         case SEC_CTRL_PORT:
1626         case PRI_CMD_PORT:
1627         case SEC_CMD_PORT:
1628             // Something about lowering interrupts here....
1629             *(uint8_t *)dst = channel->status.val;
1630             break;
1631
1632         default:
1633             PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1634             return -1;
1635     }
1636
1637     PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
1638
1639     return length;
1640 }
1641
1642
1643
1644 static void init_drive(struct ide_drive * drive) {
1645
1646     drive->sector_count = 0x01;
1647     drive->sector_num = 0x01;
1648     drive->cylinder = 0x0000;
1649
1650     drive->drive_type = BLOCK_NONE;
1651
1652     memset(drive->model, 0, sizeof(drive->model));
1653
1654     drive->transfer_index = 0;
1655     drive->transfer_length = 0;
1656     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1657
1658     drive->num_cylinders = 0;
1659     drive->num_heads = 0;
1660     drive->num_sectors = 0;
1661     
1662
1663     drive->private_data = NULL;
1664     drive->ops = NULL;
1665 }
1666
1667 static void init_channel(struct ide_channel * channel) {
1668     int i = 0;
1669
1670     channel->error_reg.val = 0x01;
1671
1672     //** channel->features = 0x0;
1673
1674     channel->drive_head.val = 0x00;
1675     channel->status.val = 0x00;
1676     channel->cmd_reg = 0x00;
1677     channel->ctrl_reg.val = 0x08;
1678
1679     channel->dma_cmd.val = 0;
1680     channel->dma_status.val = 0;
1681     channel->dma_prd_addr = 0;
1682     channel->dma_tbl_index = 0;
1683
1684     for (i = 0; i < 2; i++) {
1685         init_drive(&(channel->drives[i]));
1686     }
1687
1688 }
1689
1690
1691 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1692     PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1693     /*
1694     struct ide_internal * ide = (struct ide_internal *)(private_data);
1695
1696     PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1697     */
1698
1699     return 0;
1700 }
1701
1702 static int init_ide_state(struct ide_internal * ide) {
1703
1704     /* 
1705      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1706      */
1707
1708     init_channel(&(ide->channels[0]));
1709     ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1710
1711     init_channel(&(ide->channels[1]));
1712     ide->channels[1].irq = SEC_DEFAULT_IRQ ;
1713
1714
1715     return 0;
1716 }
1717
1718
1719
1720
/* 
 * Device-manager teardown hook: release the controller state.
 * TODO(review): should this also deregister from the PCI bus?
 * Always returns 0.
 */
static int ide_free(struct ide_internal * ide) {
    V3_Free(ide);
    return 0;
}
1729
1730 #ifdef V3_CONFIG_CHECKPOINT
1731
1732 #include <palacios/vmm_sprintf.h>
1733
/* 
 * Checkpoint-save the full IDE controller state.
 *
 * Layout: an (empty) top-level context named <id>, then per-channel
 * contexts "<id>-<ch_num>" and per-drive contexts
 * "<id>-<ch_num>-<drive_num>".  ide_load_extended() must read these
 * back in exactly the same order with the same keys.
 *
 * Returns 0 on success, -1 on failure (partial state may already have
 * been written to the checkpoint).
 */
static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    struct v3_chkpt_ctx *ctx=0;   // currently open context; nonzero => must close on error path
    int ch_num = 0;
    int drive_num = 0;
    char buf[128];                // scratch buffer for context names
    

    ctx=v3_chkpt_open_ctx(chkpt,id);
    
    if (!ctx) { 
      PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
      goto savefailout;
    }

    // nothing saved yet
    
    v3_chkpt_close_ctx(ctx);ctx=0;
   

    for (ch_num = 0; ch_num < 2; ch_num++) {
        struct ide_channel * ch = &(ide->channels[ch_num]);

        snprintf(buf, 128, "%s-%d", id, ch_num);

        ctx = v3_chkpt_open_ctx(chkpt, buf);
        
        if (!ctx) { 
          PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
          goto savefailout;
        }

        /* Per-channel register and DMA-engine state */
        V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
        V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
        V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);



        v3_chkpt_close_ctx(ctx); ctx=0;

        for (drive_num = 0; drive_num < 2; drive_num++) {
            struct ide_drive * drive = &(ch->drives[drive_num]);
            
            snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);

            ctx = v3_chkpt_open_ctx(chkpt, buf);
            
            if (!ctx) { 
              PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
              goto savefailout;
            }

            /* Common per-drive state */
            V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
            V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);

            V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);

            V3_CHKPT_SAVE(ctx, "DATA_BUF",  drive->data_buf, savefailout);


            /* For now we'll just pack the type specific data at the end... */
            /* We should probably add a new context here in the future... */
            if (drive->drive_type == BLOCK_CDROM) {
              V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
            } else if (drive->drive_type == BLOCK_DISK) {
              V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
              V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
              V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
            } else if (drive->drive_type == BLOCK_NONE) { 
              // no drive connected, so no data
            } else {
              PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
              goto savefailout;
            }

            /* LBA48 addressing state (saved for all drive types) */
            V3_CHKPT_SAVE(ctx, "LBA48_LBA", drive->lba48.lba, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, savefailout);
            V3_CHKPT_SAVE(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, savefailout);
            
            v3_chkpt_close_ctx(ctx); ctx=0;
        }
    }

// goodout:
    return 0;

 savefailout:
    PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
    if (ctx) {v3_chkpt_close_ctx(ctx); }
    return -1;
}
1841
1842
1843
1844 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1845     struct ide_internal * ide = (struct ide_internal *)private_data;
1846     struct v3_chkpt_ctx *ctx=0;
1847     int ch_num = 0;
1848     int drive_num = 0;
1849     char buf[128];
1850     
1851     ctx=v3_chkpt_open_ctx(chkpt,id);
1852     
1853     if (!ctx) { 
1854       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1855       goto loadfailout;
1856     }
1857
1858     // nothing saved yet
1859     
1860     v3_chkpt_close_ctx(ctx);ctx=0;
1861    
1862
1863     for (ch_num = 0; ch_num < 2; ch_num++) {
1864         struct ide_channel * ch = &(ide->channels[ch_num]);
1865
1866         snprintf(buf, 128, "%s-%d", id, ch_num);
1867
1868         ctx = v3_chkpt_open_ctx(chkpt, buf);
1869         
1870         if (!ctx) { 
1871           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1872           goto loadfailout;
1873         }
1874
1875         V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1876         V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1877         V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1878         V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1879         V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1880         V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1881         V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1882         V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1883         V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1884         V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1885
1886         v3_chkpt_close_ctx(ctx); ctx=0;
1887
1888         for (drive_num = 0; drive_num < 2; drive_num++) {
1889             struct ide_drive * drive = &(ch->drives[drive_num]);
1890             
1891             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1892
1893             ctx = v3_chkpt_open_ctx(chkpt, buf);
1894             
1895             if (!ctx) { 
1896               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1897               goto loadfailout;
1898             }
1899
1900             V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1901             V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1902             V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1903             V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1904
1905             V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1906             V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1907             V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1908
1909             V3_CHKPT_LOAD(ctx, "DATA_BUF",  drive->data_buf, loadfailout);
1910
1911             
1912             /* For now we'll just pack the type specific data at the end... */
1913             /* We should probably add a new context here in the future... */
1914             if (drive->drive_type == BLOCK_CDROM) {
1915               V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1916               V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1917               V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1918             } else if (drive->drive_type == BLOCK_DISK) {
1919               V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1920               V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1921               V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1922             } else if (drive->drive_type == BLOCK_NONE) { 
1923               // no drive connected, so no data
1924             } else {
1925               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1926               goto loadfailout;
1927             }
1928
1929             V3_CHKPT_LOAD(ctx, "LBA48_LBA", drive->lba48.lba, loadfailout);
1930             V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, loadfailout);
1931             V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, loadfailout);
1932             V3_CHKPT_LOAD(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, loadfailout);
1933             V3_CHKPT_LOAD(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, loadfailout);
1934             V3_CHKPT_LOAD(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, loadfailout);
1935             
1936         }
1937     }
1938 // goodout:
1939     return 0;
1940
1941  loadfailout:
1942     PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1943     if (ctx) {v3_chkpt_close_ctx(ctx); }
1944     return -1;
1945
1946 }
1947
1948
1949
1950 #endif
1951
1952
/* Device-manager callbacks for the IDE controller.  Guest access goes
 * through hooked I/O ports, so only free and (optionally) the extended
 * checkpoint save/load hooks are provided here. */
static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))ide_free,
#ifdef V3_CONFIG_CHECKPOINT
    .save_extended = ide_save_extended,
    .load_extended = ide_load_extended
#endif
};
1960
1961
1962
1963
1964 static int connect_fn(struct v3_vm_info * vm, 
1965                       void * frontend_data, 
1966                       struct v3_dev_blk_ops * ops, 
1967                       v3_cfg_tree_t * cfg, 
1968                       void * private_data) {
1969     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1970     struct ide_channel * channel = NULL;
1971     struct ide_drive * drive = NULL;
1972
1973     char * bus_str = v3_cfg_val(cfg, "bus_num");
1974     char * drive_str = v3_cfg_val(cfg, "drive_num");
1975     char * type_str = v3_cfg_val(cfg, "type");
1976     char * model_str = v3_cfg_val(cfg, "model");
1977     uint_t bus_num = 0;
1978     uint_t drive_num = 0;
1979
1980
1981     if ((!type_str) || (!drive_str) || (!bus_str)) {
1982         PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1983         return -1;
1984     }
1985
1986     bus_num = atoi(bus_str);
1987     drive_num = atoi(drive_str);
1988
1989     channel = &(ide->channels[bus_num]);
1990     drive = &(channel->drives[drive_num]);
1991
1992     if (drive->drive_type != BLOCK_NONE) {
1993         PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1994         return -1;
1995     }
1996
1997     if (model_str != NULL) {
1998         strncpy(drive->model, model_str, sizeof(drive->model));
1999         drive->model[sizeof(drive->model)-1] = 0;
2000     }
2001
2002     if (strcasecmp(type_str, "cdrom") == 0) {
2003         drive->drive_type = BLOCK_CDROM;
2004
2005         while (strlen((char *)(drive->model)) < 40) {
2006             strcat((char*)(drive->model), " ");
2007         }
2008
2009     } else if (strcasecmp(type_str, "hd") == 0) {
2010         drive->drive_type = BLOCK_DISK;
2011
2012         drive->hd_state.accessed = 0;
2013         drive->hd_state.mult_sector_num = 1;
2014
2015         drive->num_sectors = 63;
2016         drive->num_heads = 16;
2017         drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
2018     } else {
2019         PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
2020         return -1;
2021     }
2022  
2023     drive->ops = ops;
2024
2025     if (ide->ide_pci) {
2026         // Hardcode this for now, but its not a good idea....
2027         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
2028     }
2029  
2030     drive->private_data = private_data;
2031
2032     return 0;
2033 }
2034
2035
2036
2037
/* 
 * Device-manager init entry point for the "IDE" device class.
 *
 * Allocates the controller state, locates the PCI bus and southbridge
 * (if configured), registers with the device manager, hooks all legacy
 * IDE I/O ports for both channels, registers the PIIX3 IDE PCI function,
 * and finally registers as a block frontend so backends can connect via
 * connect_fn().
 *
 * Returns 0 on success, -1 on failure (allocated state is released or
 * torn down via v3_remove_device on the error paths).
 */
static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct ide_internal * ide  = NULL;
    char * dev_id = v3_cfg_val(cfg, "ID");
    int ret = 0;

    PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");

    ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));

    if (ide == NULL) {
        PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
        return -1;
    }

    memset(ide, 0, sizeof(struct ide_internal));

    ide->vm = vm;
    ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));

    /* If a PCI bus is configured, the matching southbridge ("controller")
       must exist too, since the IDE PCI function attaches next to it */
    if (ide->pci_bus != NULL) {
        struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));

        if (!southbridge) {
            PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
            V3_Free(ide);
            return -1;
        }

        ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
    } else {
        PrintError(vm,VCORE_NONE,"Strange - you don't have a PCI bus\n");
    }

    PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");

    struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);

    if (dev == NULL) {
        PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
        V3_Free(ide);
        return -1;
    }

    if (init_ide_state(ide) == -1) {
        PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
        v3_remove_device(dev);
        return -1;
    }

    PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");

    /* Primary channel legacy port hooks (0x1f0-0x1f7) */
    ret |= v3_dev_hook_io(dev, PRI_DATA_PORT, 
                          &read_data_port, &write_data_port);
    ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CMD_PORT, 
                          &read_port_std, &write_cmd_port);

    /* Secondary channel legacy port hooks */
    ret |= v3_dev_hook_io(dev, SEC_DATA_PORT, 
                          &read_data_port, &write_data_port);
    ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
                          &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CMD_PORT, 
                          &read_port_std, &write_cmd_port);
  

    /* Control and address registers for both channels */
    ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT, 
                          &read_port_std, &write_port_std);

    ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT, 
                          &read_port_std, &write_port_std);
  

    ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
                          &read_port_std, &write_port_std);

    ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
                          &read_port_std, &write_port_std);


    if (ret != 0) {
        PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
        v3_remove_device(dev);
        return -1;
    }


    /* Register the PIIX3 IDE PCI function; BAR4 is the bus-master DMA
       I/O region (16 ports, relocatable) */
    if (ide->pci_bus) {
        struct v3_pci_bar bars[6];
        struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
        struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
        struct pci_device * pci_dev = NULL;
        int i;

        V3_Print(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");

        for (i = 0; i < 6; i++) {
            bars[i].type = PCI_BAR_NONE;
        }

        bars[4].type = PCI_BAR_IO;
        //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
        bars[4].default_base_port = -1;
        bars[4].num_ports = 16;

        bars[4].io_read = read_dma_port;
        bars[4].io_write = write_dma_port;
        bars[4].private_data = ide;

        pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
                                         "PIIX3_IDE", bars,
                                         pci_config_update, NULL, NULL, NULL, ide);

        if (pci_dev == NULL) {
            PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i); 
            v3_remove_device(dev);
            return -1;
        }

        /* This is for CMD646 devices 
           pci_dev->config_header.vendor_id = 0x1095;
           pci_dev->config_header.device_id = 0x0646;
           pci_dev->config_header.revision = 0x8f07;
        */

        /* Intel PIIX3 IDE function IDs */
        pci_dev->config_header.vendor_id = 0x8086;
        pci_dev->config_header.device_id = 0x7010;
        pci_dev->config_header.revision = 0x00;

        pci_dev->config_header.prog_if = 0x80; // Master IDE device
        pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
        pci_dev->config_header.class = PCI_CLASS_STORAGE;

        pci_dev->config_header.command = 0;
        pci_dev->config_header.status = 0x0280;

        ide->ide_pci = pci_dev;


    }

    /* Become a block frontend so disk/cdrom backends can attach via connect_fn() */
    if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
        PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
        v3_remove_device(dev);
        return -1;
    }
    

    PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");

    return 0;
}
2210
2211
2212 device_register("IDE", ide_init)
2213
2214
2215
2216
2217 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num, 
2218                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2219
2220     struct ide_internal * ide  = ide_data;  
2221     struct ide_channel * channel = &(ide->channels[channel_num]);
2222     struct ide_drive * drive = &(channel->drives[drive_num]);
2223     
2224     if (drive->drive_type == BLOCK_NONE) {
2225         return -1;
2226     }
2227
2228     *cylinders = drive->num_cylinders;
2229     *heads = drive->num_heads;
2230     *sectors = drive->num_sectors;
2231
2232     return 0;
2233 }