Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:
  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
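For example, to list the available remote branches and then track one of the release branches (substitute the branch name you want):

  git branch -r
  git checkout --track -b <branch-name> origin/<branch-name>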


IDE Bug Fix

File: palacios/src/devices/ide.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef V3_CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
36
37
38 #define PRI_DATA_PORT         0x1f0
39 #define PRI_FEATURES_PORT     0x1f1
40 #define PRI_SECT_CNT_PORT     0x1f2
41 #define PRI_SECT_NUM_PORT     0x1f3
42 #define PRI_CYL_LOW_PORT      0x1f4
43 #define PRI_CYL_HIGH_PORT     0x1f5
44 #define PRI_DRV_SEL_PORT      0x1f6
45 #define PRI_CMD_PORT          0x1f7
46 #define PRI_CTRL_PORT         0x3f6
47 #define PRI_ADDR_REG_PORT     0x3f7
48
49 #define SEC_DATA_PORT         0x170
50 #define SEC_FEATURES_PORT     0x171
51 #define SEC_SECT_CNT_PORT     0x172
52 #define SEC_SECT_NUM_PORT     0x173
53 #define SEC_CYL_LOW_PORT      0x174
54 #define SEC_CYL_HIGH_PORT     0x175
55 #define SEC_DRV_SEL_PORT      0x176
56 #define SEC_CMD_PORT          0x177
57 #define SEC_CTRL_PORT         0x376
58 #define SEC_ADDR_REG_PORT     0x377
59
60
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
63
64 #define DATA_BUFFER_SIZE 2048
65
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
68
69
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
71                                           "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72                                            "PRI_CTRL", "PRI_ADDR_REG"};
73
74
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
76                                           "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77                                            "SEC_CTRL", "SEC_ADDR_REG"};
78
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80                                            "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
81
82
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
105 struct ide_cd_state {
106     struct atapi_sense_data sense;
107
108     uint8_t atapi_cmd;
109     struct atapi_error_recovery err_recovery;
110 };
111
112 struct ide_hd_state {
113     uint32_t accessed;
114
115     /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116     uint32_t mult_sector_num;
117
118     /* This is the current op sector size:
119      * for multiple sector ops this equals mult_sector_num
120      * for standard ops this equals 1
121      */
122     uint32_t cur_sector_num;
123 };
124
125 struct ide_drive {
126     // Command Registers
127
128     v3_block_type_t drive_type;
129
130     struct v3_dev_blk_ops * ops;
131
132     union {
133         struct ide_cd_state cd_state;
134         struct ide_hd_state hd_state;
135     };
136
137     char model[41];
138
139     // Where we are in the data transfer
140     uint32_t transfer_index;
141
142     // the length of a transfer
143     // calculated for easy access
144     uint32_t transfer_length;
145
146     uint64_t current_lba;
147
148     // We have a local data buffer that we use for IO port accesses
149     uint8_t data_buf[DATA_BUFFER_SIZE];
150
151
152     uint32_t num_cylinders;
153     uint32_t num_heads;
154     uint32_t num_sectors;
155
156     void * private_data;
157     
158     union {
159         uint8_t sector_count;             // 0x1f2,0x172
160         struct atapi_irq_flags irq_flags;
161     } __attribute__((packed));
162
163     union {
164         uint8_t sector_num;               // 0x1f3,0x173
165         uint8_t lba0;
166     } __attribute__((packed));
167
168     union {
169         uint16_t cylinder;
170         uint16_t lba12;
171         
172         struct {
173             uint8_t cylinder_low;       // 0x1f4,0x174
174             uint8_t cylinder_high;      // 0x1f5,0x175
175         } __attribute__((packed));
176         
177         struct {
178             uint8_t lba1;
179             uint8_t lba2;
180         } __attribute__((packed));
181         
182         
183         // The transfer length requested by the CPU 
184         uint16_t req_len;
185     } __attribute__((packed));
186
187 };
188
189
190
191 struct ide_channel {
192     struct ide_drive drives[2];
193
194     // Command Registers
195     struct ide_error_reg error_reg;     // [read] 0x1f1,0x171
196
197     struct ide_features_reg features;
198
199     struct ide_drive_head_reg drive_head; // 0x1f6,0x176
200
201     struct ide_status_reg status;       // [read] 0x1f7,0x177
202     uint8_t cmd_reg;                // [write] 0x1f7,0x177
203
204     int irq; // this is temporary until we add PCI support
205
206     // Control Registers
207     struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
208
209     union {
210         uint8_t dma_ports[8];
211         struct {
212             struct ide_dma_cmd_reg dma_cmd;
213             uint8_t rsvd1;
214             struct ide_dma_status_reg dma_status;
215             uint8_t rsvd2;
216             uint32_t dma_prd_addr;
217         } __attribute__((packed));
218     } __attribute__((packed));
219
220     uint32_t dma_tbl_index;
221 };
222
223
224
225 struct ide_internal {
226     struct ide_channel channels[2];
227
228     struct v3_southbridge * southbridge;
229     struct vm_device * pci_bus;
230
231     struct pci_device * ide_pci;
232
233     struct v3_vm_info * vm;
234 };
235
236
237
238
239
240 /* Utility functions */
241
242 static inline uint16_t be_to_le_16(const uint16_t val) {
243     uint8_t * buf = (uint8_t *)&val;
244     return (buf[0] << 8) | (buf[1]) ;
245 }
246
247 static inline uint16_t le_to_be_16(const uint16_t val) {
248     return be_to_le_16(val);
249 }
250
251
252 static inline uint32_t be_to_le_32(const uint32_t val) {
253     uint8_t * buf = (uint8_t *)&val;
254     return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
255 }
256
257 static inline uint32_t le_to_be_32(const uint32_t val) {
258     return be_to_le_32(val);
259 }
260
261
262 static inline int get_channel_index(ushort_t port) {
263     if (((port & 0xfff8) == 0x1f0) ||
264         ((port & 0xfffe) == 0x3f6) || 
265         ((port & 0xfff8) == 0xc000)) {
266         return 0;
267     } else if (((port & 0xfff8) == 0x170) ||
268                ((port & 0xfffe) == 0x376) ||
269                ((port & 0xfff8) == 0xc008)) {
270         return 1;
271     }
272
273     return -1;
274 }
275
276 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
277     int channel_idx = get_channel_index(port);    
278     return &(ide->channels[channel_idx]);
279 }
280
281 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
282     return &(channel->drives[channel->drive_head.drive_sel]);
283 }
284
285
286 static inline int is_lba_enabled(struct ide_channel * channel) {
287     return channel->drive_head.lba_mode;
288 }
289
290
291 /* Drive Commands */
292 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
293     if (channel->ctrl_reg.irq_disable == 0) {
294
295         PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
296
297         channel->dma_status.int_gen = 1;
298         v3_raise_irq(ide->vm, channel->irq);
299     } else {
300         PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
301     }
302 }
303
304
305 static void drive_reset(struct ide_drive * drive) {
306     drive->sector_count = 0x01;
307     drive->sector_num = 0x01;
308
309     PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
310     
311     if (drive->drive_type == BLOCK_CDROM) {
312         drive->cylinder = 0xeb14;
313     } else {
314         drive->cylinder = 0x0000;
315         //drive->hd_state.accessed = 0;
316     }
317
318
319     memset(drive->data_buf, 0, sizeof(drive->data_buf));
320     drive->transfer_index = 0;
321
322     // Send the reset signal to the connected device callbacks
323     //     channel->drives[0].reset();
324     //    channel->drives[1].reset();
325 }
326
327 static void channel_reset(struct ide_channel * channel) {
328     
329     // set busy and seek complete flags
330     channel->status.val = 0x90;
331
332     // Clear errors (0x01 is the post-reset diagnostic code for "no error detected")
333     channel->error_reg.val = 0x01;
334
335     // clear commands
336     channel->cmd_reg = 0x00;
337
338     channel->ctrl_reg.irq_disable = 0;
339 }
340
341 static void channel_reset_complete(struct ide_channel * channel) {
342     channel->status.busy = 0;
343     channel->status.ready = 1;
344
345     channel->drive_head.head_num = 0;    
346     
347     drive_reset(&(channel->drives[0]));
348     drive_reset(&(channel->drives[1]));
349 }
350
351
352 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
353     channel->status.val = 0x41; // Error + ready
354     channel->error_reg.val = 0x04; // ABRT (command aborted)
355
356     ide_raise_irq(ide, channel);
357 }
358
359
360 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
361 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
362
363
364 /* ATAPI functions */
365 #include "atapi.h"
366
367 /* ATA functions */
368 #include "ata.h"
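/* Note: the ata_* and atapi_* helpers used below (ata_read, ata_write,
 * atapi_read_chunk, atapi_handle_packet, ...) come from these two headers,
 * which are compiled directly into this file.
 */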
369
370
371
372 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
373     struct ide_dma_prd prd_entry;
374     int index = 0;
375
376     V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
377
378     while (1) {
379         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
380         int ret;
381
382         ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
383         
384         if (ret != sizeof(struct ide_dma_prd)) {
385             PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
386             return;
387         }
388
389         V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
390                    prd_entry.base_addr, 
391                    (prd_entry.size == 0) ? 0x10000 : prd_entry.size, 
392                    prd_entry.end_of_table);
393
394         if (prd_entry.end_of_table) {
395             break;
396         }
397
398         index++;
399     }
400
401     return;
402 }
403
404
405 /* IO Operations */
406 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
407     struct ide_drive * drive = get_selected_drive(channel);
408     // This is at top level scope to do the EOT test at the end
409     struct ide_dma_prd prd_entry = {};
410     uint_t bytes_left = drive->transfer_length;
411
412     // Read in the data buffer....
413     // Read a sector/block at a time until the prd entry is full.
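    /* Bus-master DMA transfers are driven by a table of PRD (Physical Region
     * Descriptor) entries located at dma_prd_addr in guest memory.  As used
     * below, each entry holds a guest-physical base address, a byte count
     * (where a count of 0 means 64KB), and an end-of-table flag that
     * terminates the transfer.
     */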
414
415 #ifdef V3_CONFIG_DEBUG_IDE
416     print_prd_table(ide, channel);
417 #endif
418
419     PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
420
421     // Loop through the disk data
422     while (bytes_left > 0) {
423         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
424         uint_t prd_bytes_left = 0;
425         uint_t prd_offset = 0;
426         int ret;
427
428         PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
429
430         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
431
432         if (ret != sizeof(struct ide_dma_prd)) {
433             PrintError(core->vm_info, core, "Could not read PRD\n");
434             return -1;
435         }
436
437         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
438                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
439
440         // loop through the PRD data....
441
442         if (prd_entry.size == 0) {
443             // a size of 0 means 64k
444             prd_bytes_left = 0x10000;
445         } else {
446             prd_bytes_left = prd_entry.size;
447         }
448
449
450         while (prd_bytes_left > 0) {
451             uint_t bytes_to_write = 0;
452
453             if (drive->drive_type == BLOCK_DISK) {
454                 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
455
456
457                 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
458                     PrintError(core->vm_info, core, "Failed to read next disk sector\n");
459                     return -1;
460                 }
461             } else if (drive->drive_type == BLOCK_CDROM) {
462                 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
463                     bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
464
465                     if (atapi_read_chunk(ide, channel) == -1) {
466                         PrintError(core->vm_info, core, "Failed to read next disk sector\n");
467                         return -1;
468                     }
469                 } else {
470                     /*
471                     PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
472                     return -1;
473                     */
474                     int cmd_ret = 0;
475
476                     //V3_Print(core->vm_info, core, "DMA of command packet\n");
477
478                     bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
479                     prd_bytes_left = bytes_to_write;
480
481
482                     // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
483                     cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, 
484                                                   bytes_to_write, drive->data_buf); 
485
486                     // verify that the packet data actually reached guest memory
487                     if (cmd_ret != bytes_to_write) {
488                         PrintError(core->vm_info, core, "Failed to copy ATAPI packet data into guest memory... (ret=%d)\n", cmd_ret);
489                         return -1;
490                     }
491                     drive->transfer_index += bytes_to_write;
492
493                     channel->status.busy = 0;
494                     channel->status.ready = 1;
495                     channel->status.data_req = 0;
496                     channel->status.error = 0;
497                     channel->status.seek_complete = 1;
498
499                     channel->dma_status.active = 0;
500                     channel->dma_status.err = 0;
501
502                     ide_raise_irq(ide, channel);
503                     
504                     return 0;
505                 }
506             }
507
508             PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n", 
509                        (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
510
511             drive->current_lba++;
512
513             ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 
514
515             if (ret != bytes_to_write) {
516                 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
517                 return -1;
518             }
519
520             PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
521
522             drive->transfer_index += ret;
523             prd_bytes_left -= ret;
524             prd_offset += ret;
525             bytes_left -= ret;
526         }
527
528         channel->dma_tbl_index++;
529
530         if (drive->drive_type == BLOCK_DISK) {
531             if (drive->transfer_index % HD_SECTOR_SIZE) {
532                 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
533                 return -1;
534             }
535         } else if (drive->drive_type == BLOCK_CDROM) {
536             if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
537                 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
538                     PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
539                     PrintError(core->vm_info, core, "transfer_index=%d, transfer_length=%d\n", 
540                                drive->transfer_index, drive->transfer_length);
541                     return -1;
542                 }
543             }
544         }
545
546
547         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
548             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
549             return -1;
550         }
551     }
552
553     /*
554       drive->irq_flags.io_dir = 1;
555       drive->irq_flags.c_d = 1;
556       drive->irq_flags.rel = 0;
557     */
558
559
560     // Update to the next PRD entry
561
562     // set DMA status
563
564     if (prd_entry.end_of_table) {
565         channel->status.busy = 0;
566         channel->status.ready = 1;
567         channel->status.data_req = 0;
568         channel->status.error = 0;
569         channel->status.seek_complete = 1;
570
571         channel->dma_status.active = 0;
572         channel->dma_status.err = 0;
573     }
574
575     ide_raise_irq(ide, channel);
576
577     return 0;
578 }
579
580
581 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
582     struct ide_drive * drive = get_selected_drive(channel);
583     // This is at top level scope to do the EOT test at the end
584     struct ide_dma_prd prd_entry = {};
585     uint_t bytes_left = drive->transfer_length;
586
587
588     PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
589
590     // Loop through disk data
591     while (bytes_left > 0) {
592         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
593         uint_t prd_bytes_left = 0;
594         uint_t prd_offset = 0;
595         int ret;
596         
597         PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
598
599         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
600
601         if (ret != sizeof(struct ide_dma_prd)) {
602             PrintError(core->vm_info, core, "Could not read PRD\n");
603             return -1;
604         }
605
606         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
607                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
608
609
610         if (prd_entry.size == 0) {
611             // a size of 0 means 64k
612             prd_bytes_left = 0x10000;
613         } else {
614             prd_bytes_left = prd_entry.size;
615         }
616
617         while (prd_bytes_left > 0) {
618             uint_t bytes_to_write = 0;
619
620
621             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
622
623
624             ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
625
626             if (ret != bytes_to_write) {
627                 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
628                 return -1;
629             }
630
631             PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
632
633
634             if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
635                 PrintError(core->vm_info, core, "Failed to write data to disk\n");
636                 return -1;
637             }
638             
639             drive->current_lba++;
640
641             drive->transfer_index += ret;
642             prd_bytes_left -= ret;
643             prd_offset += ret;
644             bytes_left -= ret;
645         }
646
647         channel->dma_tbl_index++;
648
649         if (drive->transfer_index % HD_SECTOR_SIZE) {
650             PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
651             return -1;
652         }
653
654         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
655             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
656             PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%u)...\n", 
657                        bytes_left, drive->transfer_length);
658             PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
659                        prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
660
661             print_prd_table(ide, channel);
662             return -1;
663         }
664     }
665
666     if (prd_entry.end_of_table) {
667         channel->status.busy = 0;
668         channel->status.ready = 1;
669         channel->status.data_req = 0;
670         channel->status.error = 0;
671         channel->status.seek_complete = 1;
672
673         channel->dma_status.active = 0;
674         channel->dma_status.err = 0;
675     }
676
677     ide_raise_irq(ide, channel);
678
679     return 0;
680 }
681
682
683
684 #define DMA_CMD_PORT      0x00
685 #define DMA_STATUS_PORT   0x02
686 #define DMA_PRD_PORT0     0x04
687 #define DMA_PRD_PORT1     0x05
688 #define DMA_PRD_PORT2     0x06
689 #define DMA_PRD_PORT3     0x07
690
691 #define DMA_CHANNEL_FLAG  0x08
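/* The bus-master register sets for the two channels sit back to back
 * (primary at offset 0, secondary at offset 8), so bit 3 of the port number
 * selects the channel and the low three bits index into dma_ports[].
 */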
692
693 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
694     struct ide_internal * ide = (struct ide_internal *)private_data;
695     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
696     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
697     struct ide_channel * channel = &(ide->channels[channel_flag]);
698
699     PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
700                port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
701
702     switch (port_offset) {
703         case DMA_CMD_PORT:
704             channel->dma_cmd.val = *(uint8_t *)src;
705
706             if (channel->dma_cmd.start == 0) {
707                 channel->dma_tbl_index = 0;
708             } else {
709                 channel->dma_status.active = 1;
710
711                 if (channel->dma_cmd.read == 1) {
712                     // DMA Read
713                     if (dma_read(core, ide, channel) == -1) {
714                         PrintError(core->vm_info, core, "Failed DMA Read\n");
715                         return -1;
716                     }
717                 } else {
718                     // DMA write
719                     if (dma_write(core, ide, channel) == -1) {
720                         PrintError(core->vm_info, core, "Failed DMA Write\n");
721                         return -1;
722                     }
723                 }
724
725                 channel->dma_cmd.val &= 0x09;
726             }
727
728             break;
729             
730         case DMA_STATUS_PORT: {
731             uint8_t val = *(uint8_t *)src;
732
733             if (length != 1) {
734                 PrintError(core->vm_info, core, "Invalid read length for DMA status port\n");
735                 return -1;
736             }
737
738             // bits 5-6 are taken from the written value, bit 0 (active) is preserved, and bits 1-2 (error/interrupt) are write-1-to-clear
739             channel->dma_status.val = ((val & 0x60) | 
740                                        (channel->dma_status.val & 0x01) |
741                                        (channel->dma_status.val & ~val & 0x06));
742
743             break;
744         }           
745         case DMA_PRD_PORT0:
746         case DMA_PRD_PORT1:
747         case DMA_PRD_PORT2:
748         case DMA_PRD_PORT3: {
749             uint_t addr_index = port_offset & 0x3;
750             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
751             int i = 0;
752
753             if (addr_index + length > 4) {
754                 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
755                 return -1;
756             }
757
758             for (i = 0; i < length; i++) {
759                 addr_buf[addr_index + i] = *((uint8_t *)src + i);
760             }
761
762             PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
763
764             break;
765         }
766         default:
767             PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
768             break;
769     }
770
771     return length;
772 }
773
774
775 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
776     struct ide_internal * ide = (struct ide_internal *)private_data;
777     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
778     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
779     struct ide_channel * channel = &(ide->channels[channel_flag]);
780
781     PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
782
783     if (port_offset + length > sizeof(channel->dma_ports)) {
784         PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
785         return -1;
786     }
787
788     memcpy(dst, channel->dma_ports + port_offset, length);
789     
790     PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
791
792     return length;
793 }
794
795
796
797 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
798     struct ide_internal * ide = priv_data;
799     struct ide_channel * channel = get_selected_channel(ide, port);
800     struct ide_drive * drive = get_selected_drive(channel);
801
802     if (length != 1) {
803         PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
804         return -1;
805     }
806
807     PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
808     
809     channel->cmd_reg = *(uint8_t *)src;
810     
811     switch (channel->cmd_reg) {
812
813         case ATAPI_PIDENTIFY: // ATAPI Identify Device Packet
814             if (drive->drive_type != BLOCK_CDROM) {
815                 drive_reset(drive);
816
817                 // JRL: Should we abort here?
818                 ide_abort_command(ide, channel);
819             } else {
820                 
821                 atapi_identify_device(drive);
822                 
823                 channel->error_reg.val = 0;
824                 channel->status.val = 0x58; // ready, data_req, seek_complete
825             
826                 ide_raise_irq(ide, channel);
827             }
828             break;
829         case ATAPI_IDENTIFY: // Identify Device
830             if (drive->drive_type != BLOCK_DISK) {
831                 drive_reset(drive);
832
833                 // JRL: Should we abort here?
834                 ide_abort_command(ide, channel);
835             } else {
836                 ata_identify_device(drive);
837
838                 channel->error_reg.val = 0;
839                 channel->status.val = 0x58;
840
841                 ide_raise_irq(ide, channel);
842             }
843             break;
844
845         case ATAPI_PACKETCMD: // ATAPI Command Packet
846             if (drive->drive_type != BLOCK_CDROM) {
847                 ide_abort_command(ide, channel);
848                 break;
849             }
850             drive->sector_count = 1;
851
852             channel->status.busy = 0;
853             channel->status.write_fault = 0;
854             channel->status.data_req = 1;
855             channel->status.error = 0;
856
857             // reset the data buffer...
858             drive->transfer_length = ATAPI_PACKET_SIZE;
859             drive->transfer_index = 0;
860
861             break;
862
863         case ATAPI_READ: // Read Sectors with Retry
864         case ATAPI_READ_ONCE: // Read Sectors without Retry
865             drive->hd_state.cur_sector_num = 1;
866
867             if (ata_read_sectors(ide, channel) == -1) {
868                 PrintError(core->vm_info, core, "Error reading sectors\n");
869                 return -1;
870             }
871             break;
872
873         case ATAPI_READ_EXT: // Read Sectors Extended
874             drive->hd_state.cur_sector_num = 1;
875
876             if (ata_read_sectors_ext(ide, channel) == -1) {
877                 PrintError(core->vm_info, core, "Error reading extended sectors\n");
878                 return -1;
879             }
880             break;
881
882         case ATAPI_WRITE: {// Write Sector
883             drive->hd_state.cur_sector_num = 1;
884
885             if (ata_write_sectors(ide, channel) == -1) {
886                 PrintError(core->vm_info, core, "Error writing sectors\n");
887                 return -1;
888             }
889             break;
890         }
891
892             
893
894         case ATAPI_READDMA: // Read DMA with retry
895         case ATAPI_READDMA_ONCE: { // Read DMA
896             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
897
898             if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
899                 ide_abort_command(ide, channel);
900                 return 0;
901             }
902             
903             drive->hd_state.cur_sector_num = 1;
904             
905             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
906             drive->transfer_index = 0;
907
908             if (channel->dma_status.active == 1) {
909                 // DMA Read
910                 if (dma_read(core, ide, channel) == -1) {
911                     PrintError(core->vm_info, core, "Failed DMA Read\n");
912                     return -1;
913                 }
914             }
915             break;
916         }
917
918         case ATAPI_WRITEDMA: { // Write DMA
919             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
920
921             if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
922                 ide_abort_command(ide, channel);
923                 return 0;
924             }
925
926             drive->hd_state.cur_sector_num = 1;
927
928             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
929             drive->transfer_index = 0;
930
931             if (channel->dma_status.active == 1) {
932                 // DMA Write
933                 if (dma_write(core, ide, channel) == -1) {
934                     PrintError(core->vm_info, core, "Failed DMA Write\n");
935                     return -1;
936                 }
937             }
938             break;
939         }
940         case ATAPI_STANDBYNOW1: // Standby Now 1
941         case ATAPI_IDLEIMMEDIATE: // Set Idle Immediate
942         case ATAPI_STANDBY: // Standby
943         case ATAPI_SETIDLE1: // Set Idle 1
944         case ATAPI_SLEEPNOW1: // Sleep Now 1
945         case ATAPI_STANDBYNOW2: // Standby Now 2
946         case ATAPI_IDLEIMMEDIATE2: // Idle Immediate (CFA)
947         case ATAPI_STANDBY2: // Standby 2
948         case ATAPI_SETIDLE2: // Set idle 2
949         case ATAPI_SLEEPNOW2: // Sleep Now 2
950             channel->status.val = 0;
951             channel->status.ready = 1;
952             ide_raise_irq(ide, channel);
953             break;
954
955         case ATAPI_SETFEATURES: // Set Features
956             // Prior to this the features register has been written to. 
957             // This command tells the drive to check if the new value is supported (the value is drive specific)
958             // Commonly, bit 0 enables DMA
959             // If valid the drive raises an interrupt, if not it aborts.
960
961             // Do some checking here...
962
963             channel->status.busy = 0;
964             channel->status.write_fault = 0;
965             channel->status.error = 0;
966             channel->status.ready = 1;
967             channel->status.seek_complete = 1;
968             
969             ide_raise_irq(ide, channel);
970             break;
971
972         case ATAPI_SPECIFY:  // Initialize Drive Parameters
973         case ATAPI_RECAL:  // recalibrate?
974             channel->status.error = 0;
975             channel->status.ready = 1;
976             channel->status.seek_complete = 1;
977             ide_raise_irq(ide, channel);
978             break;
979         case ATAPI_SETMULT: { // Set multiple mode (IDE Block mode) 
980             // This makes the drive transfer multiple sectors before generating an interrupt
981             uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
982
983             if (tmp_sect_num > MAX_MULT_SECTORS) {
984                 ide_abort_command(ide, channel);
985                 break;
986             }
987
988             if (drive->sector_count == 0) {
989                 drive->hd_state.mult_sector_num= 1;
990             } else {
991                 drive->hd_state.mult_sector_num = drive->sector_count;
992             }
993
994             channel->status.ready = 1;
995             channel->status.error = 0;
996
997             ide_raise_irq(ide, channel);
998
999             break;
1000         }
1001
1002         case ATAPI_DEVICE_RESET: // Reset Device
1003             drive_reset(drive);
1004             channel->error_reg.val = 0x01;
1005             channel->status.busy = 0;
1006             channel->status.ready = 1;
1007             channel->status.seek_complete = 1;
1008             channel->status.write_fault = 0;
1009             channel->status.error = 0;
1010             break;
1011
1012         case ATAPI_CHECKPOWERMODE1: // Check power mode
1013             drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1014             channel->status.busy = 0;
1015             channel->status.ready = 1;
1016             channel->status.write_fault = 0;
1017             channel->status.data_req = 0;
1018             channel->status.error = 0;
1019             break;
1020
1021         case ATAPI_MULTREAD:  // read multiple sectors
1022             drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num; // falls through to the unimplemented-command error below
1023         default:
1024             PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1025             return -1;
1026     }
1027
1028     return length;
1029 }
1030
1031
1032 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1033     struct ide_internal * ide = priv_data;
1034     struct ide_channel * channel = get_selected_channel(ide, port);
1035     struct ide_drive * drive = get_selected_drive(channel);
1036
1037     PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n", 
1038             port, *(uint32_t *)src, length);
1039
1040     memcpy(drive->data_buf + drive->transfer_index, src, length);    
1041     drive->transfer_index += length;
1042
1043     // Transfer is complete, dispatch the command
1044     if (drive->transfer_index >= drive->transfer_length) {
1045         switch (channel->cmd_reg) {
1046
1047             case ATAPI_WRITE: // Write Sectors
1048
1049                 channel->status.busy = 1;
1050                 channel->status.data_req = 0;
1051                     
1052                 if (ata_write(ide, channel, drive->data_buf, drive->transfer_length/HD_SECTOR_SIZE) == -1) {
1053                     PrintError(core->vm_info, core, "Error writing to disk\n");
1054                     return -1;
1055                 }
1056
1057                 PrintDebug(core->vm_info, core, "IDE: Write sectors complete\n");
1058
1059                 channel->status.error = 0;
1060                 channel->status.busy = 0;
1061
1062                 ide_raise_irq(ide, channel);
1063                 break;
1064
1065             case ATAPI_PACKETCMD: // ATAPI packet command
1066                 if (atapi_handle_packet(core, ide, channel) == -1) {
1067                     PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1068                     return -1;
1069                 }
1070                 break;
1071             default:
1072                 PrintError(core->vm_info, core, "Unhandld IDE Command %x\n", channel->cmd_reg);
1073                 return -1;
1074         }
1075     }
1076
1077     return length;
1078 }
1079
1080
1081 static int read_hd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1082     struct ide_drive * drive = get_selected_drive(channel);
1083     int data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1084
1085
1086
1087     if (drive->transfer_index >= drive->transfer_length) {
1088         PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
1089                    drive->transfer_length, drive->transfer_index,
1090                    drive->transfer_index + length);
1091         return -1;
1092     }
1093
1094     
1095     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1096         drive->current_lba++;
1097
1098         if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1099             PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
1100             return -1;
1101         }
1102     }
1103
1104     /*
1105       PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
1106       *(uint32_t *)(drive->data_buf + data_offset), 
1107       length, data_offset);
1108     */
1109     memcpy(dst, drive->data_buf + data_offset, length);
1110
1111     drive->transfer_index += length;
1112
1113
1114     /* This is the trigger for interrupt injection.
1115      * For read single sector commands we interrupt after every sector
1116      * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1117      * cur_sector_num is configured depending on the operation we are currently running
1118      * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1119      */
1120     if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
1121         (drive->transfer_index == drive->transfer_length)) {
1122         if (drive->transfer_index < drive->transfer_length) {
1123             // An increment is complete, but there is still more data to be transferred...
1124             PrintDebug(VM_NONE, VCORE_NONE, "Integral Complete, still transferring more sectors\n");
1125             channel->status.data_req = 1;
1126
1127             drive->irq_flags.c_d = 0;
1128         } else {
1129             PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1130             // This was the final read of the request
1131             channel->status.data_req = 0;
1132
1133             
1134             drive->irq_flags.c_d = 1;
1135             drive->irq_flags.rel = 0;
1136         }
1137
1138         channel->status.ready = 1;
1139         drive->irq_flags.io_dir = 1;
1140         channel->status.busy = 0;
1141
1142         ide_raise_irq(ide, channel);
1143     }
1144
1145
1146     return length;
1147 }
1148
1149
1150
1151 static int read_cd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1152     struct ide_drive * drive = get_selected_drive(channel);
1153     int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1154     //  int req_offset = drive->transfer_index % drive->req_len;
1155     
1156     if (drive->cd_state.atapi_cmd != 0x28) {
1157         PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
1158         PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%d, transfer idx=%d\n", drive->transfer_length, drive->transfer_index);
1159     }
1160
1161     
1162
1163     if (drive->transfer_index >= drive->transfer_length) {
1164         PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n", 
1165                    drive->transfer_length, drive->transfer_index, 
1166                    drive->transfer_index + length);
1167         return -1;
1168     }
1169
1170     
1171     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1172         if (atapi_update_data_buf(ide, channel) == -1) {
1173             PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
1174             return -1;
1175         }
1176     }
1177
1178     memcpy(dst, drive->data_buf + data_offset, length);
1179     
1180     drive->transfer_index += length;
1181
1182
1183     // Should the req_offset be recalculated here?????
1184     if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1185         if (drive->transfer_index < drive->transfer_length) {
1186             // An increment is complete, but there is still more data to be transferred...
1187             
1188             channel->status.data_req = 1;
1189
1190             drive->irq_flags.c_d = 0;
1191
1192             // Update the request length in the cylinder regs
1193             if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1194                 PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
1195                 return -1;
1196             }
1197         } else {
1198             // This was the final read of the request
1199
1200             drive->req_len = 0;
1201             channel->status.data_req = 0;
1202             channel->status.ready = 1;
1203             
1204             drive->irq_flags.c_d = 1;
1205             drive->irq_flags.rel = 0;
1206         }
1207
1208         drive->irq_flags.io_dir = 1;
1209         channel->status.busy = 0;
1210
1211         ide_raise_irq(ide, channel);
1212     }
1213
1214     return length;
1215 }
1216
1217
1218 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1219     struct ide_drive * drive = get_selected_drive(channel);
1220
1221     channel->status.busy = 0;
1222     channel->status.ready = 1;
1223     channel->status.write_fault = 0;
1224     channel->status.seek_complete = 1;
1225     channel->status.corrected = 0;
1226     channel->status.error = 0;
1227                 
1228     
1229     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1230     drive->transfer_index += length;
1231     
1232     if (drive->transfer_index >= drive->transfer_length) {
1233         channel->status.data_req = 0;
1234     }
1235     
1236     return length;
1237 }
1238
1239
1240 static int ide_read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1241     struct ide_internal * ide = priv_data;
1242     struct ide_channel * channel = get_selected_channel(ide, port);
1243     struct ide_drive * drive = get_selected_drive(channel);
1244
1245     //       PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
1246
1247     if ((channel->cmd_reg == 0xec) ||   // IDENTIFY DEVICE (ATAPI_IDENTIFY)
1248         (channel->cmd_reg == 0xa1)) {   // IDENTIFY PACKET DEVICE (ATAPI_PIDENTIFY)
1249         return read_drive_id((uint8_t *)dst, length, ide, channel);
1250     }
1251
1252     if (drive->drive_type == BLOCK_CDROM) {
1253         if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1254             PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1255             return -1;
1256         }
1257     } else if (drive->drive_type == BLOCK_DISK) {
1258         if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1259             PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
1260             return -1;
1261         }
1262     } else {
1263         memset((uint8_t *)dst, 0, length);
1264     }
1265
1266     return length;
1267 }
1268
1269 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1270     struct ide_internal * ide = priv_data;
1271     struct ide_channel * channel = get_selected_channel(ide, port);
1272     struct ide_drive * drive = get_selected_drive(channel);
1273             
1274     if (length != 1) {
1275         PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1276         return -1;
1277     }
1278
1279     PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1280
1281     switch (port) {
1282         // reset and interrupt enable
1283         case PRI_CTRL_PORT:
1284         case SEC_CTRL_PORT: {
1285             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1286
1287             // only reset channel on a 0->1 reset bit transition
1288             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1289                 channel_reset(channel);
1290             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1291                 channel_reset_complete(channel);
1292             }
1293
1294             channel->ctrl_reg.val = tmp_ctrl->val;          
1295             break;
1296         }
1297         case PRI_FEATURES_PORT:
1298         case SEC_FEATURES_PORT:
1299             channel->features.val = *(uint8_t *)src;
1300             break;
1301
1302         case PRI_SECT_CNT_PORT:
1303         case SEC_SECT_CNT_PORT:
1304             channel->drives[0].sector_count = *(uint8_t *)src;
1305             channel->drives[1].sector_count = *(uint8_t *)src;
1306             break;
1307
1308         case PRI_SECT_NUM_PORT:
1309         case SEC_SECT_NUM_PORT:
1310             channel->drives[0].sector_num = *(uint8_t *)src;
1311             channel->drives[1].sector_num = *(uint8_t *)src;
1312             break;
1313         case PRI_CYL_LOW_PORT:
1314         case SEC_CYL_LOW_PORT:
1315             channel->drives[0].cylinder_low = *(uint8_t *)src;
1316             channel->drives[1].cylinder_low = *(uint8_t *)src;
1317             break;
1318
1319         case PRI_CYL_HIGH_PORT:
1320         case SEC_CYL_HIGH_PORT:
1321             channel->drives[0].cylinder_high = *(uint8_t *)src;
1322             channel->drives[1].cylinder_high = *(uint8_t *)src;
1323             break;
1324
1325         case PRI_DRV_SEL_PORT:
1326         case SEC_DRV_SEL_PORT: {
1327             channel->drive_head.val = *(uint8_t *)src;
1328             
1329             // make sure the reserved bits are ok..
1330             // JRL TODO: check with new ramdisk to make sure this is right...
1331             channel->drive_head.val |= 0xa0;
1332
1333             drive = get_selected_drive(channel);
1334
1335             // Selecting a non-present device is a no-no
1336             if (drive->drive_type == BLOCK_NONE) {
1337                 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1338                 channel->error_reg.abort = 1;
1339                 channel->status.error = 1;
1340             } else {
1341                 channel->status.busy = 0;
1342                 channel->status.ready = 1;
1343                 channel->status.data_req = 0;
1344                 channel->status.error = 0;
1345                 channel->status.seek_complete = 1;
1346                 
1347                 channel->dma_status.active = 0;
1348                 channel->dma_status.err = 0;
1349             }
1350
1351             break;
1352         }
1353         default:
1354             PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
1355             return -1;
1356     }
1357     return length;
1358 }
1359
1360
1361 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1362     struct ide_internal * ide = priv_data;
1363     struct ide_channel * channel = get_selected_channel(ide, port);
1364     struct ide_drive * drive = get_selected_drive(channel);
1365     
1366     if (length != 1) {
1367         PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1368         return -1;
1369     }
1370     
1371     PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1372
1373     if ((port == PRI_ADDR_REG_PORT) ||
1374         (port == SEC_ADDR_REG_PORT)) {
1375         // unused, return 0xff
1376         *(uint8_t *)dst = 0xff;
1377         return length;
1378     }
1379
1380
1381     // if no drive is present just return 0 + reserved bits
1382     if (drive->drive_type == BLOCK_NONE) {
1383         if ((port == PRI_DRV_SEL_PORT) ||
1384             (port == SEC_DRV_SEL_PORT)) {
1385             *(uint8_t *)dst = 0xa0;
1386         } else {
1387             *(uint8_t *)dst = 0;
1388         }
1389
1390         return length;
1391     }
1392
1393     switch (port) {
1394
1395         // This is really the error register.
1396         case PRI_FEATURES_PORT:
1397         case SEC_FEATURES_PORT:
1398             *(uint8_t *)dst = channel->error_reg.val;
1399             break;
1400             
1401         case PRI_SECT_CNT_PORT:
1402         case SEC_SECT_CNT_PORT:
1403             *(uint8_t *)dst = drive->sector_count;
1404             break;
1405
1406         case PRI_SECT_NUM_PORT:
1407         case SEC_SECT_NUM_PORT:
1408             *(uint8_t *)dst = drive->sector_num;
1409             break;
1410
1411         case PRI_CYL_LOW_PORT:
1412         case SEC_CYL_LOW_PORT:
1413             *(uint8_t *)dst = drive->cylinder_low;
1414             break;
1415
1416
1417         case PRI_CYL_HIGH_PORT:
1418         case SEC_CYL_HIGH_PORT:
1419             *(uint8_t *)dst = drive->cylinder_high;
1420             break;
1421
1422         case PRI_DRV_SEL_PORT:
1423         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1424             *(uint8_t *)dst = channel->drive_head.val;
1425             break;
1426
1427         case PRI_CTRL_PORT:
1428         case SEC_CTRL_PORT:
1429         case PRI_CMD_PORT:
1430         case SEC_CMD_PORT:
1431             // Note: a read of the status register (but not the alt-status via the ctrl port) should also clear the pending IRQ; that is not modeled here
1432             *(uint8_t *)dst = channel->status.val;
1433             break;
1434
1435         default:
1436             PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1437             return -1;
1438     }
1439
1440     PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
1441
1442     return length;
1443 }
1444
1445
1446
1447 static void init_drive(struct ide_drive * drive) {
1448
1449     drive->sector_count = 0x01;
1450     drive->sector_num = 0x01;
1451     drive->cylinder = 0x0000;
1452
1453     drive->drive_type = BLOCK_NONE;
1454
1455     memset(drive->model, 0, sizeof(drive->model));
1456
1457     drive->transfer_index = 0;
1458     drive->transfer_length = 0;
1459     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1460
1461     drive->num_cylinders = 0;
1462     drive->num_heads = 0;
1463     drive->num_sectors = 0;
1464     
1465
1466     drive->private_data = NULL;
1467     drive->ops = NULL;
1468 }
1469
1470 static void init_channel(struct ide_channel * channel) {
1471     int i = 0;
1472
1473     channel->error_reg.val = 0x01;
1474
1475     //** channel->features = 0x0;
1476
1477     channel->drive_head.val = 0x00;
1478     channel->status.val = 0x00;
1479     channel->cmd_reg = 0x00;
1480     channel->ctrl_reg.val = 0x08;
1481
1482     channel->dma_cmd.val = 0;
1483     channel->dma_status.val = 0;
1484     channel->dma_prd_addr = 0;
1485     channel->dma_tbl_index = 0;
1486
1487     for (i = 0; i < 2; i++) {
1488         init_drive(&(channel->drives[i]));
1489     }
1490
1491 }
1492
1493
1494 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1495     PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1496     /*
1497     struct ide_internal * ide = (struct ide_internal *)(private_data);
1498
1499     PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1500     */
1501
1502     return 0;
1503 }
1504
1505 static int init_ide_state(struct ide_internal * ide) {
1506
1507     /* 
1508      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1509      */
1510
1511     init_channel(&(ide->channels[0]));
1512     ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1513
1514     init_channel(&(ide->channels[1]));
1515     ide->channels[1].irq = SEC_DEFAULT_IRQ ;
1516
1517
1518     return 0;
1519 }
1520
1521
1522
1523
1524 static int ide_free(struct ide_internal * ide) {
1525
1526     // deregister from PCI?
1527
1528     V3_Free(ide);
1529
1530     return 0;
1531 }
1532
1533 #ifdef V3_CONFIG_CHECKPOINT
1534
1535 #include <palacios/vmm_sprintf.h>
1536
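/* Checkpoint layout: one (currently empty) context for the device itself,
 * one context per channel named "<id>-<channel>" holding the channel
 * registers, and one per drive named "<id>-<channel>-<drive>" holding the
 * drive registers, transfer state, and type-specific (ATA/ATAPI) state.
 */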
1537 static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1538     struct ide_internal * ide = (struct ide_internal *)private_data;
1539     struct v3_chkpt_ctx *ctx=0;
1540     int ch_num = 0;
1541     int drive_num = 0;
1542     char buf[128];
1543     
1544
1545     ctx=v3_chkpt_open_ctx(chkpt,id);
1546     
1547     if (!ctx) { 
1548       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
1549       goto savefailout;
1550     }
1551
1552     // nothing saved yet
1553     
1554     v3_chkpt_close_ctx(ctx);ctx=0;
1555    
1556
1557     for (ch_num = 0; ch_num < 2; ch_num++) {
1558         struct ide_channel * ch = &(ide->channels[ch_num]);
1559
1560         snprintf(buf, 128, "%s-%d", id, ch_num);
1561
1562         ctx = v3_chkpt_open_ctx(chkpt, buf);
1563         
1564         if (!ctx) { 
1565           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
1566           goto savefailout;
1567         }
1568
1569         V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
1570         V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
1571         V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
1572         V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
1573         V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
1574         V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
1575         V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
1576         V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
1577         V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
1578         V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);
1579
1580         v3_chkpt_close_ctx(ctx); ctx=0;
1581
1582         for (drive_num = 0; drive_num < 2; drive_num++) {
1583             struct ide_drive * drive = &(ch->drives[drive_num]);
1584             
1585             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1586
1587             ctx = v3_chkpt_open_ctx(chkpt, buf);
1588             
1589             if (!ctx) { 
1590               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
1591               goto savefailout;
1592             }
1593
1594             V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
1595             V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
1596             V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
1597             V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);
1598
1599             V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
1600             V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
1601             V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);
1602
1603             V3_CHKPT_SAVE(ctx, "DATA_BUF",  drive->data_buf, savefailout);
1604
1605
1606             /* For now we'll just pack the type specific data at the end... */
1607             /* We should probably add a new context here in the future... */
1608             if (drive->drive_type == BLOCK_CDROM) {
1609               V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
1610               V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
1611               V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
1612             } else if (drive->drive_type == BLOCK_DISK) {
1613               V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
1614               V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
1615               V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
1616             } else if (drive->drive_type == BLOCK_NONE) { 
1617               // no drive connected, so no data
1618             } else {
1619               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1620               goto savefailout;
1621             }
1622             
1623             v3_chkpt_close_ctx(ctx); ctx=0;
1624         }
1625     }
1626
1627 // goodout:
1628     return 0;
1629
1630  savefailout:
1631     PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
1632     if (ctx) {v3_chkpt_close_ctx(ctx); }
1633     return -1;
1634 }
1635
1636
1637
1638 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1639     struct ide_internal * ide = (struct ide_internal *)private_data;
1640     struct v3_chkpt_ctx *ctx=0;
1641     int ch_num = 0;
1642     int drive_num = 0;
1643     char buf[128];
1644     
1645     ctx=v3_chkpt_open_ctx(chkpt,id);
1646     
1647     if (!ctx) { 
1648       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1649       goto loadfailout;
1650     }
1651
1652     // nothing saved yet
1653     
1654     v3_chkpt_close_ctx(ctx);ctx=0;
1655    
1656
1657     for (ch_num = 0; ch_num < 2; ch_num++) {
1658         struct ide_channel * ch = &(ide->channels[ch_num]);
1659
1660         snprintf(buf, 128, "%s-%d", id, ch_num);
1661
1662         ctx = v3_chkpt_open_ctx(chkpt, buf);
1663         
1664         if (!ctx) { 
1665           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1666           goto loadfailout;
1667         }
1668
1669         V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1670         V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1671         V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1672         V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1673         V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1674         V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1675         V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1676         V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1677         V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1678         V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1679
1680         v3_chkpt_close_ctx(ctx); ctx=0;
1681
1682         for (drive_num = 0; drive_num < 2; drive_num++) {
1683             struct ide_drive * drive = &(ch->drives[drive_num]);
1684             
1685             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1686
1687             ctx = v3_chkpt_open_ctx(chkpt, buf);
1688             
1689             if (!ctx) { 
1690               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1691               goto loadfailout;
1692             }
1693
1694             V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1695             V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1696             V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1697             V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1698
1699             V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1700             V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1701             V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1702
1703             V3_CHKPT_LOAD(ctx, "DATA_BUF",  drive->data_buf, loadfailout);
1704
1705             
1706             /* For now we'll just pack the type specific data at the end... */
1707             /* We should probably add a new context here in the future... */
1708             if (drive->drive_type == BLOCK_CDROM) {
1709               V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1710               V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1711               V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1712             } else if (drive->drive_type == BLOCK_DISK) {
1713               V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1714               V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1715               V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1716             } else if (drive->drive_type == BLOCK_NONE) { 
1717               // no drive connected, so no data
1718             } else {
1719               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1720               goto loadfailout;
1721             }
1722         }
1723     }
1724 // goodout:
1725     return 0;
1726
1727  loadfailout:
1728     PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1729     if (ctx) {v3_chkpt_close_ctx(ctx); }
1730     return -1;
1731
1732 }
1733
1734
1735
1736 #endif
1737
1738
1739 static struct v3_device_ops dev_ops = {
1740     .free = (int (*)(void *))ide_free,
1741 #ifdef V3_CONFIG_CHECKPOINT
1742     .save_extended = ide_save_extended,
1743     .load_extended = ide_load_extended
1744 #endif
1745 };
1746
1747
1748
1749
1750 static int connect_fn(struct v3_vm_info * vm, 
1751                       void * frontend_data, 
1752                       struct v3_dev_blk_ops * ops, 
1753                       v3_cfg_tree_t * cfg, 
1754                       void * private_data) {
1755     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1756     struct ide_channel * channel = NULL;
1757     struct ide_drive * drive = NULL;
1758
1759     char * bus_str = v3_cfg_val(cfg, "bus_num");
1760     char * drive_str = v3_cfg_val(cfg, "drive_num");
1761     char * type_str = v3_cfg_val(cfg, "type");
1762     char * model_str = v3_cfg_val(cfg, "model");
1763     uint_t bus_num = 0;
1764     uint_t drive_num = 0;
1765
1766
1767     if ((!type_str) || (!drive_str) || (!bus_str)) {
1768         PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1769         return -1;
1770     }
1771
1772     bus_num = atoi(bus_str);
1773     drive_num = atoi(drive_str);
1774
     /* Guard against out-of-range values from the configuration; there are
      * only two channels with two drive slots each. */
     if ((bus_num >= 2) || (drive_num >= 2)) {
         PrintError(vm, VCORE_NONE, "Invalid IDE bus/drive number (bus=%d, drive=%d)\n", bus_num, drive_num);
         return -1;
     }

1775     channel = &(ide->channels[bus_num]);
1776     drive = &(channel->drives[drive_num]);
1777
1778     if (drive->drive_type != BLOCK_NONE) {
1779         PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1780         return -1;
1781     }
1782
1783     if (model_str != NULL) {
1784         strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1785     }
1786
1787     if (strcasecmp(type_str, "cdrom") == 0) {
1788         drive->drive_type = BLOCK_CDROM;
1789
1790         while (strlen((char *)(drive->model)) < 40) {
1791             strcat((char*)(drive->model), " ");
1792         }
1793
1794     } else if (strcasecmp(type_str, "hd") == 0) {
1795         drive->drive_type = BLOCK_DISK;
1796
1797         drive->hd_state.accessed = 0;
1798         drive->hd_state.mult_sector_num = 1;
1799
1800         drive->num_sectors = 63;
1801         drive->num_heads = 16;
1802         drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
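        /* Worked example: with the fixed 16 heads x 63 sectors/track geometry
         * above, one cylinder covers 16 * 63 * 512 = 516096 bytes, so an
         * 8 GiB backing image (8589934592 bytes = 16777216 sectors) maps to
         * 16777216 / 1008 = 16644 cylinders (integer division). */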
1803     } else {
1804         PrintError(vm, VCORE_NONE, "Invalid IDE drive type '%s'\n", type_str);
1805         return -1;
1806     }
1807  
1808     drive->ops = ops;
1809
1810     if (ide->ide_pci) {
1811         // Hardcode this for now, but it's not a good idea....
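        // On the PIIX3, 0x40-0x41 and 0x42-0x43 are the primary and secondary
        // IDE timing registers (IDETIM); writing 0x80 to the high byte sets
        // the IDE Decode Enable bit so the controller claims that channel's
        // legacy I/O ranges.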
1812         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1813     }
1814  
1815     drive->private_data = private_data;
1816
1817     return 0;
1818 }
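/* connect_fn() consumes the per-drive attributes "bus_num", "drive_num",
 * "type" ("hd" or "cdrom") and optionally "model".  A hypothetical fragment
 * from a block backend's configuration might look like the following, where
 * the frontend tag names this IDE device's ID; only the attribute names are
 * taken from the code above, the enclosing structure is an assumption:
 *
 *   <frontend tag="ide">
 *       <bus_num>0</bus_num>
 *       <drive_num>0</drive_num>
 *       <type>hd</type>
 *       <model>V3VEE HDD</model>
 *   </frontend>
 */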
1819
1820
1821
1822
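/* Device initialization: allocate the controller state, optionally locate the
 * PCI bus and southbridge it hangs off, register with the device manager,
 * hook the legacy I/O ports of both channels, expose the PIIX3 IDE PCI
 * function when a PCI bus is configured, and finally register as a block
 * frontend so backends can attach drives through connect_fn(). */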
1823 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1824     struct ide_internal * ide  = NULL;
1825     char * dev_id = v3_cfg_val(cfg, "ID");
1826     int ret = 0;
1827
1828     PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
1829
1830     ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1831
1832     if (ide == NULL) {
1833         PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
1834         return -1;
1835     }
1836
1837     memset(ide, 0, sizeof(struct ide_internal));
1838
1839     ide->vm = vm;
1840     ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1841
1842     if (ide->pci_bus != NULL) {
1843         struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1844
1845         if (!southbridge) {
1846             PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
1847             V3_Free(ide);
1848             return -1;
1849         }
1850
1851         ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1852     }
1853
1854     PrintDebug(vm, VCORE_NONE, "IDE: Creating 2 IDE channels\n");
1855
1856     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
1857
1858     if (dev == NULL) {
1859         PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
1860         V3_Free(ide);
1861         return -1;
1862     }
1863
1864     if (init_ide_state(ide) == -1) {
1865         PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
1866         v3_remove_device(dev);
1867         return -1;
1868     }
1869
1870     PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
1871
1872     ret |= v3_dev_hook_io(dev, PRI_DATA_PORT, 
1873                           &ide_read_data_port, &write_data_port);
1874     ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
1875                           &read_port_std, &write_port_std);
1876     ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
1877                           &read_port_std, &write_port_std);
1878     ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
1879                           &read_port_std, &write_port_std);
1880     ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
1881                           &read_port_std, &write_port_std);
1882     ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
1883                           &read_port_std, &write_port_std);
1884     ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
1885                           &read_port_std, &write_port_std);
1886     ret |= v3_dev_hook_io(dev, PRI_CMD_PORT, 
1887                           &read_port_std, &write_cmd_port);
1888
1889     ret |= v3_dev_hook_io(dev, SEC_DATA_PORT, 
1890                           &ide_read_data_port, &write_data_port);
1891     ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
1892                           &read_port_std, &write_port_std);
1893     ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
1894                           &read_port_std, &write_port_std);
1895     ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
1896                           &read_port_std, &write_port_std);
1897     ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
1898                           &read_port_std, &write_port_std);
1899     ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
1900                           &read_port_std, &write_port_std);
1901     ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
1902                           &read_port_std, &write_port_std);
1903     ret |= v3_dev_hook_io(dev, SEC_CMD_PORT, 
1904                           &read_port_std, &write_cmd_port);
1905   
1906
1907     ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT, 
1908                           &read_port_std, &write_port_std);
1909
1910     ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT, 
1911                           &read_port_std, &write_port_std);
1912   
1913
1914     ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
1915                           &read_port_std, &write_port_std);
1916
1917     ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
1918                           &read_port_std, &write_port_std);
1919
1920
1921     if (ret != 0) {
1922         PrintError(vm, VCORE_NONE, "Error hooking IDE I/O ports\n");
1923         v3_remove_device(dev);
1924         return -1;
1925     }
1926
1927
1928     if (ide->pci_bus) {
1929         struct v3_pci_bar bars[6];
1930         struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1931         struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1932         struct pci_device * pci_dev = NULL;
1933         int i;
1934
1935         PrintDebug(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
1936
1937         for (i = 0; i < 6; i++) {
1938             bars[i].type = PCI_BAR_NONE;
1939         }
1940
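        /* BAR 4 is the Bus Master IDE I/O window: 16 ports, the first 8 for
         * the primary channel and the next 8 for the secondary.  Within each
         * half, offset 0 is the DMA command register, offset 2 the DMA status
         * register, and offsets 4-7 hold the physical address of the PRD
         * table. */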
1941         bars[4].type = PCI_BAR_IO;
1942         //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
1943         bars[4].default_base_port = -1;
1944         bars[4].num_ports = 16;
1945
1946         bars[4].io_read = read_dma_port;
1947         bars[4].io_write = write_dma_port;
1948         bars[4].private_data = ide;
1949
1950         pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
1951                                          "PIIX3_IDE", bars,
1952                                          pci_config_update, NULL, NULL, NULL, ide);
1953
1954         if (pci_dev == NULL) {
1955             PrintError(vm, VCORE_NONE, "Failed to register PIIX3 IDE controller with PCI\n"); 
1956             v3_remove_device(dev);
1957             return -1;
1958         }
1959
1960         /* This is for CMD646 devices 
1961            pci_dev->config_header.vendor_id = 0x1095;
1962            pci_dev->config_header.device_id = 0x0646;
1963            pci_dev->config_header.revision = 0x8f07;
1964         */
1965
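        // 8086:7010 identifies the IDE function of the Intel 82371SB (PIIX3).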
1966         pci_dev->config_header.vendor_id = 0x8086;
1967         pci_dev->config_header.device_id = 0x7010;
1968         pci_dev->config_header.revision = 0x00;
1969
1970         pci_dev->config_header.prog_if = 0x80; // Master IDE device
1971         pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1972         pci_dev->config_header.class = PCI_CLASS_STORAGE;
1973
1974         pci_dev->config_header.command = 0;
1975         pci_dev->config_header.status = 0x0280;
1976
1977         ide->ide_pci = pci_dev;
1978
1979
1980     }
1981
1982     if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
1983         PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
1984         v3_remove_device(dev);
1985         return -1;
1986     }
1987     
1988
1989     PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
1990
1991     return 0;
1992 }
1993
1994
1995 device_register("IDE", ide_init)
1996
1997
1998
1999
2000 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num, 
2001                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2002
2003     struct ide_internal * ide  = ide_data;  
2004     struct ide_channel * channel = &(ide->channels[channel_num]);
2005     struct ide_drive * drive = &(channel->drives[drive_num]);
2006     
2007     if (drive->drive_type == BLOCK_NONE) {
2008         return -1;
2009     }
2010
2011     *cylinders = drive->num_cylinders;
2012     *heads = drive->num_heads;
2013     *sectors = drive->num_sectors;
2014
2015     return 0;
2016 }
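
/* Minimal usage sketch (illustration only, not part of the driver): a
 * hypothetical caller querying the geometry of the primary master so it can
 * be reported to the guest.  The surrounding function and variable names are
 * assumptions. */
#if 0
static void example_report_geometry(struct v3_vm_info * vm, void * ide_state) {
    uint32_t cyl = 0, heads = 0, sects = 0;

    if (v3_ide_get_geometry(ide_state, 0, 0, &cyl, &heads, &sects) == -1) {
        PrintError(vm, VCORE_NONE, "No drive attached to the primary master slot\n");
        return;
    }

    PrintDebug(vm, VCORE_NONE, "Primary master geometry: C/H/S = %u/%u/%u\n",
               cyl, heads, sects);
}
#endif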