Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are handled the same way, as in the example below.
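
To track a release branch, the same pattern applies (the branch name here is only illustrative; list the real branch names with "git branch -r"):

  git checkout --track -b Release-1.3 origin/Release-1.3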


palacios/src/devices/ide.c (commit 855549fec12e23a26140e14a1df908d89f90beef)
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef V3_CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
36
37
38 #define PRI_DATA_PORT         0x1f0
39 #define PRI_FEATURES_PORT     0x1f1
40 #define PRI_SECT_CNT_PORT     0x1f2
41 #define PRI_SECT_NUM_PORT     0x1f3
42 #define PRI_CYL_LOW_PORT      0x1f4
43 #define PRI_CYL_HIGH_PORT     0x1f5
44 #define PRI_DRV_SEL_PORT      0x1f6
45 #define PRI_CMD_PORT          0x1f7
46 #define PRI_CTRL_PORT         0x3f6
47 #define PRI_ADDR_REG_PORT     0x3f7
48
49 #define SEC_DATA_PORT         0x170
50 #define SEC_FEATURES_PORT     0x171
51 #define SEC_SECT_CNT_PORT     0x172
52 #define SEC_SECT_NUM_PORT     0x173
53 #define SEC_CYL_LOW_PORT      0x174
54 #define SEC_CYL_HIGH_PORT     0x175
55 #define SEC_DRV_SEL_PORT      0x176
56 #define SEC_CMD_PORT          0x177
57 #define SEC_CTRL_PORT         0x376
58 #define SEC_ADDR_REG_PORT     0x377
59
60
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
63
64 #define DATA_BUFFER_SIZE 2048
65
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
68
69
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
71                                           "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72                                            "PRI_CTRL", "PRI_ADDR_REG"};
73
74
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
76                                           "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77                                            "SEC_CTRL", "SEC_ADDR_REG"};
78
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80                                            "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
81
82
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
105 struct ide_cd_state {
106     struct atapi_sense_data sense;
107
108     uint8_t atapi_cmd;
109     struct atapi_error_recovery err_recovery;
110 };
111
112 struct ide_hd_state {
113     uint32_t accessed;
114
115     /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116     uint32_t mult_sector_num;
117
118     /* This is the current op sector size:
119      * for multiple sector ops this equals mult_sector_num
120      * for standard ops this equals 1
121      */
122     uint32_t cur_sector_num;
123 };
124
125 struct ide_drive {
126     // Command Registers
127
128     v3_block_type_t drive_type;
129
130     struct v3_dev_blk_ops * ops;
131
132     union {
133         struct ide_cd_state cd_state;
134         struct ide_hd_state hd_state;
135     };
136
137     char model[41];
138
139     // Where we are in the data transfer
140     uint32_t transfer_index;
141
142     // the length of a transfer
143     // calculated for easy access
144     uint32_t transfer_length;
145
146     uint64_t current_lba;
147
148     // We have a local data buffer that we use for IO port accesses
149     uint8_t data_buf[DATA_BUFFER_SIZE];
150
151
152     uint32_t num_cylinders;
153     uint32_t num_heads;
154     uint32_t num_sectors;
155
156     void * private_data;
157     
158     union {
159         uint8_t sector_count;             // 0x1f2,0x172
160         struct atapi_irq_flags irq_flags;
161     } __attribute__((packed));
162
163     union {
164         uint8_t sector_num;               // 0x1f3,0x173
165         uint8_t lba0;
166     } __attribute__((packed));
167
168     union {
169         uint16_t cylinder;
170         uint16_t lba12;
171         
172         struct {
173             uint8_t cylinder_low;       // 0x1f4,0x174
174             uint8_t cylinder_high;      // 0x1f5,0x175
175         } __attribute__((packed));
176         
177         struct {
178             uint8_t lba1;
179             uint8_t lba2;
180         } __attribute__((packed));
181         
182         
183         // The transfer length requested by the CPU 
184         uint16_t req_len;
185     } __attribute__((packed));
186
187 };
188
189
190
191 struct ide_channel {
192     struct ide_drive drives[2];
193
194     // Command Registers
195     struct ide_error_reg error_reg;     // [read] 0x1f1,0x171
196
197     struct ide_features_reg features;
198
199     struct ide_drive_head_reg drive_head; // 0x1f6,0x176
200
201     struct ide_status_reg status;       // [read] 0x1f7,0x177
202     uint8_t cmd_reg;                // [write] 0x1f7,0x177
203
204     int irq; // this is temporary until we add PCI support
205
206     // Control Registers
207     struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
208
209     union {
210         uint8_t dma_ports[8];
211         struct {
212             struct ide_dma_cmd_reg dma_cmd;
213             uint8_t rsvd1;
214             struct ide_dma_status_reg dma_status;
215             uint8_t rsvd2;
216             uint32_t dma_prd_addr;
217         } __attribute__((packed));
218     } __attribute__((packed));
219
220     uint32_t dma_tbl_index;
221 };
222
223
224
225 struct ide_internal {
226     struct ide_channel channels[2];
227
228     struct v3_southbridge * southbridge;
229     struct vm_device * pci_bus;
230
231     struct pci_device * ide_pci;
232
233     struct v3_vm_info * vm;
234 };
235
236
237
238
239
240 /* Utility functions */
241
242 static inline uint16_t be_to_le_16(const uint16_t val) {
243     uint8_t * buf = (uint8_t *)&val;
244     return (buf[0] << 8) | (buf[1]) ;
245 }
246
247 static inline uint16_t le_to_be_16(const uint16_t val) {
248     return be_to_le_16(val);
249 }
250
251
252 static inline uint32_t be_to_le_32(const uint32_t val) {
253     uint8_t * buf = (uint8_t *)&val;
254     return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
255 }
256
257 static inline uint32_t le_to_be_32(const uint32_t val) {
258     return be_to_le_32(val);
259 }
260
261
262 static inline int get_channel_index(ushort_t port) {
263     if (((port & 0xfff8) == 0x1f0) ||
264         ((port & 0xfffe) == 0x3f6) || 
265         ((port & 0xfff8) == 0xc000)) {
266         return 0;
267     } else if (((port & 0xfff8) == 0x170) ||
268                ((port & 0xfffe) == 0x376) ||
269                ((port & 0xfff8) == 0xc008)) {
270         return 1;
271     }
272
273     return -1;
274 }
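/* Note (added for clarity): the masks above map the legacy I/O ranges 0x1f0-0x1f7 and 0x3f6-0x3f7,
 * plus the default bus-master DMA range 0xc000-0xc007, to channel 0; the ranges 0x170-0x177,
 * 0x376-0x377, and 0xc008-0xc00f map to channel 1. The DMA ranges assume the default BAR values
 * defined above (PRI_DEFAULT_DMA_PORT / SEC_DEFAULT_DMA_PORT). */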
275
276 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
277     int channel_idx = get_channel_index(port);    
278     return &(ide->channels[channel_idx]);
279 }
280
281 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
282     return &(channel->drives[channel->drive_head.drive_sel]);
283 }
284
285
286 static inline int is_lba_enabled(struct ide_channel * channel) {
287     return channel->drive_head.lba_mode;
288 }
289
290
291 /* Drive Commands */
292 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
293     if (channel->ctrl_reg.irq_disable == 0) {
294
295         //PrintError(info->vm_info, info, "Raising IDE Interrupt %d\n", channel->irq);
296
297         channel->dma_status.int_gen = 1;
298         v3_raise_irq(ide->vm, channel->irq);
299     }
300 }
301
302
303 static void drive_reset(struct ide_drive * drive) {
304     drive->sector_count = 0x01;
305     drive->sector_num = 0x01;
306
307     PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
308     
309     if (drive->drive_type == BLOCK_CDROM) {
310         drive->cylinder = 0xeb14;
311     } else {
312         drive->cylinder = 0x0000;
313         //drive->hd_state.accessed = 0;
314     }
315
316
317     memset(drive->data_buf, 0, sizeof(drive->data_buf));
318     drive->transfer_index = 0;
319
320     // Send the reset signal to the connected device callbacks
321     //     channel->drives[0].reset();
322     //    channel->drives[1].reset();
323 }
324
325 static void channel_reset(struct ide_channel * channel) {
326     
327     // set busy and seek complete flags
328     channel->status.val = 0x90;
329
330     // Clear errors
331     channel->error_reg.val = 0x01;
332
333     // clear commands
334     channel->cmd_reg = 0x00;
335
336     channel->ctrl_reg.irq_disable = 0;
337 }
338
339 static void channel_reset_complete(struct ide_channel * channel) {
340     channel->status.busy = 0;
341     channel->status.ready = 1;
342
343     channel->drive_head.head_num = 0;    
344     
345     drive_reset(&(channel->drives[0]));
346     drive_reset(&(channel->drives[1]));
347 }
348
349
350 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
351     channel->status.val = 0x41; // Error + ready
352     channel->error_reg.val = 0x04; // ABRT: command aborted
353
354     ide_raise_irq(ide, channel);
355 }
356
357
358 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
359 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
360
361
362 /* ATAPI functions */
363 #include "atapi.h"
364
365 /* ATA functions */
366 #include "ata.h"
367
368
369
370 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
371     struct ide_dma_prd prd_entry;
372     int index = 0;
373
374     V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
375
376     while (1) {
377         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
378         int ret;
379
380         ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
381         
382         if (ret != sizeof(struct ide_dma_prd)) {
383             PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
384             return;
385         }
386
387         V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
388                    prd_entry.base_addr, 
389                    (prd_entry.size == 0) ? 0x10000 : prd_entry.size, 
390                    prd_entry.end_of_table);
391
392         if (prd_entry.end_of_table) {
393             break;
394         }
395
396         index++;
397     }
398
399     return;
400 }
401
402
403 /* IO Operations */
404 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
405     struct ide_drive * drive = get_selected_drive(channel);
406     // This is at top level scope to do the EOT test at the end
407     struct ide_dma_prd prd_entry = {};
408     uint_t bytes_left = drive->transfer_length;
409
410     // Read in the data buffer....
411     // Read a sector/block at a time until the prd entry is full.
412
413 #ifdef V3_CONFIG_DEBUG_IDE
414     print_prd_table(ide, channel);
415 #endif
416
417     PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
418
419     // Loop through the disk data
420     while (bytes_left > 0) {
421         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
422         uint_t prd_bytes_left = 0;
423         uint_t prd_offset = 0;
424         int ret;
425
426         PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
427
428         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
429
430         if (ret != sizeof(struct ide_dma_prd)) {
431             PrintError(core->vm_info, core, "Could not read PRD\n");
432             return -1;
433         }
434
435         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
436                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
437
438         // loop through the PRD data....
439
440         if (prd_entry.size == 0) {
441             // a size of 0 means 64k
442             prd_bytes_left = 0x10000;
443         } else {
444             prd_bytes_left = prd_entry.size;
445         }
446
447
448         while (prd_bytes_left > 0) {
449             uint_t bytes_to_write = 0;
450
451             if (drive->drive_type == BLOCK_DISK) {
452                 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
453
454
455                 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
456                     PrintError(core->vm_info, core, "Failed to read next disk sector\n");
457                     return -1;
458                 }
459             } else if (drive->drive_type == BLOCK_CDROM) {
460                 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
461                     bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
462
463                     if (atapi_read_chunk(ide, channel) == -1) {
464                         PrintError(core->vm_info, core, "Failed to read next disk sector\n");
465                         return -1;
466                     }
467                 } else {
468                     /*
469                     PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
470                     return -1;
471                     */
472                     int cmd_ret = 0;
473
474                     //V3_Print(core->vm_info, core, "DMA of command packet\n");
475
476                     bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
477                     prd_bytes_left = bytes_to_write;
478
479
480                     // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
481                     cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, 
482                                                   bytes_to_write, drive->data_buf); 
483
484                     if (cmd_ret != bytes_to_write) { // check the result of the guest memory write
485                         PrintError(core->vm_info, core, "Failed to copy ATAPI data into guest memory... (ret=%d)\n", cmd_ret);
486                         return -1;
487                     }
488                     drive->transfer_index += bytes_to_write;
489                     bytes_to_write = prd_bytes_left = 0;
490
491                     channel->status.busy = 0;
492                     channel->status.ready = 1;
493                     channel->status.data_req = 0;
494                     channel->status.error = 0;
495                     channel->status.seek_complete = 1;
496
497                     channel->dma_status.active = 0;
498                     channel->dma_status.err = 0;
499
500                     ide_raise_irq(ide, channel);
501                     
502                     return 0;
503                 }
504             }
505
506             PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n", 
507                        (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
508
509             drive->current_lba++;
510
511             ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 
512
513             if (ret != bytes_to_write) {
514                 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
515                 return -1;
516             }
517
518             PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
519
520             drive->transfer_index += ret;
521             prd_bytes_left -= ret;
522             prd_offset += ret;
523             bytes_left -= ret;
524         }
525
526         channel->dma_tbl_index++;
527
528         if (drive->drive_type == BLOCK_DISK) {
529             if (drive->transfer_index % HD_SECTOR_SIZE) {
530                 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
531                 return -1;
532             }
533         } else if (drive->drive_type == BLOCK_CDROM) {
534             if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
535                 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
536                     PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
537                     PrintError(core->vm_info, core, "transfer_index=%d, transfer_length=%d\n", 
538                                drive->transfer_index, drive->transfer_length);
539                     return -1;
540                 }
541             }
542         }
543
544
545         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
546             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
547             return -1;
548         }
549     }
550
551     /*
552       drive->irq_flags.io_dir = 1;
553       drive->irq_flags.c_d = 1;
554       drive->irq_flags.rel = 0;
555     */
556
557
558     // Update to the next PRD entry
559
560     // set DMA status
561
562     if (prd_entry.end_of_table) {
563         channel->status.busy = 0;
564         channel->status.ready = 1;
565         channel->status.data_req = 0;
566         channel->status.error = 0;
567         channel->status.seek_complete = 1;
568
569         channel->dma_status.active = 0;
570         channel->dma_status.err = 0;
571     }
572
573     ide_raise_irq(ide, channel);
574
575     return 0;
576 }
577
578
579 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
580     struct ide_drive * drive = get_selected_drive(channel);
581     // This is at top level scope to do the EOT test at the end
582     struct ide_dma_prd prd_entry = {};
583     uint_t bytes_left = drive->transfer_length;
584
585
586     PrintDebug(core->vm_info, core, "DMA write of %d bytes\n", bytes_left);
587
588     // Loop through disk data
589     while (bytes_left > 0) {
590         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
591         uint_t prd_bytes_left = 0;
592         uint_t prd_offset = 0;
593         int ret;
594         
595         PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
596
597         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
598
599         if (ret != sizeof(struct ide_dma_prd)) {
600             PrintError(core->vm_info, core, "Could not read PRD\n");
601             return -1;
602         }
603
604         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
605                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
606
607
608         if (prd_entry.size == 0) {
609             // a size of 0 means 64k
610             prd_bytes_left = 0x10000;
611         } else {
612             prd_bytes_left = prd_entry.size;
613         }
614
615         while (prd_bytes_left > 0) {
616             uint_t bytes_to_write = 0;
617
618
619             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
620
621
622             ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
623
624             if (ret != bytes_to_write) {
625                 PrintError(core->vm_info, core, "Failed to copy data from guest memory... (ret=%d)\n", ret);
626                 return -1;
627             }
628
629             PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
630
631
632             if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
633                 PrintError(core->vm_info, core, "Failed to write data to disk\n");
634                 return -1;
635             }
636             
637             drive->current_lba++;
638
639             drive->transfer_index += ret;
640             prd_bytes_left -= ret;
641             prd_offset += ret;
642             bytes_left -= ret;
643         }
644
645         channel->dma_tbl_index++;
646
647         if (drive->transfer_index % HD_SECTOR_SIZE) {
648             PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
649             return -1;
650         }
651
652         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
653             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
654             PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%u)...\n", 
655                        bytes_left, drive->transfer_length);
656             PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
657                        prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
658
659             print_prd_table(ide, channel);
660             return -1;
661         }
662     }
663
664     if (prd_entry.end_of_table) {
665         channel->status.busy = 0;
666         channel->status.ready = 1;
667         channel->status.data_req = 0;
668         channel->status.error = 0;
669         channel->status.seek_complete = 1;
670
671         channel->dma_status.active = 0;
672         channel->dma_status.err = 0;
673     }
674
675     ide_raise_irq(ide, channel);
676
677     return 0;
678 }
679
680
681
682 #define DMA_CMD_PORT      0x00
683 #define DMA_STATUS_PORT   0x02
684 #define DMA_PRD_PORT0     0x04
685 #define DMA_PRD_PORT1     0x05
686 #define DMA_PRD_PORT2     0x06
687 #define DMA_PRD_PORT3     0x07
688
689 #define DMA_CHANNEL_FLAG  0x08
690
691 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
692     struct ide_internal * ide = (struct ide_internal *)private_data;
693     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
694     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
695     struct ide_channel * channel = &(ide->channels[channel_flag]);
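    // Illustration (added): bit 3 of the port selects the channel and the low 3 bits select the register,
    // so with the default DMA BARs a write to 0xc00a decodes to DMA_STATUS (offset 2) on channel 1.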
696
697     PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
698                port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
699
700     switch (port_offset) {
701         case DMA_CMD_PORT:
702             channel->dma_cmd.val = *(uint8_t *)src;
703
704             if (channel->dma_cmd.start == 0) {
705                 channel->dma_tbl_index = 0;
706             } else {
707                 channel->dma_status.active = 1;
708
709                 if (channel->dma_cmd.read == 1) {
710                     // DMA Read
711                     if (dma_read(core, ide, channel) == -1) {
712                         PrintError(core->vm_info, core, "Failed DMA Read\n");
713                         return -1;
714                     }
715                 } else {
716                     // DMA write
717                     if (dma_write(core, ide, channel) == -1) {
718                         PrintError(core->vm_info, core, "Failed DMA Write\n");
719                         return -1;
720                     }
721                 }
722
723                 channel->dma_cmd.val &= 0x09;
724             }
725
726             break;
727             
728         case DMA_STATUS_PORT: {
729             uint8_t val = *(uint8_t *)src;
730
731             if (length != 1) {
732                 PrintError(core->vm_info, core, "Invalid write length for DMA status port\n");
733                 return -1;
734             }
735
736             // bits 5-6 are writable, bit 0 (active) is preserved, and bits 1-2 (error/interrupt) are write-1-to-clear
737             channel->dma_status.val = ((val & 0x60) | 
738                                        (channel->dma_status.val & 0x01) |
739                                        (channel->dma_status.val & ~val & 0x06));
740
741             break;
742         }           
743         case DMA_PRD_PORT0:
744         case DMA_PRD_PORT1:
745         case DMA_PRD_PORT2:
746         case DMA_PRD_PORT3: {
747             uint_t addr_index = port_offset & 0x3;
748             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
749             int i = 0;
750
751             if (addr_index + length > 4) {
752                 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
753                 return -1;
754             }
755
756             for (i = 0; i < length; i++) {
757                 addr_buf[addr_index + i] = *((uint8_t *)src + i);
758             }
759
760             PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
761
762             break;
763         }
764         default:
765             PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
766             break;
767     }
768
769     return length;
770 }
771
772
773 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
774     struct ide_internal * ide = (struct ide_internal *)private_data;
775     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
776     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
777     struct ide_channel * channel = &(ide->channels[channel_flag]);
778
779     PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
780
781     if (port_offset + length > 16) {
782         PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
783         return -1;
784     }
785
786     memcpy(dst, channel->dma_ports + port_offset, length);
787     
788     PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
789
790     return length;
791 }
792
793
794
795 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
796     struct ide_internal * ide = priv_data;
797     struct ide_channel * channel = get_selected_channel(ide, port);
798     struct ide_drive * drive = get_selected_drive(channel);
799
800     if (length != 1) {
801         PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
802         return -1;
803     }
804
805     PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
806     
807     channel->cmd_reg = *(uint8_t *)src;
808     
809     switch (channel->cmd_reg) {
810
811         case ATAPI_PIDENTIFY: // ATAPI Identify Device Packet
812             if (drive->drive_type != BLOCK_CDROM) {
813                 drive_reset(drive);
814
815                 // JRL: Should we abort here?
816                 ide_abort_command(ide, channel);
817             } else {
818                 
819                 atapi_identify_device(drive);
820                 
821                 channel->error_reg.val = 0;
822                 channel->status.val = 0x58; // ready, data_req, seek_complete
823             
824                 ide_raise_irq(ide, channel);
825             }
826             break;
827         case ATAPI_IDENTIFY: // Identify Device
828             if (drive->drive_type != BLOCK_DISK) {
829                 drive_reset(drive);
830
831                 // JRL: Should we abort here?
832                 ide_abort_command(ide, channel);
833             } else {
834                 ata_identify_device(drive);
835
836                 channel->error_reg.val = 0;
837                 channel->status.val = 0x58;
838
839                 ide_raise_irq(ide, channel);
840             }
841             break;
842
843         case ATAPI_PACKETCMD: // ATAPI Command Packet
844             if (drive->drive_type != BLOCK_CDROM) {
845                 ide_abort_command(ide, channel);
846             }
847             
848             drive->sector_count = 1;
849
850             channel->status.busy = 0;
851             channel->status.write_fault = 0;
852             channel->status.data_req = 1;
853             channel->status.error = 0;
854
855             // reset the data buffer...
856             drive->transfer_length = ATAPI_PACKET_SIZE;
857             drive->transfer_index = 0;
858
859             break;
860
861         case ATAPI_READ: // Read Sectors with Retry
862         case ATAPI_READ_ONCE: // Read Sectors without Retry
863             drive->hd_state.cur_sector_num = 1;
864
865             if (ata_read_sectors(ide, channel) == -1) {
866                 PrintError(core->vm_info, core, "Error reading sectors\n");
867                 return -1;
868             }
869             break;
870
871         case ATAPI_READ_EXT: // Read Sectors Extended
872             drive->hd_state.cur_sector_num = 1;
873
874             if (ata_read_sectors_ext(ide, channel) == -1) {
875                 PrintError(core->vm_info, core, "Error reading extended sectors\n");
876                 return -1;
877             }
878             break;
879
880         case ATAPI_WRITE: {// Write Sector
881             drive->hd_state.cur_sector_num = 1;
882
883             if (ata_write_sectors(ide, channel) == -1) {
884                 PrintError(core->vm_info, core, "Error writing sectors\n");
885                 return -1;
886             }
887             break;
888         }
889
890             
891
892         case ATAPI_READDMA: // Read DMA with retry
893         case ATAPI_READDMA_ONCE: { // Read DMA
894             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
895
896             if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
897                 ide_abort_command(ide, channel);
898                 return 0;
899             }
900             
901             drive->hd_state.cur_sector_num = 1;
902             
903             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
904             drive->transfer_index = 0;
905
906             if (channel->dma_status.active == 1) {
907                 // DMA Read
908                 if (dma_read(core, ide, channel) == -1) {
909                     PrintError(core->vm_info, core, "Failed DMA Read\n");
910                     return -1;
911                 }
912             }
913             break;
914         }
915
916         case ATAPI_WRITEDMA: { // Write DMA
917             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
918
919             if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
920                 ide_abort_command(ide, channel);
921                 return 0;
922             }
923
924             drive->hd_state.cur_sector_num = 1;
925
926             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
927             drive->transfer_index = 0;
928
929             if (channel->dma_status.active == 1) {
930                 // DMA Write
931                 if (dma_write(core, ide, channel) == -1) {
932                     PrintError(core->vm_info, core, "Failed DMA Write\n");
933                     return -1;
934                 }
935             }
936             break;
937         }
938         case ATAPI_STANDBYNOW1: // Standby Now 1
939         case ATAPI_IDLEIMMEDIATE: // Set Idle Immediate
940         case ATAPI_STANDBY: // Standby
941         case ATAPI_SETIDLE1: // Set Idle 1
942         case ATAPI_SLEEPNOW1: // Sleep Now 1
943         case ATAPI_STANDBYNOW2: // Standby Now 2
944         case ATAPI_IDLEIMMEDIATE2: // Idle Immediate (CFA)
945         case ATAPI_STANDBY2: // Standby 2
946         case ATAPI_SETIDLE2: // Set idle 2
947         case ATAPI_SLEEPNOW2: // Sleep Now 2
948             channel->status.val = 0;
949             channel->status.ready = 1;
950             ide_raise_irq(ide, channel);
951             break;
952
953         case ATAPI_SETFEATURES: // Set Features
954             // The features register has already been written prior to this command.
955             // This command tells the drive to check whether the new value is supported (the value is drive specific).
956             // A common use is enabling a DMA transfer mode.
957             // If the value is valid the drive raises an interrupt; if not, it aborts the command.
958
959             // Do some checking here...
960
961             channel->status.busy = 0;
962             channel->status.write_fault = 0;
963             channel->status.error = 0;
964             channel->status.ready = 1;
965             channel->status.seek_complete = 1;
966             
967             ide_raise_irq(ide, channel);
968             break;
969
970         case ATAPI_SPECIFY:  // Initialize Drive Parameters
971         case ATAPI_RECAL:  // recalibrate?
972             channel->status.error = 0;
973             channel->status.ready = 1;
974             channel->status.seek_complete = 1;
975             ide_raise_irq(ide, channel);
976             break;
977         case ATAPI_SETMULT: { // Set multiple mode (IDE Block mode) 
978             // This makes the drive transfer multiple sectors before generating an interrupt
979             uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
980
981             if (tmp_sect_num > MAX_MULT_SECTORS) {
982                 ide_abort_command(ide, channel);
983                 break;
984             }
985
986             if (drive->sector_count == 0) {
987                 drive->hd_state.mult_sector_num= 1;
988             } else {
989                 drive->hd_state.mult_sector_num = drive->sector_count;
990             }
991
992             channel->status.ready = 1;
993             channel->status.error = 0;
994
995             ide_raise_irq(ide, channel);
996
997             break;
998         }
999
1000         case ATAPI_DEVICE_RESET: // Reset Device
1001             drive_reset(drive);
1002             channel->error_reg.val = 0x01;
1003             channel->status.busy = 0;
1004             channel->status.ready = 1;
1005             channel->status.seek_complete = 1;
1006             channel->status.write_fault = 0;
1007             channel->status.error = 0;
1008             break;
1009
1010         case ATAPI_CHECKPOWERMODE1: // Check power mode
1011             drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1012             channel->status.busy = 0;
1013             channel->status.ready = 1;
1014             channel->status.write_fault = 0;
1015             channel->status.data_req = 0;
1016             channel->status.error = 0;
1017             break;
1018
1019         case ATAPI_MULTREAD:  // read multiple sectors
1020             drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num; // NOTE: no break; falls through to the unimplemented-command error below
1021         default:
1022             PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1023             return -1;
1024     }
1025
1026     return length;
1027 }
1028
1029
1030 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1031     struct ide_internal * ide = priv_data;
1032     struct ide_channel * channel = get_selected_channel(ide, port);
1033     struct ide_drive * drive = get_selected_drive(channel);
1034
1035     PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n", 
1036             port, *(uint32_t *)src, length);
1037
1038     memcpy(drive->data_buf + drive->transfer_index, src, length);    
1039     drive->transfer_index += length;
1040
1041     // Transfer is complete, dispatch the command
1042     if (drive->transfer_index >= drive->transfer_length) {
1043         switch (channel->cmd_reg) {
1044
1045             case ATAPI_WRITE: // Write Sectors
1046
1047                 channel->status.busy = 1;
1048                 channel->status.data_req = 0;
1049                     
1050                 if (ata_write(ide, channel, drive->data_buf, drive->transfer_length/HD_SECTOR_SIZE) == -1) {
1051                     PrintError(core->vm_info, core, "Error writing to disk\n");
1052                     return -1;
1053                 }
1054
1055                 PrintDebug(core->vm_info, core, "IDE: Write sectors complete\n");
1056
1057                 channel->status.error = 0;
1058                 channel->status.busy = 0;
1059
1060                 ide_raise_irq(ide, channel);
1061                 break;
1062
1063             case ATAPI_PACKETCMD: // ATAPI packet command
1064                 if (atapi_handle_packet(core, ide, channel) == -1) {
1065                     PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1066                     return -1;
1067                 }
1068                 break;
1069             default:
1070                 PrintError(core->vm_info, core, "Unhandled IDE Command %x\n", channel->cmd_reg);
1071                 return -1;
1072         }
1073     }
1074
1075     return length;
1076 }
1077
1078
1079 static int read_hd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1080     struct ide_drive * drive = get_selected_drive(channel);
1081     int data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1082
1083
1084
1085     if (drive->transfer_index >= drive->transfer_length) {
1086         PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
1087                    drive->transfer_length, drive->transfer_index,
1088                    drive->transfer_index + length);
1089         return -1;
1090     }
1091
1092     
1093     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1094         drive->current_lba++;
1095
1096         if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1097             PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
1098             return -1;
1099         }
1100     }
1101
1102     /*
1103       PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
1104       *(uint32_t *)(drive->data_buf + data_offset), 
1105       length, data_offset);
1106     */
1107     memcpy(dst, drive->data_buf + data_offset, length);
1108
1109     drive->transfer_index += length;
1110
1111
1112     /* This is the trigger for interrupt injection.
1113      * For read single sector commands we interrupt after every sector
1114      * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1115      * cur_sector_num is configured depending on the operation we are currently running
1116      * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1117      */
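    /* Worked example (added for illustration): for a multi-sector read with mult_sector_num = 4 and an
     * 8-sector (4096-byte) transfer, cur_sector_num is 4, so the IRQ fires after bytes 2048 and 4096;
     * a single-sector read of the same total size would interrupt after every 512 bytes. */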
1118     if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
1119         (drive->transfer_index == drive->transfer_length)) {
1120         if (drive->transfer_index < drive->transfer_length) {
1121             // An increment is complete, but there is still more data to be transferred...
1122             PrintDebug(VM_NONE, VCORE_NONE, "Integral Complete, still transferring more sectors\n");
1123             channel->status.data_req = 1;
1124
1125             drive->irq_flags.c_d = 0;
1126         } else {
1127             PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1128             // This was the final read of the request
1129             channel->status.data_req = 0;
1130
1131             
1132             drive->irq_flags.c_d = 1;
1133             drive->irq_flags.rel = 0;
1134         }
1135
1136         channel->status.ready = 1;
1137         drive->irq_flags.io_dir = 1;
1138         channel->status.busy = 0;
1139
1140         ide_raise_irq(ide, channel);
1141     }
1142
1143
1144     return length;
1145 }
1146
1147
1148
1149 static int read_cd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1150     struct ide_drive * drive = get_selected_drive(channel);
1151     int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1152     //  int req_offset = drive->transfer_index % drive->req_len;
1153     
1154     if (drive->cd_state.atapi_cmd != 0x28) {
1155         PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
1156         PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%d, transfer idx=%d\n", drive->transfer_length, drive->transfer_index);
1157     }
1158
1159     
1160
1161     if (drive->transfer_index >= drive->transfer_length) {
1162         PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n", 
1163                    drive->transfer_length, drive->transfer_index, 
1164                    drive->transfer_index + length);
1165         return -1;
1166     }
1167
1168     
1169     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1170         if (atapi_update_data_buf(ide, channel) == -1) {
1171             PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
1172             return -1;
1173         }
1174     }
1175
1176     memcpy(dst, drive->data_buf + data_offset, length);
1177     
1178     drive->transfer_index += length;
1179
1180
1181     // Should the req_offset be recalculated here?????
1182     if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1183         if (drive->transfer_index < drive->transfer_length) {
1184             // An increment is complete, but there is still more data to be transferred...
1185             
1186             channel->status.data_req = 1;
1187
1188             drive->irq_flags.c_d = 0;
1189
1190             // Update the request length in the cylinder regs
1191             if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1192                 PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
1193                 return -1;
1194             }
1195         } else {
1196             // This was the final read of the request
1197
1198             drive->req_len = 0;
1199             channel->status.data_req = 0;
1200             channel->status.ready = 1;
1201             
1202             drive->irq_flags.c_d = 1;
1203             drive->irq_flags.rel = 0;
1204         }
1205
1206         drive->irq_flags.io_dir = 1;
1207         channel->status.busy = 0;
1208
1209         ide_raise_irq(ide, channel);
1210     }
1211
1212     return length;
1213 }
1214
1215
1216 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1217     struct ide_drive * drive = get_selected_drive(channel);
1218
1219     channel->status.busy = 0;
1220     channel->status.ready = 1;
1221     channel->status.write_fault = 0;
1222     channel->status.seek_complete = 1;
1223     channel->status.corrected = 0;
1224     channel->status.error = 0;
1225                 
1226     
1227     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1228     drive->transfer_index += length;
1229     
1230     if (drive->transfer_index >= drive->transfer_length) {
1231         channel->status.data_req = 0;
1232     }
1233     
1234     return length;
1235 }
1236
1237
1238 static int ide_read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1239     struct ide_internal * ide = priv_data;
1240     struct ide_channel * channel = get_selected_channel(ide, port);
1241     struct ide_drive * drive = get_selected_drive(channel);
1242
1243     //       PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
1244
1245     if ((channel->cmd_reg == 0xec) ||
1246         (channel->cmd_reg == 0xa1)) {
1247         return read_drive_id((uint8_t *)dst, length, ide, channel);
1248     }
1249
1250     if (drive->drive_type == BLOCK_CDROM) {
1251         if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1252             PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1253             return -1;
1254         }
1255     } else if (drive->drive_type == BLOCK_DISK) {
1256         if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1257             PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
1258             return -1;
1259         }
1260     } else {
1261         memset((uint8_t *)dst, 0, length);
1262     }
1263
1264     return length;
1265 }
1266
1267 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1268     struct ide_internal * ide = priv_data;
1269     struct ide_channel * channel = get_selected_channel(ide, port);
1270     struct ide_drive * drive = get_selected_drive(channel);
1271             
1272     if (length != 1) {
1273         PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1274         return -1;
1275     }
1276
1277     PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1278
1279     switch (port) {
1280         // reset and interrupt enable
1281         case PRI_CTRL_PORT:
1282         case SEC_CTRL_PORT: {
1283             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1284
1285             // only reset channel on a 0->1 reset bit transition
1286             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1287                 channel_reset(channel);
1288             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1289                 channel_reset_complete(channel);
1290             }
1291
1292             channel->ctrl_reg.val = tmp_ctrl->val;          
1293             break;
1294         }
1295         case PRI_FEATURES_PORT:
1296         case SEC_FEATURES_PORT:
1297             channel->features.val = *(uint8_t *)src;
1298             break;
1299
1300         case PRI_SECT_CNT_PORT:
1301         case SEC_SECT_CNT_PORT:
1302             channel->drives[0].sector_count = *(uint8_t *)src;
1303             channel->drives[1].sector_count = *(uint8_t *)src;
1304             break;
1305
1306         case PRI_SECT_NUM_PORT:
1307         case SEC_SECT_NUM_PORT:
1308             channel->drives[0].sector_num = *(uint8_t *)src;
1309             channel->drives[1].sector_num = *(uint8_t *)src;
1310             break;
1311         case PRI_CYL_LOW_PORT:
1312         case SEC_CYL_LOW_PORT:
1313             channel->drives[0].cylinder_low = *(uint8_t *)src;
1314             channel->drives[1].cylinder_low = *(uint8_t *)src;
1315             break;
1316
1317         case PRI_CYL_HIGH_PORT:
1318         case SEC_CYL_HIGH_PORT:
1319             channel->drives[0].cylinder_high = *(uint8_t *)src;
1320             channel->drives[1].cylinder_high = *(uint8_t *)src;
1321             break;
1322
1323         case PRI_DRV_SEL_PORT:
1324         case SEC_DRV_SEL_PORT: {
1325             channel->drive_head.val = *(uint8_t *)src;
1326             
1327             // make sure the reserved bits are ok..
1328             // JRL TODO: check with new ramdisk to make sure this is right...
1329             channel->drive_head.val |= 0xa0;
1330
1331             drive = get_selected_drive(channel);
1332
1333             // Selecting a non-present device is a no-no
1334             if (drive->drive_type == BLOCK_NONE) {
1335                 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1336                 channel->error_reg.abort = 1;
1337                 channel->status.error = 1;
1338             } else {
1339                 channel->status.busy = 0;
1340                 channel->status.ready = 1;
1341                 channel->status.data_req = 0;
1342                 channel->status.error = 0;
1343                 channel->status.seek_complete = 1;
1344                 
1345                 channel->dma_status.active = 0;
1346                 channel->dma_status.err = 0;
1347             }
1348
1349             break;
1350         }
1351         default:
1352             PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
1353             return -1;
1354     }
1355     return length;
1356 }
1357
1358
1359 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1360     struct ide_internal * ide = priv_data;
1361     struct ide_channel * channel = get_selected_channel(ide, port);
1362     struct ide_drive * drive = get_selected_drive(channel);
1363     
1364     if (length != 1) {
1365         PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1366         return -1;
1367     }
1368     
1369     PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1370
1371     if ((port == PRI_ADDR_REG_PORT) ||
1372         (port == SEC_ADDR_REG_PORT)) {
1373         // unused, return 0xff
1374         *(uint8_t *)dst = 0xff;
1375         return length;
1376     }
1377
1378
1379     // if no drive is present just return 0 + reserved bits
1380     if (drive->drive_type == BLOCK_NONE) {
1381         if ((port == PRI_DRV_SEL_PORT) ||
1382             (port == SEC_DRV_SEL_PORT)) {
1383             *(uint8_t *)dst = 0xa0;
1384         } else {
1385             *(uint8_t *)dst = 0;
1386         }
1387
1388         return length;
1389     }
1390
1391     switch (port) {
1392
1393         // This is really the error register.
1394         case PRI_FEATURES_PORT:
1395         case SEC_FEATURES_PORT:
1396             *(uint8_t *)dst = channel->error_reg.val;
1397             break;
1398             
1399         case PRI_SECT_CNT_PORT:
1400         case SEC_SECT_CNT_PORT:
1401             *(uint8_t *)dst = drive->sector_count;
1402             break;
1403
1404         case PRI_SECT_NUM_PORT:
1405         case SEC_SECT_NUM_PORT:
1406             *(uint8_t *)dst = drive->sector_num;
1407             break;
1408
1409         case PRI_CYL_LOW_PORT:
1410         case SEC_CYL_LOW_PORT:
1411             *(uint8_t *)dst = drive->cylinder_low;
1412             break;
1413
1414
1415         case PRI_CYL_HIGH_PORT:
1416         case SEC_CYL_HIGH_PORT:
1417             *(uint8_t *)dst = drive->cylinder_high;
1418             break;
1419
1420         case PRI_DRV_SEL_PORT:
1421         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1422             *(uint8_t *)dst = channel->drive_head.val;
1423             break;
1424
1425         case PRI_CTRL_PORT:
1426         case SEC_CTRL_PORT:
1427         case PRI_CMD_PORT:
1428         case SEC_CMD_PORT:
1429             // NOTE: reading the command-block status register should also clear a pending IRQ (the alternate status at the ctrl port should not); that side effect is not modeled here
1430             *(uint8_t *)dst = channel->status.val;
1431             break;
1432
1433         default:
1434             PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1435             return -1;
1436     }
1437
1438     PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
1439
1440     return length;
1441 }
1442
1443
1444
1445 static void init_drive(struct ide_drive * drive) {
1446
1447     drive->sector_count = 0x01;
1448     drive->sector_num = 0x01;
1449     drive->cylinder = 0x0000;
1450
1451     drive->drive_type = BLOCK_NONE;
1452
1453     memset(drive->model, 0, sizeof(drive->model));
1454
1455     drive->transfer_index = 0;
1456     drive->transfer_length = 0;
1457     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1458
1459     drive->num_cylinders = 0;
1460     drive->num_heads = 0;
1461     drive->num_sectors = 0;
1462     
1463
1464     drive->private_data = NULL;
1465     drive->ops = NULL;
1466 }
1467
1468 static void init_channel(struct ide_channel * channel) {
1469     int i = 0;
1470
1471     channel->error_reg.val = 0x01;
1472     channel->drive_head.val = 0x00;
1473     channel->status.val = 0x00;
1474     channel->cmd_reg = 0x00;
1475     channel->ctrl_reg.val = 0x08;
1476
1477     channel->dma_cmd.val = 0;
1478     channel->dma_status.val = 0;
1479     channel->dma_prd_addr = 0;
1480     channel->dma_tbl_index = 0;
1481
1482     for (i = 0; i < 2; i++) {
1483         init_drive(&(channel->drives[i]));
1484     }
1485
1486 }
1487
1488
1489 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1490     PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1491     /*
1492     struct ide_internal * ide = (struct ide_internal *)(private_data);
1493
1494     PrintDebug(VM_NONE, VCORE_NONE, "\t\tInterrupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1495     */
1496
1497     return 0;
1498 }
1499
1500 static int init_ide_state(struct ide_internal * ide) {
1501     int i;
1502
1503     /* 
1504      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1505      */
1506
1507     for (i = 0; i < 2; i++) {
1508         init_channel(&(ide->channels[i]));
1509
1510         // JRL: this is a terrible hack...
1511         ide->channels[i].irq = PRI_DEFAULT_IRQ + i;
1512     }
1513
1514
1515     return 0;
1516 }
1517
1518
1519
1520
1521 static int ide_free(struct ide_internal * ide) {
1522
1523     // deregister from PCI?
1524
1525     V3_Free(ide);
1526
1527     return 0;
1528 }
1529
1530 #ifdef V3_CONFIG_CHECKPOINT
1531
1532 #include <palacios/vmm_sprintf.h>
1533
1534 static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1535     struct ide_internal * ide = (struct ide_internal *)private_data;
1536     struct v3_chkpt_ctx *ctx=0;
1537     int ch_num = 0;
1538     int drive_num = 0;
1539     char buf[128];
1540     
1541
1542     ctx=v3_chkpt_open_ctx(chkpt,id);
1543     
1544     if (!ctx) { 
1545       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
1546       goto savefailout;
1547     }
1548
1549     // nothing saved yet
1550     
1551     v3_chkpt_close_ctx(ctx);ctx=0;
1552    
1553
1554     for (ch_num = 0; ch_num < 2; ch_num++) {
1555         struct ide_channel * ch = &(ide->channels[ch_num]);
1556
1557         snprintf(buf, 128, "%s-%d", id, ch_num);
1558
1559         ctx = v3_chkpt_open_ctx(chkpt, buf);
1560         
1561         if (!ctx) { 
1562           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
1563           goto savefailout;
1564         }
1565
1566         V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
1567         V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
1568         V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
1569         V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
1570         V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
1571         V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
1572         V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
1573         V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
1574         V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
1575         V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);
1576
1577         v3_chkpt_close_ctx(ctx); ctx=0;
1578
1579         for (drive_num = 0; drive_num < 2; drive_num++) {
1580             struct ide_drive * drive = &(ch->drives[drive_num]);
1581             
1582             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1583
1584             ctx = v3_chkpt_open_ctx(chkpt, buf);
1585             
1586             if (!ctx) { 
1587               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
1588               goto savefailout;
1589             }
1590
1591             V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
1592             V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
1593             V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
1594             V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);
1595
1596             V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
1597             V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
1598             V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);
1599
1600             V3_CHKPT_SAVE(ctx, "DATA_BUF",  drive->data_buf, savefailout);
1601
1602
1603             /* For now we'll just pack the type specific data at the end... */
1604             /* We should probably add a new context here in the future (see the illustrative sketch after this function)... */
1605             if (drive->drive_type == BLOCK_CDROM) {
1606               V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
1607               V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
1608               V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
1609             } else if (drive->drive_type == BLOCK_DISK) {
1610               V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
1611               V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
1612               V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
1613             } else if (drive->drive_type == BLOCK_NONE) { 
1614               // no drive connected, so no data
1615             } else {
1616               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1617               goto savefailout;
1618             }
1619             
1620             v3_chkpt_close_ctx(ctx); ctx=0;
1621         }
1622     }
1623
1624 // goodout:
1625     return 0;
1626
1627  savefailout:
1628     PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
1629     if (ctx) {v3_chkpt_close_ctx(ctx); }
1630     return -1;
1631 }
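
/*
 * Editor's sketch (not part of the original driver): one way to give the
 * type-specific drive state its own checkpoint context, as the comment in
 * ide_save_extended() suggests.  The helper name, the "-cd" context suffix,
 * and the failure label are illustrative assumptions; the macros and the
 * cd_state fields are the ones used above.  The block is disabled so it
 * reads as a sketch rather than wired-in code.
 */
#if 0
static int ide_save_cd_subctx(struct v3_chkpt * chkpt, char * id,
                              int ch_num, int drive_num,
                              struct ide_drive * drive) {
    struct v3_chkpt_ctx * ctx = NULL;
    char buf[128];

    /* e.g. "ide-0-1-cd" alongside the existing "ide-0-1" drive context */
    snprintf(buf, 128, "%s-%d-%d-cd", id, ch_num, drive_num);

    ctx = v3_chkpt_open_ctx(chkpt, buf);

    if (!ctx) {
        PrintError(VM_NONE, VCORE_NONE, "Unable to open CD sub-context\n");
        goto cd_savefailout;
    }

    V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, cd_savefailout);
    V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, cd_savefailout);
    V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, cd_savefailout);

    v3_chkpt_close_ctx(ctx);

    return 0;

 cd_savefailout:
    if (ctx) { v3_chkpt_close_ctx(ctx); }
    return -1;
}
#endif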
1632
1633
1634
1635 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1636     struct ide_internal * ide = (struct ide_internal *)private_data;
1637     struct v3_chkpt_ctx *ctx=0;
1638     int ch_num = 0;
1639     int drive_num = 0;
1640     char buf[128];
1641     
1642     ctx=v3_chkpt_open_ctx(chkpt,id);
1643     
1644     if (!ctx) { 
1645       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1646       goto loadfailout;
1647     }
1648
1649     // nothing was saved at the top level, so there is nothing to load here
1650     
1651     v3_chkpt_close_ctx(ctx); ctx=0;
1652    
1653
1654     for (ch_num = 0; ch_num < 2; ch_num++) {
1655         struct ide_channel * ch = &(ide->channels[ch_num]);
1656
1657         snprintf(buf, 128, "%s-%d", id, ch_num);
1658
1659         ctx = v3_chkpt_open_ctx(chkpt, buf);
1660         
1661         if (!ctx) { 
1662           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1663           goto loadfailout;
1664         }
1665
1666         V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1667         V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1668         V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1669         V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1670         V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1671         V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1672         V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1673         V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1674         V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1675         V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1676
1677         v3_chkpt_close_ctx(ctx); ctx=0;
1678
1679         for (drive_num = 0; drive_num < 2; drive_num++) {
1680             struct ide_drive * drive = &(ch->drives[drive_num]);
1681             
1682             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1683
1684             ctx = v3_chkpt_open_ctx(chkpt, buf);
1685             
1686             if (!ctx) { 
1687               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1688               goto loadfailout;
1689             }
1690
1691             V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1692             V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1693             V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1694             V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1695
1696             V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1697             V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1698             V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1699
1700             V3_CHKPT_LOAD(ctx, "DATA_BUF",  drive->data_buf, loadfailout);
1701
1702             
1703             /* For now we'll just pack the type specific data at the end... */
1704             /* We should probably add a new context here in the future... */
1705             if (drive->drive_type == BLOCK_CDROM) {
1706               V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1707               V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1708               V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1709             } else if (drive->drive_type == BLOCK_DISK) {
1710               V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1711               V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1712               V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1713             } else if (drive->drive_type == BLOCK_NONE) { 
1714               // no drive connected, so no data
1715             } else {
1716               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1717               goto loadfailout;
1718             }
1719         }
1720     }
1721 // goodout:
1722     return 0;
1723
1724  loadfailout:
1725     PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1726     if (ctx) {v3_chkpt_close_ctx(ctx); }
1727     return -1;
1728
1729 }
1730
1731
1732
1733 #endif
1734
1735
1736 static struct v3_device_ops dev_ops = {
1737     .free = (int (*)(void *))ide_free,
1738 #ifdef V3_CONFIG_CHECKPOINT
1739     .save_extended = ide_save_extended,
1740     .load_extended = ide_load_extended
1741 #endif
1742 };
1743
1744
1745
1746
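/*
 * Editor's note (illustrative, not taken from the original source): the
 * frontend connection below is driven by the per-disk config entry, whose
 * values are read with v3_cfg_val().  "bus_num" selects the channel
 * (0 = primary, 1 = secondary), "drive_num" selects the drive on that
 * channel (0 = master, 1 = slave), "type" is "hd" or "cdrom", and "model"
 * is an optional ATA model string.  The exact XML wrapping of these keys
 * is guest-configuration specific.
 */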
1747 static int connect_fn(struct v3_vm_info * vm, 
1748                       void * frontend_data, 
1749                       struct v3_dev_blk_ops * ops, 
1750                       v3_cfg_tree_t * cfg, 
1751                       void * private_data) {
1752     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1753     struct ide_channel * channel = NULL;
1754     struct ide_drive * drive = NULL;
1755
1756     char * bus_str = v3_cfg_val(cfg, "bus_num");
1757     char * drive_str = v3_cfg_val(cfg, "drive_num");
1758     char * type_str = v3_cfg_val(cfg, "type");
1759     char * model_str = v3_cfg_val(cfg, "model");
1760     uint_t bus_num = 0;
1761     uint_t drive_num = 0;
1762
1763
1764     if ((!type_str) || (!drive_str) || (!bus_str)) {
1765         PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1766         return -1;
1767     }
1768
1769     bus_num = atoi(bus_str);
1770     drive_num = atoi(drive_str);
1771
1772     channel = &(ide->channels[bus_num]);
1773     drive = &(channel->drives[drive_num]);
1774
1775     if (drive->drive_type != BLOCK_NONE) {
1776         PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1777         return -1;
1778     }
1779
1780     if (model_str != NULL) {
1781         strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1782     }
1783
1784     if (strcasecmp(type_str, "cdrom") == 0) {
1785         drive->drive_type = BLOCK_CDROM;
1786
1787         while (strlen((char *)(drive->model)) < 40) {
1788             strcat((char*)(drive->model), " ");
1789         }
1790
1791     } else if (strcasecmp(type_str, "hd") == 0) {
1792         drive->drive_type = BLOCK_DISK;
1793
1794         drive->hd_state.accessed = 0;
1795         drive->hd_state.mult_sector_num = 1;
1796
1797         drive->num_sectors = 63;
1798         drive->num_heads = 16;
1799         drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
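        /*
         * Worked example (editor's note, image size assumed for illustration):
         * an 8 GiB backing image holds 8589934592 / 512 = 16777216 sectors;
         * with 63 sectors/track and 16 heads that gives
         * 16777216 / (63 * 16) = 16644 cylinders (integer division).
         */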
1800     } else {
1801         PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
1802         return -1;
1803     }
1804  
1805     drive->ops = ops;
1806
1807     if (ide->ide_pci) {
1808         // Hardcoded for now, but it's not a good idea: this appears to set the PIIX3 IDETIM decode-enable bit for this channel
1809         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1810     }
1811  
1812     drive->private_data = private_data;
1813
1814     return 0;
1815 }
1816
1817
1818
1819
1820 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1821     struct ide_internal * ide  = NULL;
1822     char * dev_id = v3_cfg_val(cfg, "ID");
1823     int ret = 0;
1824
1825     PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
1826
1827     ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1828
1829     if (ide == NULL) {
1830         PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
1831         return -1;
1832     }
1833
1834     memset(ide, 0, sizeof(struct ide_internal));
1835
1836     ide->vm = vm;
1837     ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1838
1839     if (ide->pci_bus != NULL) {
1840         struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1841
1842         if (!southbridge) {
1843             PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
1844             V3_Free(ide);
1845             return -1;
1846         }
1847
1848         ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1849     }
1850
1851     PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
1852
1853     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
1854
1855     if (dev == NULL) {
1856         PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
1857         V3_Free(ide);
1858         return -1;
1859     }
1860
1861     if (init_ide_state(ide) == -1) {
1862         PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
1863         v3_remove_device(dev);
1864         return -1;
1865     }
1866
1867     PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
1868
1869     ret |= v3_dev_hook_io(dev, PRI_DATA_PORT, 
1870                           &ide_read_data_port, &write_data_port);
1871     ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
1872                           &read_port_std, &write_port_std);
1873     ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
1874                           &read_port_std, &write_port_std);
1875     ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
1876                           &read_port_std, &write_port_std);
1877     ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
1878                           &read_port_std, &write_port_std);
1879     ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
1880                           &read_port_std, &write_port_std);
1881     ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
1882                           &read_port_std, &write_port_std);
1883     ret |= v3_dev_hook_io(dev, PRI_CMD_PORT, 
1884                           &read_port_std, &write_cmd_port);
1885
1886     ret |= v3_dev_hook_io(dev, SEC_DATA_PORT, 
1887                           &ide_read_data_port, &write_data_port);
1888     ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
1889                           &read_port_std, &write_port_std);
1890     ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
1891                           &read_port_std, &write_port_std);
1892     ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
1893                           &read_port_std, &write_port_std);
1894     ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
1895                           &read_port_std, &write_port_std);
1896     ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
1897                           &read_port_std, &write_port_std);
1898     ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
1899                           &read_port_std, &write_port_std);
1900     ret |= v3_dev_hook_io(dev, SEC_CMD_PORT, 
1901                           &read_port_std, &write_cmd_port);
1902   
1903
1904     ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT, 
1905                           &read_port_std, &write_port_std);
1906
1907     ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT, 
1908                           &read_port_std, &write_port_std);
1909   
1910
1911     ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
1912                           &read_port_std, &write_port_std);
1913
1914     ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
1915                           &read_port_std, &write_port_std);
1916
1917
1918     if (ret != 0) {
1919         PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
1920         v3_remove_device(dev);
1921         return -1;
1922     }
1923
1924
1925     if (ide->pci_bus) {
1926         struct v3_pci_bar bars[6];
1927         struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1928         struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1929         struct pci_device * pci_dev = NULL;
1930         int i;
1931
1932         PrintDebug(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
1933
1934         for (i = 0; i < 6; i++) {
1935             bars[i].type = PCI_BAR_NONE;
1936         }
1937
1938         bars[4].type = PCI_BAR_IO;
1939         //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
1940         bars[4].default_base_port = -1;
1941         bars[4].num_ports = 16;
1942
1943         bars[4].io_read = read_dma_port;
1944         bars[4].io_write = write_dma_port;
1945         bars[4].private_data = ide;
1946
1947         pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
1948                                          "PIIX3_IDE", bars,
1949                                          pci_config_update, NULL, NULL, NULL, ide);
1950
1951         if (pci_dev == NULL) {
1952             PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i); 
1953             v3_remove_device(dev);
1954             return -1;
1955         }
1956
1957         /* This is for CMD646 devices 
1958            pci_dev->config_header.vendor_id = 0x1095;
1959            pci_dev->config_header.device_id = 0x0646;
1960            pci_dev->config_header.revision = 0x8f07;
1961         */
1962
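        /* Vendor/device 0x8086:0x7010 is the Intel 82371SB (PIIX3) IDE function */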
1963         pci_dev->config_header.vendor_id = 0x8086;
1964         pci_dev->config_header.device_id = 0x7010;
1965         pci_dev->config_header.revision = 0x00;
1966
1967         pci_dev->config_header.prog_if = 0x80; // bus-master capable IDE, both channels in legacy (compatibility) mode
1968         pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1969         pci_dev->config_header.class = PCI_CLASS_STORAGE;
1970
1971         pci_dev->config_header.command = 0;
1972         pci_dev->config_header.status = 0x0280;
1973
1974         ide->ide_pci = pci_dev;
1975
1976
1977     }
1978
1979     if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
1980         PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
1981         v3_remove_device(dev);
1982         return -1;
1983     }
1984     
1985
1986     PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
1987
1988     return 0;
1989 }
1990
1991
1992 device_register("IDE", ide_init)
1993
1994
1995
1996
1997 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num, 
1998                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
1999
2000     struct ide_internal * ide  = ide_data;  
2001     struct ide_channel * channel = &(ide->channels[channel_num]);
2002     struct ide_drive * drive = &(channel->drives[drive_num]);
2003     
2004     if (drive->drive_type == BLOCK_NONE) {
2005         return -1;
2006     }
2007
2008     *cylinders = drive->num_cylinders;
2009     *heads = drive->num_heads;
2010     *sectors = drive->num_sectors;
2011
2012     return 0;
2013 }
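
/*
 * Editor's sketch (illustrative only, not part of the original driver): how a
 * caller might use v3_ide_get_geometry() to report the translated CHS
 * geometry of the primary master.  The helper name is an assumption; the
 * call matches the signature defined above.  Disabled so it stays a sketch.
 */
#if 0
static void example_print_primary_master_geometry(void * ide_data) {
    uint32_t cyl = 0;
    uint32_t heads = 0;
    uint32_t sects = 0;

    /* channel 0, drive 0 == primary master; returns -1 if no drive is attached */
    if (v3_ide_get_geometry(ide_data, 0, 0, &cyl, &heads, &sects) == -1) {
        PrintError(VM_NONE, VCORE_NONE, "No drive attached as primary master\n");
        return;
    }

    PrintDebug(VM_NONE, VCORE_NONE, "Primary master geometry: C=%u H=%u S=%u\n",
               cyl, heads, sects);
}
#endif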