Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
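For example, to track one of the release branches instead (the branch name below is only illustrative; run "git branch -r" inside the clone to list the branches that actually exist), execute

  git checkout --track -b Release-1.0 origin/Release-1.0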


[palacios.git] / palacios / src / devices / ide.c   (commit 7b13e5b6506d5fa3c51033f932f3a6d0b4c655b7)
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
36
37
38 #define PRI_DATA_PORT         0x1f0
39 #define PRI_FEATURES_PORT     0x1f1
40 #define PRI_SECT_CNT_PORT     0x1f2
41 #define PRI_SECT_NUM_PORT     0x1f3
42 #define PRI_CYL_LOW_PORT      0x1f4
43 #define PRI_CYL_HIGH_PORT     0x1f5
44 #define PRI_DRV_SEL_PORT      0x1f6
45 #define PRI_CMD_PORT          0x1f7
46 #define PRI_CTRL_PORT         0x3f6
47 #define PRI_ADDR_REG_PORT     0x3f7
48
49 #define SEC_DATA_PORT         0x170
50 #define SEC_FEATURES_PORT     0x171
51 #define SEC_SECT_CNT_PORT     0x172
52 #define SEC_SECT_NUM_PORT     0x173
53 #define SEC_CYL_LOW_PORT      0x174
54 #define SEC_CYL_HIGH_PORT     0x175
55 #define SEC_DRV_SEL_PORT      0x176
56 #define SEC_CMD_PORT          0x177
57 #define SEC_CTRL_PORT         0x376
58 #define SEC_ADDR_REG_PORT     0x377
59
60
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
63
64 #define DATA_BUFFER_SIZE 2048
65
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
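/* Note: DATA_BUFFER_SIZE matches one ATAPI block (2048 bytes) and four 512-byte disk
 * sectors; the PIO paths below refill the buffer one sector or block at a time. */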
68
69
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
71                                           "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72                                            "PRI_CTRL", "PRI_ADDR_REG"};
73
74
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
76                                           "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77                                            "SEC_CTRL", "SEC_ADDR_REG"};
78
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80                                            "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
81
82
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
105 struct ide_cd_state {
106     struct atapi_sense_data sense;
107
108     uint8_t atapi_cmd;
109     struct atapi_error_recovery err_recovery;
110 };
111
112 struct ide_hd_state {
113     int accessed;
114
115     /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116     uint_t mult_sector_num;
117
118     /* This is the current op sector size:
119      * for multiple sector ops this equals mult_sector_num
120      * for standard ops this equals 1
121      */
122     uint_t cur_sector_num;
123 };
124
125 struct ide_drive {
126     // Command Registers
127
128     v3_block_type_t drive_type;
129
130     struct v3_dev_blk_ops * ops;
131
132     union {
133         struct ide_cd_state cd_state;
134         struct ide_hd_state hd_state;
135     };
136
137     char model[41];
138
139     // Where we are in the data transfer
140     uint_t transfer_index;
141
142     // the length of a transfer
143     // calculated for easy access
144     uint_t transfer_length;
145
146     uint64_t current_lba;
147
148     // We have a local data buffer that we use for IO port accesses
149     uint8_t data_buf[DATA_BUFFER_SIZE];
150
151
152     uint32_t num_cylinders;
153     uint32_t num_heads;
154     uint32_t num_sectors;
155
156     void * private_data;
157     
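    /* The unions below overlay the ATA task-file registers with their alternate
     * interpretations: sector_count doubles as the ATAPI interrupt reason flags,
     * sector_num is LBA bits 0-7, and the cylinder low/high pair is either LBA
     * bits 8-23 or, for ATAPI, the host-requested byte count (req_len). */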
158     union {
159         uint8_t sector_count;             // 0x1f2,0x172
160         struct atapi_irq_flags irq_flags;
161     } __attribute__((packed));
162
163     union {
164         uint8_t sector_num;               // 0x1f3,0x173
165         uint8_t lba0;
166     } __attribute__((packed));
167
168     union {
169         uint16_t cylinder;
170         uint16_t lba12;
171         
172         struct {
173             uint8_t cylinder_low;       // 0x1f4,0x174
174             uint8_t cylinder_high;      // 0x1f5,0x175
175         } __attribute__((packed));
176         
177         struct {
178             uint8_t lba1;
179             uint8_t lba2;
180         } __attribute__((packed));
181         
182         
183         // The transfer length requested by the CPU 
184         uint16_t req_len;
185     } __attribute__((packed));
186
187 };
188
189
190
191 struct ide_channel {
192     struct ide_drive drives[2];
193
194     // Command Registers
195     struct ide_error_reg error_reg;     // [read] 0x1f1,0x171
196
197     struct ide_features_reg features;
198
199     struct ide_drive_head_reg drive_head; // 0x1f6,0x176
200
201     struct ide_status_reg status;       // [read] 0x1f7,0x177
202     uint8_t cmd_reg;                // [write] 0x1f7,0x177
203
204     int irq; // this is temporary until we add PCI support
205
206     // Control Registers
207     struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
208
209     struct ide_dma_cmd_reg dma_cmd;
210     struct ide_dma_status_reg dma_status;
211     uint32_t dma_prd_addr;
212     uint_t dma_tbl_index;
213 };
214
215
216
217 struct ide_internal {
218     struct ide_channel channels[2];
219
220     struct v3_southbridge * southbridge;
221     struct vm_device * pci_bus;
222
223     struct pci_device * ide_pci;
224 };
225
226
227
228
229
230 /* Utility functions */
231
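/* 16/32-bit byte-swap helpers: ATAPI (SCSI-style) command and response fields are
 * big-endian, while the guest is little-endian. */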
232 static inline uint16_t be_to_le_16(const uint16_t val) {
233     uint8_t * buf = (uint8_t *)&val;
234     return (buf[0] << 8) | (buf[1]) ;
235 }
236
237 static inline uint16_t le_to_be_16(const uint16_t val) {
238     return be_to_le_16(val);
239 }
240
241
242 static inline uint32_t be_to_le_32(const uint32_t val) {
243     uint8_t * buf = (uint8_t *)&val;
244     return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
245 }
246
247 static inline uint32_t le_to_be_32(const uint32_t val) {
248     return be_to_le_32(val);
249 }
250
251
252 static inline int get_channel_index(ushort_t port) {
253     if (((port & 0xfff8) == 0x1f0) ||
254         ((port & 0xfffe) == 0x3f6) || 
255         ((port & 0xfff8) == 0xc000)) {
256         return 0;
257     } else if (((port & 0xfff8) == 0x170) ||
258                ((port & 0xfffe) == 0x376) ||
259                ((port & 0xfff8) == 0xc008)) {
260         return 1;
261     }
262
263     return -1;
264 }
265
266 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
267     int channel_idx = get_channel_index(port);    
268     return &(ide->channels[channel_idx]);
269 }
270
271 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
272     return &(channel->drives[channel->drive_head.drive_sel]);
273 }
274
275
276 static inline int is_lba_enabled(struct ide_channel * channel) {
277     return channel->drive_head.lba_mode;
278 }
279
280
281 /* Drive Commands */
282 static void ide_raise_irq(struct vm_device * dev, struct ide_channel * channel) {
283     if (channel->ctrl_reg.irq_disable == 0) {
284         //        PrintError("Raising IDE Interrupt %d\n", channel->irq);
285         channel->dma_status.int_gen = 1;
286         v3_raise_irq(dev->vm, channel->irq);
287     }
288 }
289
290
291 static void drive_reset(struct ide_drive * drive) {
292     drive->sector_count = 0x01;
293     drive->sector_num = 0x01;
294
295     PrintDebug("Resetting drive %s\n", drive->model);
296     
297     if (drive->drive_type == BLOCK_CDROM) {
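        // 0xEB14 in the cylinder (LBA mid/high) registers is the ATAPI device signature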
298         drive->cylinder = 0xeb14;
299     } else {
300         drive->cylinder = 0x0000;
301         //drive->hd_state.accessed = 0;
302     }
303
304
305     memset(drive->data_buf, 0, sizeof(drive->data_buf));
306     drive->transfer_index = 0;
307
308     // Send the reset signal to the connected device callbacks
309     //     channel->drives[0].reset();
310     //    channel->drives[1].reset();
311 }
312
313 static void channel_reset(struct ide_channel * channel) {
314     
315     // set busy and seek complete flags
316     channel->status.val = 0x90;
317
318     // Clear errors
319     channel->error_reg.val = 0x01;
320
321     // clear commands
322     channel->cmd_reg = 0x00;
323
324     channel->ctrl_reg.irq_disable = 0;
325 }
326
327 static void channel_reset_complete(struct ide_channel * channel) {
328     channel->status.busy = 0;
329     channel->status.ready = 1;
330
331     channel->drive_head.head_num = 0;    
332     
333     drive_reset(&(channel->drives[0]));
334     drive_reset(&(channel->drives[1]));
335 }
336
337
338 static void ide_abort_command(struct vm_device * dev, struct ide_channel * channel) {
339     channel->status.val = 0x41; // Error + ready
340     channel->error_reg.val = 0x04; // set the ABRT (command aborted) bit
341
342     ide_raise_irq(dev, channel);
343 }
344
345
346 static int dma_read(struct vm_device * dev, struct ide_channel * channel);
347 static int dma_write(struct vm_device * dev, struct ide_channel * channel);
348
349
350 /* ATAPI functions */
351 #include "atapi.h"
352
353 /* ATA functions */
354 #include "ata.h"
355
356
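/* Bus-master DMA is driven by a Physical Region Descriptor (PRD) table in guest memory:
 * each entry holds a guest-physical base address, a byte count, and an end-of-table
 * flag, and the DMA loops below walk the entries until EOT is reached. */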
357 #ifdef CONFIG_DEBUG_IDE
358 static void print_prd_table(struct vm_device * dev, struct ide_channel * channel) {
359     struct ide_dma_prd prd_entry;
360     int index = 0;
361
362     PrintDebug("Dumping PRD table\n");
363
364     while (1) {
365         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
366         int ret;
367
368         ret = read_guest_pa_memory(dev->vm, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
369         
370         if (ret != sizeof(struct ide_dma_prd)) {
371             PrintError("Could not read PRD\n");
372             return;
373         }
374
375         PrintDebug("\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
376                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
377
378         if (prd_entry.end_of_table) {
379             break;
380         }
381
382         index++;
383     }
384
385     return;
386 }
387 #endif
388
389 /* IO Operations */
390 static int dma_read(struct vm_device * dev, struct ide_channel * channel) {
391     struct ide_drive * drive = get_selected_drive(channel);
392     // This is at top level scope to do the EOT test at the end
393     struct ide_dma_prd prd_entry;
394     uint_t bytes_left = drive->transfer_length;
395
396     // Read in the data buffer....
397     // Read a sector/block at a time until the prd entry is full.
398
399 #ifdef CONFIG_DEBUG_IDE
400     print_prd_table(dev, channel);
401 #endif
402
403     PrintDebug("DMA read for %d bytes\n", bytes_left);
404
405     // Loop through the disk data
406     while (bytes_left > 0) {
407         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
408         uint_t prd_bytes_left = 0;
409         uint_t prd_offset = 0;
410         int ret;
411
412         PrintDebug("PRD table address = %x\n", channel->dma_prd_addr);
413
414         ret = read_guest_pa_memory(dev->vm, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
415
416         if (ret != sizeof(struct ide_dma_prd)) {
417             PrintError("Could not read PRD\n");
418             return -1;
419         }
420
421         PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
422                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
423
424         // loop through the PRD data....
425
426         prd_bytes_left = prd_entry.size;
427
428
429         while (prd_bytes_left > 0) {
430             uint_t bytes_to_write = 0;
431
432             if (drive->drive_type == BLOCK_DISK) {
433                 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
434
435
436                 if (ata_read(dev, channel, drive->data_buf, 1) == -1) {
437                     PrintError("Failed to read next disk sector\n");
438                     return -1;
439                 }
440             } else if (drive->drive_type == BLOCK_CDROM) {
441                 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
442                     bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
443
444                     if (atapi_read_chunk(dev, channel) == -1) {
445                         PrintError("Failed to read next disk sector\n");
446                         return -1;
447                     }
448                 } else {
449                     PrintDebug("DMA of command packet\n");
450                     PrintError("How does this work???\n");
451                     return -1;
452                     bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
453                     prd_bytes_left = bytes_to_write;
454                 }
455             }
456
457             PrintDebug("Writing DMA data to guest Memory ptr=%p, len=%d\n", 
458                        (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
459
460             drive->current_lba++;
461
462             ret = write_guest_pa_memory(dev->vm, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 
463
464             if (ret != bytes_to_write) {
465                 PrintError("Failed to copy data into guest memory... (ret=%d)\n", ret);
466                 return -1;
467             }
468
469             PrintDebug("\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
470
471             drive->transfer_index += ret;
472             prd_bytes_left -= ret;
473             prd_offset += ret;
474             bytes_left -= ret;
475         }
476
477         channel->dma_tbl_index++;
478
479         if (drive->drive_type == BLOCK_DISK) {
480             if (drive->transfer_index % HD_SECTOR_SIZE) {
481                 PrintError("We currently don't handle sectors that span PRD descriptors\n");
482                 return -1;
483             }
484         } else if (drive->drive_type == BLOCK_CDROM) {
485             if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
486                 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
487                     PrintError("We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
488                     PrintError("transfer_index=%d, transfer_length=%d\n", 
489                                drive->transfer_index, drive->transfer_length);
490                     return -1;
491                 }
492             }
493         }
494
495
496         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
497             PrintError("DMA table not large enough for data transfer...\n");
498             return -1;
499         }
500     }
501
502     /*
503       drive->irq_flags.io_dir = 1;
504       drive->irq_flags.c_d = 1;
505       drive->irq_flags.rel = 0;
506     */
507
508
509     // Update to the next PRD entry
510
511     // set DMA status
512
513     if (prd_entry.end_of_table) {
514         channel->status.busy = 0;
515         channel->status.ready = 1;
516         channel->status.data_req = 0;
517         channel->status.error = 0;
518         channel->status.seek_complete = 1;
519
520         channel->dma_status.active = 0;
521         channel->dma_status.err = 0;
522     }
523
524     ide_raise_irq(dev, channel);
525
526     return 0;
527 }
528
529
530 static int dma_write(struct vm_device * dev, struct ide_channel * channel) {
531     struct ide_drive * drive = get_selected_drive(channel);
532     // This is at top level scope to do the EOT test at the end
533     struct ide_dma_prd prd_entry;
534     uint_t bytes_left = drive->transfer_length;
535
536
537     PrintDebug("DMA write from %d bytes\n", bytes_left);
538
539     // Loop through disk data
540     while (bytes_left > 0) {
541         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
542         uint_t prd_bytes_left = 0;
543         uint_t prd_offset = 0;
544         int ret;
545         
546         PrintDebug("PRD Table address = %x\n", channel->dma_prd_addr);
547
548         ret = read_guest_pa_memory(dev->vm, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
549
550         if (ret != sizeof(struct ide_dma_prd)) {
551             PrintError("Could not read PRD\n");
552             return -1;
553         }
554
555         PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
556                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
557
558         prd_bytes_left = prd_entry.size;
559
560         while (prd_bytes_left > 0) {
561             uint_t bytes_to_write = 0;
562
563
564             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
565
566
567             ret = read_guest_pa_memory(dev->vm, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
568
569             if (ret != bytes_to_write) {
570                 PrintError("Faild to copy data from guest memory... (ret=%d)\n", ret);
571                 return -1;
572             }
573
574             PrintDebug("\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
575
576
577             if (ata_write(dev, channel, drive->data_buf, 1) == -1) {
578                 PrintError("Failed to write data to disk\n");
579                 return -1;
580             }
581             
582             drive->current_lba++;
583
584             drive->transfer_index += ret;
585             prd_bytes_left -= ret;
586             prd_offset += ret;
587             bytes_left -= ret;
588         }
589
590         channel->dma_tbl_index++;
591
592         if (drive->transfer_index % HD_SECTOR_SIZE) {
593             PrintError("We currently don't handle sectors that span PRD descriptors\n");
594             return -1;
595         }
596
597         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
598             PrintError("DMA table not large enough for data transfer...\n");
599             return -1;
600         }
601     }
602
603     if (prd_entry.end_of_table) {
604         channel->status.busy = 0;
605         channel->status.ready = 1;
606         channel->status.data_req = 0;
607         channel->status.error = 0;
608         channel->status.seek_complete = 1;
609
610         channel->dma_status.active = 0;
611         channel->dma_status.err = 0;
612     }
613
614     ide_raise_irq(dev, channel);
615
616     return 0;
617 }
618
619
620
621 #define DMA_CMD_PORT      0x00
622 #define DMA_STATUS_PORT   0x02
623 #define DMA_PRD_PORT0     0x04
624 #define DMA_PRD_PORT1     0x05
625 #define DMA_PRD_PORT2     0x06
626 #define DMA_PRD_PORT3     0x07
627
628 #define DMA_CHANNEL_FLAG  0x08
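/* The bus-master DMA registers live in a single 16-byte I/O window (BAR4): offsets
 * 0x0-0x7 belong to the primary channel and 0x8-0xf to the secondary, so bit 3 of the
 * offset (DMA_CHANNEL_FLAG) selects the channel in the handlers below. */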
629
630 static int write_dma_port(ushort_t port, void * src, uint_t length, void * private_data) {
631     struct vm_device * dev = (struct vm_device *)private_data;
632     struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
633     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
634     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
635     struct ide_channel * channel = &(ide->channels[channel_flag]);
636
637     PrintDebug("IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
638                port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
639
640     switch (port_offset) {
641         case DMA_CMD_PORT:
642             channel->dma_cmd.val = *(uint8_t *)src;
643
644             if (channel->dma_cmd.start == 0) {
645                 channel->dma_tbl_index = 0;
646             } else {
647                 channel->dma_status.active = 1;
648
649                 if (channel->dma_cmd.read == 1) {
650                     // DMA Read
651                     if (dma_read(dev, channel) == -1) {
652                         PrintError("Failed DMA Read\n");
653                         return -1;
654                     }
655                 } else {
656                     // DMA write
657                     if (dma_write(dev, channel) == -1) {
658                         PrintError("Failed DMA Write\n");
659                         return -1;
660                     }
661                 }
662
663                 channel->dma_cmd.val &= 0x09;
664             }
665
666             break;
667             
668         case DMA_STATUS_PORT: {
669             uint8_t val = *(uint8_t *)src;
670
671             if (length != 1) {
672                 PrintError("Invalid read length for DMA status port\n");
673                 return -1;
674             }
675
676             // bits 5-6 (drive DMA capable) come from the written value, bit 0 (active) is read-only,
            //  and bits 1-2 (error/interrupt) are write-1-to-clear
677             channel->dma_status.val = ((val & 0x60) | 
678                                        (channel->dma_status.val & 0x01) |
679                                        (channel->dma_status.val & ~val & 0x06));
680
681             break;
682         }           
683         case DMA_PRD_PORT0:
684         case DMA_PRD_PORT1:
685         case DMA_PRD_PORT2:
686         case DMA_PRD_PORT3: {
687             uint_t addr_index = port_offset & 0x3;
688             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
689             int i = 0;
690
691             if (addr_index + length > 4) {
692                 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
693                 return -1;
694             }
695
696             for (i = 0; i < length; i++) {
697                 addr_buf[addr_index + i] = *((uint8_t *)src + i);
698             }
699
700             PrintDebug("Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
701
702             break;
703         }
704         default:
705             PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
706             return -1;
707     }
708
709     return length;
710 }
711
712
713 static int read_dma_port(ushort_t port, void * dst, uint_t length, void * private_data) {
714     struct vm_device * dev = (struct vm_device *)private_data;
715     struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
716     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
717     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
718     struct ide_channel * channel = &(ide->channels[channel_flag]);
719
720     PrintDebug("Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
721
722     switch (port_offset) {
723         case DMA_CMD_PORT:
724             *(uint8_t *)dst = channel->dma_cmd.val;
725             break;
726
727         case DMA_STATUS_PORT:
728             if (length != 1) {
729                 PrintError("Invalid read length for DMA status port\n");
730                 return -1;
731             }
732
733             *(uint8_t *)dst = channel->dma_status.val;
734             break;
735
736         case DMA_PRD_PORT0:
737         case DMA_PRD_PORT1:
738         case DMA_PRD_PORT2:
739         case DMA_PRD_PORT3: {
740             uint_t addr_index = port_offset & 0x3;
741             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
742             int i = 0;
743
744             if (addr_index + length > 4) {
745                 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
746                 return -1;
747             }
748
749             for (i = 0; i < length; i++) {
750                 *((uint8_t *)dst + i) = addr_buf[addr_index + i];
751             }
752
753             break;
754         }
755         default:
756             PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
757             return -1;
758     }
759
760     PrintDebug("\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
761
762     return length;
763 }
764
765
766
767 static int write_cmd_port(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
768     struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
769     struct ide_channel * channel = get_selected_channel(ide, port);
770     struct ide_drive * drive = get_selected_drive(channel);
771
772     if (length != 1) {
773         PrintError("Invalid Write Length on IDE command Port %x\n", port);
774         return -1;
775     }
776
777     PrintDebug("IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
778     
779     channel->cmd_reg = *(uint8_t *)src;
780     
781     switch (channel->cmd_reg) {
782
783         case 0xa1: // ATAPI Identify Device Packet
784             if (drive->drive_type != BLOCK_CDROM) {
785                 drive_reset(drive);
786
787                 // JRL: Should we abort here?
788                 ide_abort_command(dev, channel);
789             } else {
790                 
791                 atapi_identify_device(drive);
792                 
793                 channel->error_reg.val = 0;
794                 channel->status.val = 0x58; // ready, data_req, seek_complete
795             
796                 ide_raise_irq(dev, channel);
797             }
798             break;
799         case 0xec: // Identify Device
800             if (drive->drive_type != BLOCK_DISK) {
801                 drive_reset(drive);
802
803                 // JRL: Should we abort here?
804                 ide_abort_command(dev, channel);
805             } else {
806                 ata_identify_device(drive);
807
808                 channel->error_reg.val = 0;
809                 channel->status.val = 0x58;
810
811                 ide_raise_irq(dev, channel);
812             }
813             break;
814
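        /* ATAPI commands are two-phase: the 0xa0 command tells the drive to expect a
         * command packet of ATAPI_PACKET_SIZE bytes on the data port; the packet itself
         * is collected in write_data_port() and dispatched to atapi_handle_packet(). */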
815         case 0xa0: // ATAPI Command Packet
816             if (drive->drive_type != BLOCK_CDROM) {
817                 ide_abort_command(dev, channel);
818             }
819             
820             drive->sector_count = 1;
821
822             channel->status.busy = 0;
823             channel->status.write_fault = 0;
824             channel->status.data_req = 1;
825             channel->status.error = 0;
826
827             // reset the data buffer...
828             drive->transfer_length = ATAPI_PACKET_SIZE;
829             drive->transfer_index = 0;
830
831             break;
832
833         case 0x20: // Read Sectors with Retry
834         case 0x21: // Read Sectors without Retry
835             drive->hd_state.cur_sector_num = 1;
836
837             if (ata_read_sectors(dev, channel) == -1) {
838                 PrintError("Error reading sectors\n");
839                 return -1;
840             }
841             break;
842
843         case 0x24: // Read Sectors Extended
844             drive->hd_state.cur_sector_num = 1;
845
846             if (ata_read_sectors_ext(dev, channel) == -1) {
847                 PrintError("Error reading extended sectors\n");
848                 return -1;
849             }
850             break;
851
852         case 0xc8: // Read DMA with retry
853         case 0xc9: { // Read DMA
854             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
855
856             if (ata_get_lba(dev, channel, &(drive->current_lba)) == -1) {
857                 ide_abort_command(dev, channel);
858                 return 0;
859             }
860             
861             drive->hd_state.cur_sector_num = 1;
862             
863             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
864             drive->transfer_index = 0;
865
866             if (channel->dma_status.active == 1) {
867                 // DMA Read
868                 if (dma_read(dev, channel) == -1) {
869                     PrintError("Failed DMA Read\n");
870                     return -1;
871                 }
872             }
873             break;
874         }
875
876         case 0xca: { // Write DMA
877             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
878
879             if (ata_get_lba(dev, channel, &(drive->current_lba)) == -1) {
880                 ide_abort_command(dev, channel);
881                 return 0;
882             }
883
884             drive->hd_state.cur_sector_num = 1;
885
886             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
887             drive->transfer_index = 0;
888
889             if (channel->dma_status.active == 1) {
890                 // DMA Write
891                 if (dma_write(dev, channel) == -1) {
892                     PrintError("Failed DMA Write\n");
893                     return -1;
894                 }
895             }
896             break;
897         }
898         case 0xe0: // Standby Now 1
899         case 0xe1: // Set Idle Immediate
900         case 0xe2: // Standby
901         case 0xe3: // Set Idle 1
902         case 0xe6: // Sleep Now 1
903         case 0x94: // Standby Now 2
904         case 0x95: // Idle Immediate (CFA)
905         case 0x96: // Standby 2
906         case 0x97: // Set idle 2
907         case 0x99: // Sleep Now 2
908             channel->status.val = 0;
909             channel->status.ready = 1;
910             ide_raise_irq(dev, channel);
911             break;
912
913         case 0xef: // Set Features
914             // Prior to this the features register has been written to. 
915             // This command tells the drive to check if the new value is supported (the value is drive specific)
916             // Common is that bit0=DMA enable
917             // If valid the drive raises an interrupt, if not it aborts.
918
919             // Do some checking here...
920
921             channel->status.busy = 0;
922             channel->status.write_fault = 0;
923             channel->status.error = 0;
924             channel->status.ready = 1;
925             channel->status.seek_complete = 1;
926             
927             ide_raise_irq(dev, channel);
928             break;
929
930         case 0x91:  // Initialize Drive Parameters
931         case 0x10:  // recalibrate?
932             channel->status.error = 0;
933             channel->status.ready = 1;
934             channel->status.seek_complete = 1;
935             ide_raise_irq(dev, channel);
936             break;
937         case 0xc6: { // Set multiple mode (IDE Block mode) 
938             // This makes the drive transfer multiple sectors before generating an interrupt
939             uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
940
941             if (tmp_sect_num > MAX_MULT_SECTORS) {
942                 ide_abort_command(dev, channel);
943                 break;
944             }
945
946             if (drive->sector_count == 0) {
947                 drive->hd_state.mult_sector_num= 1;
948             } else {
949                 drive->hd_state.mult_sector_num = drive->sector_count;
950             }
951
952             channel->status.ready = 1;
953             channel->status.error = 0;
954
955             ide_raise_irq(dev, channel);
956
957             break;
958         }
959         case 0xc4:  // read multiple sectors
960             drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
961         default:
962             PrintError("Unimplemented IDE command (%x)\n", channel->cmd_reg);
963             return -1;
964     }
965
966     return length;
967 }
968
969
970 static int write_data_port(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
971     struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
972     struct ide_channel * channel = get_selected_channel(ide, port);
973     struct ide_drive * drive = get_selected_drive(channel);
974
975     //    PrintDebug("IDE: Writing Data Port %x (val=%x, len=%d)\n", 
976     //         port, *(uint32_t *)src, length);
977     
978     memcpy(drive->data_buf + drive->transfer_index, src, length);    
979     drive->transfer_index += length;
980
981     // Transfer is complete, dispatch the command
982     if (drive->transfer_index >= drive->transfer_length) {
983         switch (channel->cmd_reg) {
984             case 0x30: // Write Sectors
985                 PrintError("Writing Data not yet implemented\n");
986                 return -1;
987                 
988             case 0xa0: // ATAPI packet command
989                 if (atapi_handle_packet(dev, channel) == -1) {
990                     PrintError("Error handling ATAPI packet\n");
991                     return -1;
992                 }
993                 break;
994             default:
995                 PrintError("Unhandld IDE Command %x\n", channel->cmd_reg);
996                 return -1;
997         }
998     }
999
1000     return length;
1001 }
1002
1003
1004 static int read_hd_data(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
1005     struct ide_drive * drive = get_selected_drive(channel);
1006     int data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1007
1008
1009
1010     if (drive->transfer_index >= drive->transfer_length) {
1011         PrintError("Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
1012                    drive->transfer_length, drive->transfer_index,
1013                    drive->transfer_index + length);
1014         return -1;
1015     }
1016
1017     
1018     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1019         drive->current_lba++;
1020
1021         if (ata_read(dev, channel, drive->data_buf, 1) == -1) {
1022             PrintError("Could not read next disk sector\n");
1023             return -1;
1024         }
1025     }
1026
1027     /*
1028       PrintDebug("Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
1029       *(uint32_t *)(drive->data_buf + data_offset), 
1030       length, data_offset);
1031     */
1032     memcpy(dst, drive->data_buf + data_offset, length);
1033
1034     drive->transfer_index += length;
1035
1036
1037     /* This is the trigger for interrupt injection.
1038      * For read single sector commands we interrupt after every sector
1039      * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1040      * cur_sector_num is configured depending on the operation we are currently running
1041      * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1042      */
1043     if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
1044         (drive->transfer_index == drive->transfer_length)) {
1045         if (drive->transfer_index < drive->transfer_length) {
1046             // An increment is complete, but there is still more data to be transferred...
1047             PrintDebug("Integral Complete, still transferring more sectors\n");
1048             channel->status.data_req = 1;
1049
1050             drive->irq_flags.c_d = 0;
1051         } else {
1052             PrintDebug("Final Sector Transferred\n");
1053             // This was the final read of the request
1054             channel->status.data_req = 0;
1055
1056             
1057             drive->irq_flags.c_d = 1;
1058             drive->irq_flags.rel = 0;
1059         }
1060
1061         channel->status.ready = 1;
1062         drive->irq_flags.io_dir = 1;
1063         channel->status.busy = 0;
1064
1065         ide_raise_irq(dev, channel);
1066     }
1067
1068
1069     return length;
1070 }
1071
1072
1073
1074 static int read_cd_data(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
1075     struct ide_drive * drive = get_selected_drive(channel);
1076     int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1077     int req_offset = drive->transfer_index % drive->req_len;
1078     
1079     if (drive->cd_state.atapi_cmd != 0x28) {
1080         PrintDebug("IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
1081     }
1082
1083     if (drive->transfer_index >= drive->transfer_length) {
1084         PrintError("Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n", 
1085                    drive->transfer_length, drive->transfer_index, 
1086                    drive->transfer_index + length);
1087         return -1;
1088     }
1089
1090     
1091     if ((data_offset == 0) && (drive->transfer_index > 0)) {
1092         if (atapi_update_data_buf(dev, channel) == -1) {
1093             PrintError("Could not update CDROM data buffer\n");
1094             return -1;
1095         }
1096     }
1097
1098     memcpy(dst, drive->data_buf + data_offset, length);
1099     
1100     drive->transfer_index += length;
1101
1102
1103     // Should the req_offset be recalculated here?????
1104     if ((req_offset == 0) && (drive->transfer_index > 0)) {
1105         if (drive->transfer_index < drive->transfer_length) {
1106             // An increment is complete, but there is still more data to be transferred...
1107             
1108             channel->status.data_req = 1;
1109
1110             drive->irq_flags.c_d = 0;
1111
1112             // Update the request length in the cylinder regs
1113             if (atapi_update_req_len(dev, channel, drive->transfer_length - drive->transfer_index) == -1) {
1114                 PrintError("Could not update request length after completed increment\n");
1115                 return -1;
1116             }
1117         } else {
1118             // This was the final read of the request
1119             channel->status.data_req = 0;
1120             channel->status.ready = 1;
1121             
1122             drive->irq_flags.c_d = 1;
1123             drive->irq_flags.rel = 0;
1124         }
1125
1126         drive->irq_flags.io_dir = 1;
1127         channel->status.busy = 0;
1128
1129         ide_raise_irq(dev, channel);
1130     }
1131
1132     return length;
1133 }
1134
1135
1136 static int read_drive_id(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
1137     struct ide_drive * drive = get_selected_drive(channel);
1138
1139     channel->status.busy = 0;
1140     channel->status.ready = 1;
1141     channel->status.write_fault = 0;
1142     channel->status.seek_complete = 1;
1143     channel->status.corrected = 0;
1144     channel->status.error = 0;
1145                 
1146     
1147     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1148     drive->transfer_index += length;
1149     
1150     if (drive->transfer_index >= drive->transfer_length) {
1151         channel->status.data_req = 0;
1152     }
1153     
1154     return length;
1155 }
1156
1157
1158 static int ide_read_data_port(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
1159     struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1160     struct ide_channel * channel = get_selected_channel(ide, port);
1161     struct ide_drive * drive = get_selected_drive(channel);
1162
1163        PrintDebug("IDE: Reading Data Port %x (len=%d)\n", port, length);
1164
1165     if ((channel->cmd_reg == 0xec) ||
1166         (channel->cmd_reg == 0xa1)) {
1167         return read_drive_id((uint8_t *)dst, length, dev, channel);
1168     }
1169
1170     if (drive->drive_type == BLOCK_CDROM) {
1171         if (read_cd_data((uint8_t *)dst, length, dev, channel) == -1) {
1172             PrintError("IDE: Could not read CD Data\n");
1173             return -1;
1174         }
1175     } else if (drive->drive_type == BLOCK_DISK) {
1176         if (read_hd_data((uint8_t *)dst, length, dev, channel) == -1) {
1177             PrintError("IDE: Could not read HD Data\n");
1178             return -1;
1179         }
1180     } else {
1181         memset((uint8_t *)dst, 0, length);
1182     }
1183
1184     return length;
1185 }
1186
1187 static int write_port_std(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
1188     struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1189     struct ide_channel * channel = get_selected_channel(ide, port);
1190     struct ide_drive * drive = get_selected_drive(channel);
1191             
1192     if (length != 1) {
1193         PrintError("Invalid Write length on IDE port %x\n", port);
1194         return -1;
1195     }
1196
1197     PrintDebug("IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1198
1199     switch (port) {
1200         // reset and interrupt enable
1201         case PRI_CTRL_PORT:
1202         case SEC_CTRL_PORT: {
1203             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1204
1205             // only reset channel on a 0->1 reset bit transition
1206             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1207                 channel_reset(channel);
1208             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1209                 channel_reset_complete(channel);
1210             }
1211
1212             channel->ctrl_reg.val = tmp_ctrl->val;          
1213             break;
1214         }
1215         case PRI_FEATURES_PORT:
1216         case SEC_FEATURES_PORT:
1217             channel->features.val = *(uint8_t *)src;
1218             break;
1219
1220         case PRI_SECT_CNT_PORT:
1221         case SEC_SECT_CNT_PORT:
1222             channel->drives[0].sector_count = *(uint8_t *)src;
1223             channel->drives[1].sector_count = *(uint8_t *)src;
1224             break;
1225
1226         case PRI_SECT_NUM_PORT:
1227         case SEC_SECT_NUM_PORT:
1228             channel->drives[0].sector_num = *(uint8_t *)src;
1229             channel->drives[1].sector_num = *(uint8_t *)src;
1230             break;
1231         case PRI_CYL_LOW_PORT:
1232         case SEC_CYL_LOW_PORT:
1233             channel->drives[0].cylinder_low = *(uint8_t *)src;
1234             channel->drives[1].cylinder_low = *(uint8_t *)src;
1235             break;
1236
1237         case PRI_CYL_HIGH_PORT:
1238         case SEC_CYL_HIGH_PORT:
1239             channel->drives[0].cylinder_high = *(uint8_t *)src;
1240             channel->drives[1].cylinder_high = *(uint8_t *)src;
1241             break;
1242
1243         case PRI_DRV_SEL_PORT:
1244         case SEC_DRV_SEL_PORT: {
1245             channel->drive_head.val = *(uint8_t *)src;
1246             
1247             // make sure the reserved bits are ok..
1248             // JRL TODO: check with new ramdisk to make sure this is right...
1249             channel->drive_head.val |= 0xa0;
1250
1251             drive = get_selected_drive(channel);
1252
1253             // Selecting a non-present device is a no-no
1254             if (drive->drive_type == BLOCK_NONE) {
1255                 PrintDebug("Attempting to select a non-present drive\n");
1256                 channel->error_reg.abort = 1;
1257                 channel->status.error = 1;
1258             }
1259
1260             break;
1261         }
1262         default:
1263             PrintError("IDE: Write to unknown Port %x\n", port);
1264             return -1;
1265     }
1266     return length;
1267 }
1268
1269
1270 static int read_port_std(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
1271     struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1272     struct ide_channel * channel = get_selected_channel(ide, port);
1273     struct ide_drive * drive = get_selected_drive(channel);
1274     
1275     if (length != 1) {
1276         PrintError("Invalid Read length on IDE port %x\n", port);
1277         return -1;
1278     }
1279     
1280     PrintDebug("IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1281
1282     if ((port == PRI_ADDR_REG_PORT) ||
1283         (port == SEC_ADDR_REG_PORT)) {
1284         // unused, return 0xff
1285         *(uint8_t *)dst = 0xff;
1286         return length;
1287     }
1288
1289
1290     // if no drive is present just return 0 + reserved bits
1291     if (drive->drive_type == BLOCK_NONE) {
1292         if ((port == PRI_DRV_SEL_PORT) ||
1293             (port == SEC_DRV_SEL_PORT)) {
1294             *(uint8_t *)dst = 0xa0;
1295         } else {
1296             *(uint8_t *)dst = 0;
1297         }
1298
1299         return length;
1300     }
1301
1302     switch (port) {
1303
1304         // This is really the error register.
1305         case PRI_FEATURES_PORT:
1306         case SEC_FEATURES_PORT:
1307             *(uint8_t *)dst = channel->error_reg.val;
1308             break;
1309             
1310         case PRI_SECT_CNT_PORT:
1311         case SEC_SECT_CNT_PORT:
1312             *(uint8_t *)dst = drive->sector_count;
1313             break;
1314
1315         case PRI_SECT_NUM_PORT:
1316         case SEC_SECT_NUM_PORT:
1317             *(uint8_t *)dst = drive->sector_num;
1318             break;
1319
1320         case PRI_CYL_LOW_PORT:
1321         case SEC_CYL_LOW_PORT:
1322             *(uint8_t *)dst = drive->cylinder_low;
1323             break;
1324
1325
1326         case PRI_CYL_HIGH_PORT:
1327         case SEC_CYL_HIGH_PORT:
1328             *(uint8_t *)dst = drive->cylinder_high;
1329             break;
1330
1331         case PRI_DRV_SEL_PORT:
1332         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1333             *(uint8_t *)dst = channel->drive_head.val;
1334             break;
1335
1336         case PRI_CTRL_PORT:
1337         case SEC_CTRL_PORT:
1338         case PRI_CMD_PORT:
1339         case SEC_CMD_PORT:
1340             // A status register read (unlike the alt-status read on the ctrl port) should
1341             // also clear a pending IDE interrupt; that is not modeled here
1341             *(uint8_t *)dst = channel->status.val;
1342             break;
1343
1344         default:
1345             PrintError("Invalid Port: %x\n", port);
1346             return -1;
1347     }
1348
1349     PrintDebug("\tVal=%x\n", *(uint8_t *)dst);
1350
1351     return length;
1352 }
1353
1354
1355
1356 static void init_drive(struct ide_drive * drive) {
1357
1358     drive->sector_count = 0x01;
1359     drive->sector_num = 0x01;
1360     drive->cylinder = 0x0000;
1361
1362     drive->drive_type = BLOCK_NONE;
1363
1364     memset(drive->model, 0, sizeof(drive->model));
1365
1366     drive->transfer_index = 0;
1367     drive->transfer_length = 0;
1368     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1369
1370     drive->num_cylinders = 0;
1371     drive->num_heads = 0;
1372     drive->num_sectors = 0;
1373     
1374
1375     drive->private_data = NULL;
1376     drive->ops = NULL;
1377 }
1378
1379 static void init_channel(struct ide_channel * channel) {
1380     int i = 0;
1381
1382     channel->error_reg.val = 0x01;
1383     channel->drive_head.val = 0x00;
1384     channel->status.val = 0x00;
1385     channel->cmd_reg = 0x00;
1386     channel->ctrl_reg.val = 0x08;
1387
1388
1389     channel->dma_cmd.val = 0;
1390     channel->dma_status.val = 0;
1391     channel->dma_prd_addr = 0;
1392     channel->dma_tbl_index = 0;
1393
1394     for (i = 0; i < 2; i++) {
1395         init_drive(&(channel->drives[i]));
1396     }
1397
1398 }
1399
1400
1401 static int pci_config_update(uint_t reg_num, void * src, uint_t length, void * private_data) {
1402     PrintDebug("PCI Config Update\n");
1403     PrintDebug("\t\tInterupt register (Dev=%s), irq=%d\n", pci_dev->name, pci_dev->config_header.intr_line);
1404
1405     return 0;
1406 }
1407
1408 static int init_ide_state(struct vm_device * dev) {
1409     struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1410     int i;
1411
1412     /* 
1413      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1414      */
1415
1416     for (i = 0; i < 1; i++) {
1417         init_channel(&(ide->channels[i]));
1418
1419         // JRL: this is a terrible hack...
1420         ide->channels[i].irq = PRI_DEFAULT_IRQ + i;
1421     }
1422
1423
1424     return 0;
1425 }
1426
1427
1428
1429
1430 static int ide_free(struct vm_device * dev) {
1431     // unhook io ports....
1432     // deregister from PCI?
1433     return 0;
1434 }
1435
1436
1437 static struct v3_device_ops dev_ops = {
1438     .free = ide_free,
1439     .reset = NULL,
1440     .start = NULL,
1441     .stop = NULL,
1442 };
1443
1444
1445
1446
1447 static int connect_fn(struct guest_info * info, 
1448                       void * frontend_data, 
1449                       struct v3_dev_blk_ops * ops, 
1450                       v3_cfg_tree_t * cfg, 
1451                       void * private_data) {
1452     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1453     struct ide_channel * channel = NULL;
1454     struct ide_drive * drive = NULL;
1455
1456     char * bus_str = v3_cfg_val(cfg, "bus_num");
1457     char * drive_str = v3_cfg_val(cfg, "drive_num");
1458     char * type_str = v3_cfg_val(cfg, "type");
1459     char * model_str = v3_cfg_val(cfg, "model");
1460     uint_t bus_num = 0;
1461     uint_t drive_num = 0;
1462
1463
1464     if ((!type_str) || (!drive_str) || (!bus_str)) {
1465         PrintError("Incomplete IDE Configuration\n");
1466         return -1;
1467     }
1468
1469     bus_num = atoi(bus_str);
1470     drive_num = atoi(drive_str);
1471
1472     channel = &(ide->channels[bus_num]);
1473     drive = &(channel->drives[drive_num]);
1474
1475     if (drive->drive_type != BLOCK_NONE) {
1476         PrintError("Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1477         return -1;
1478     }
1479
1480     strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1481     
1482     if (strcasecmp(type_str, "cdrom") == 0) {
1483         drive->drive_type = BLOCK_CDROM;
1484
1485         while (strlen((char *)(drive->model)) < 40) {
1486             strcat((char*)(drive->model), " ");
1487         }
1488
1489     } else if (strcasecmp(type_str, "hd") == 0) {
1490         drive->drive_type = BLOCK_DISK;
1491
1492         drive->hd_state.accessed = 0;
1493         drive->hd_state.mult_sector_num = 1;
1494
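        /* Fixed translated geometry: 63 sectors per track and 16 heads, with the
         * cylinder count derived from the backend capacity. This assumes that
         * ops->get_capacity() reports capacity in sectors; if it reports bytes, the
         * result would also need to be divided by HD_SECTOR_SIZE. */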
1495         drive->num_sectors = 63;
1496         drive->num_heads = 16;
1497         drive->num_cylinders = ops->get_capacity(private_data)  / (drive->num_sectors * drive->num_heads);
1498     } else {
1499         PrintError("invalid IDE drive type\n");
1500         return -1;
1501     }
1502  
1503
1504     drive->ops = ops;
1505
1506     if (ide->ide_pci) {
1507         // Hardcode this for now, but its not a good idea....
1508         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1509     }
1510  
1511     drive->private_data = private_data;
1512
1513     return 0;
1514 }
1515
1516
1517
1518
1519 static int ide_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
1520     struct ide_internal * ide  = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));  
1521     char * name = v3_cfg_val(cfg, "name");
1522
1523     PrintDebug("IDE: Initializing IDE\n");
1524     memset(ide, 0, sizeof(struct ide_internal));
1525
1526
1527     ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1528
1529     if (ide->pci_bus != NULL) {
1530         struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1531
1532         if (!southbridge) {
1533             PrintError("Could not find southbridge\n");
1534             return -1;
1535         }
1536
1537         ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1538     }
1539
1540     PrintDebug("IDE: Creating IDE bus x 2\n");
1541
1542     struct vm_device * dev = v3_allocate_device(name, &dev_ops, ide);
1543
1544     if (v3_attach_device(vm, dev) == -1) {
1545         PrintError("Could not attach device %s\n", name);
1546         return -1;
1547     }
1548
1549     if (init_ide_state(dev) == -1) {
1550         PrintError("Failed to initialize IDE state\n");
1551         return -1;
1552     }
1553
1554     PrintDebug("Connecting to IDE IO ports\n");
1555
1556     v3_dev_hook_io(dev, PRI_DATA_PORT, 
1557                    &ide_read_data_port, &write_data_port);
1558     v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
1559                    &read_port_std, &write_port_std);
1560     v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
1561                    &read_port_std, &write_port_std);
1562     v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
1563                    &read_port_std, &write_port_std);
1564     v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
1565                    &read_port_std, &write_port_std);
1566     v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
1567                    &read_port_std, &write_port_std);
1568     v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
1569                    &read_port_std, &write_port_std);
1570     v3_dev_hook_io(dev, PRI_CMD_PORT, 
1571                    &read_port_std, &write_cmd_port);
1572
1573     v3_dev_hook_io(dev, SEC_DATA_PORT, 
1574                    &ide_read_data_port, &write_data_port);
1575     v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
1576                    &read_port_std, &write_port_std);
1577     v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
1578                    &read_port_std, &write_port_std);
1579     v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
1580                    &read_port_std, &write_port_std);
1581     v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
1582                    &read_port_std, &write_port_std);
1583     v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
1584                    &read_port_std, &write_port_std);
1585     v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
1586                    &read_port_std, &write_port_std);
1587     v3_dev_hook_io(dev, SEC_CMD_PORT, 
1588                    &read_port_std, &write_cmd_port);
1589   
1590
1591     v3_dev_hook_io(dev, PRI_CTRL_PORT, 
1592                    &read_port_std, &write_port_std);
1593
1594     v3_dev_hook_io(dev, SEC_CTRL_PORT, 
1595                    &read_port_std, &write_port_std);
1596   
1597
1598     v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
1599                    &read_port_std, &write_port_std);
1600
1601     v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
1602                    &read_port_std, &write_port_std);
1603
1604
1605
1606
1607     if (ide->pci_bus) {
1608         struct v3_pci_bar bars[6];
1609         struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1610         struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1611         struct pci_device * pci_dev = NULL;
1612         int i;
1613
1614         PrintDebug("Connecting IDE to PCI bus\n");
1615
1616         for (i = 0; i < 6; i++) {
1617             bars[i].type = PCI_BAR_NONE;
1618         }
1619
1620         bars[4].type = PCI_BAR_IO;
1621         //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
1622         bars[4].default_base_port = -1;
1623         bars[4].num_ports = 16;
1624
1625         bars[4].io_read = read_dma_port;
1626         bars[4].io_write = write_dma_port;
1627         bars[4].private_data = dev;
1628
1629         pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
1630                                          "PIIX3_IDE", bars,
1631                                          pci_config_update, NULL, NULL, dev);
1632
1633         if (pci_dev == NULL) {
1634             PrintError("Failed to register IDE BUS %d with PCI\n", i); 
1635             return -1;
1636         }
1637
1638         /* This is for CMD646 devices 
1639            pci_dev->config_header.vendor_id = 0x1095;
1640            pci_dev->config_header.device_id = 0x0646;
1641            pci_dev->config_header.revision = 0x8f07;
1642         */
1643
1644         pci_dev->config_header.vendor_id = 0x8086;
1645         pci_dev->config_header.device_id = 0x7010;
1646         pci_dev->config_header.revision = 0x00;
1647
1648         pci_dev->config_header.prog_if = 0x80; // Master IDE device
1649         pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1650         pci_dev->config_header.class = PCI_CLASS_STORAGE;
1651
1652         pci_dev->config_header.command = 0;
1653         pci_dev->config_header.status = 0x0280;
1654
1655         ide->ide_pci = pci_dev;
1656
1657
1658     }
1659
1660     if (v3_dev_add_blk_frontend(vm, name, connect_fn, (void *)ide) == -1) {
1661         PrintError("Could not register %s as frontend\n", name);
1662         return -1;
1663     }
1664     
1665
1666     PrintDebug("IDE Initialized\n");
1667
1668     return 0;
1669 }
1670
1671
1672 device_register("IDE", ide_init)
1673
1674
1675
1676
1677 int v3_ide_get_geometry(struct vm_device * ide_dev, int channel_num, int drive_num, 
1678                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
1679
1680     struct ide_internal * ide  = (struct ide_internal *)(ide_dev->private_data);  
1681     struct ide_channel * channel = &(ide->channels[channel_num]);
1682     struct ide_drive * drive = &(channel->drives[drive_num]);
1683     
1684     if (drive->drive_type == BLOCK_NONE) {
1685         return -1;
1686     }
1687
1688     *cylinders = drive->num_cylinders;
1689     *heads = drive->num_heads;
1690     *sectors = drive->num_sectors;
1691
1692     return 0;
1693 }
1694
1695
1696