Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


IDE / ATA rewrites (1st step)
[palacios.git] / palacios / src / devices / ide.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
28
29 #ifndef V3_CONFIG_DEBUG_IDE
30 #undef PrintDebug
31 #define PrintDebug(fmt, args...)
32 #endif
33
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
36
37
38 #define PRI_DATA_PORT         0x1f0
39 #define PRI_FEATURES_PORT     0x1f1
40 #define PRI_SECT_CNT_PORT     0x1f2
41 #define PRI_SECT_NUM_PORT     0x1f3
42 #define PRI_CYL_LOW_PORT      0x1f4
43 #define PRI_CYL_HIGH_PORT     0x1f5
44 #define PRI_DRV_SEL_PORT      0x1f6
45 #define PRI_CMD_PORT          0x1f7
46 #define PRI_CTRL_PORT         0x3f6
47 #define PRI_ADDR_REG_PORT     0x3f7
48
49 #define SEC_DATA_PORT         0x170
50 #define SEC_FEATURES_PORT     0x171
51 #define SEC_SECT_CNT_PORT     0x172
52 #define SEC_SECT_NUM_PORT     0x173
53 #define SEC_CYL_LOW_PORT      0x174
54 #define SEC_CYL_HIGH_PORT     0x175
55 #define SEC_DRV_SEL_PORT      0x176
56 #define SEC_CMD_PORT          0x177
57 #define SEC_CTRL_PORT         0x376
58 #define SEC_ADDR_REG_PORT     0x377
59
60
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
63
64 #define DATA_BUFFER_SIZE 2048
65
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
68
69
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM", 
71                                           "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72                                            "PRI_CTRL", "PRI_ADDR_REG"};
73
74
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM", 
76                                           "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77                                            "SEC_CTRL", "SEC_ADDR_REG"};
78
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80                                            "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
81
82
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
84
85 static inline const char * io_port_to_str(uint16_t port) {
86     if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87         return ide_pri_port_strs[port - PRI_DATA_PORT];
88     } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89         return ide_sec_port_strs[port - SEC_DATA_PORT];
90     } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91         return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92     } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93         return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94     } 
95     return NULL;
96 }
97
98
99 static inline const char * dma_port_to_str(uint16_t port) {
100     return ide_dma_port_strs[port & 0x7];
101 }
102
103
104
/* Per-drive state specific to ATAPI (CD-ROM) devices */
struct ide_cd_state {
    struct atapi_sense_data sense;             // last SENSE data to report to the guest

    uint8_t atapi_cmd;                         // opcode of the ATAPI packet in progress
    struct atapi_error_recovery err_recovery;
};

/* Per-drive state specific to ATA (hard disk) devices */
struct ide_hd_state {
    uint32_t accessed;

    /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
    uint32_t mult_sector_num;

    /* This is the current op sector size:
     * for multiple sector ops this equals mult_sector_num
     * for standard ops this equals 1
     */
    uint32_t cur_sector_num;
};
124
/* 
 * Per-drive state. The unions at the bottom overlay the ATA task-file
 * registers, giving both the raw register view and the logical
 * (LBA / ATAPI) view of the same bytes.
 */
struct ide_drive {
    // Command Registers

    v3_block_type_t drive_type;   // BLOCK_NONE / BLOCK_DISK / BLOCK_CDROM

    struct v3_dev_blk_ops * ops;  // backend block-device callbacks

    // Drive-type-specific state, discriminated by drive_type
    union {
        struct ide_cd_state cd_state;
        struct ide_hd_state hd_state;
    };

    char model[41];

    // Where we are in the data transfer
    uint32_t transfer_index;

    // the length of a transfer
    // calculated for easy access
    uint32_t transfer_length;

    uint64_t current_lba;         // advanced as sectors are transferred

    // We have a local data buffer that we use for IO port accesses
    uint8_t data_buf[DATA_BUFFER_SIZE];


    // CHS geometry exposed to the guest
    uint32_t num_cylinders;
    uint32_t num_heads;
    uint32_t num_sectors;

    void * private_data;
    
    union {
        uint8_t sector_count;             // 0x1f2,0x172  (ATA)
        struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
    } __attribute__((packed));

    union {
        uint8_t sector_num;               // 0x1f3,0x173
        uint8_t lba0;                     // LBA bits 0-7
    } __attribute__((packed));

    union {
        uint16_t cylinder;
        uint16_t lba12;                   // LBA bits 8-23
        
        struct {
            uint8_t cylinder_low;       // 0x1f4,0x174
            uint8_t cylinder_high;      // 0x1f5,0x175
        } __attribute__((packed));
        
        struct {
            uint8_t lba1;
            uint8_t lba2;
        } __attribute__((packed));
        
        
        // The transfer length requested by the CPU 
        uint16_t req_len;
    } __attribute__((packed));

};
188
189
190
/* 
 * State for one IDE channel: two attached drives plus the shared
 * task-file, control, and bus-master DMA registers.
 */
struct ide_channel {
    struct ide_drive drives[2];  // [0]=master, [1]=slave (see drive_head.drive_sel)

    // Command Registers
    struct ide_error_reg error_reg;     // [read] 0x1f1,0x171

    struct ide_features_reg features;

    struct ide_drive_head_reg drive_head; // 0x1f6,0x176

    struct ide_status_reg status;       // [read] 0x1f7,0x177
    uint8_t cmd_reg;                // [write] 0x1f7,0x177

    int irq; // this is temporary until we add PCI support

    // Control Registers
    struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376

    // Bus-master DMA register block: the byte-array view (used by port
    // reads) overlays the structured register view
    union {
        uint8_t dma_ports[8];
        struct {
            struct ide_dma_cmd_reg dma_cmd;
            uint8_t rsvd1;
            struct ide_dma_status_reg dma_status;
            uint8_t rsvd2;
            uint32_t dma_prd_addr;   // guest-physical base of the PRD table
        } __attribute__((packed));
    } __attribute__((packed));

    uint32_t dma_tbl_index;          // current entry in the PRD table walk
};
222
223
224
/* Top-level device state: both IDE channels plus the PCI wiring */
struct ide_internal {
    struct ide_channel channels[2];   // [0]=primary, [1]=secondary

    struct v3_southbridge * southbridge;
    struct vm_device * pci_bus;

    struct pci_device * ide_pci;      // our PCI config-space device

    struct v3_vm_info * vm;
};
235
236
237
238
239
240 /* Utility functions */
241
/* 
 * Swap the two bytes of a 16-bit value by reinterpreting it as a byte
 * array. (Despite the name, this is a plain byte swap and works in
 * either direction.)
 */
static inline uint16_t be_to_le_16(const uint16_t val) {
    const uint8_t * bytes = (const uint8_t *)&val;
    uint16_t swapped = (uint16_t)((bytes[0] << 8) | bytes[1]);
    return swapped;
}
246
/* A 16-bit byte swap is its own inverse, so reuse the be->le conversion */
static inline uint16_t le_to_be_16(const uint16_t val) {
    return be_to_le_16(val);
}
250
251
/* 
 * Swap the four bytes of a 32-bit value by reassembling them in reverse
 * memory order. (Despite the name, this is a plain byte swap and works
 * in either direction.)
 */
static inline uint32_t be_to_le_32(const uint32_t val) {
    const uint8_t * buf = (const uint8_t *)&val;
    // BUGFIX: cast each byte to uint32_t before shifting. buf[0] is only
    // promoted to (signed) int, so `buf[0] << 24` with a byte >= 0x80
    // shifts into the sign bit, which is undefined behavior in C.
    return ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) | 
           ((uint32_t)buf[2] << 8)  | (uint32_t)buf[3];
}
256
/* A 32-bit byte swap is its own inverse, so reuse the be->le conversion */
static inline uint32_t le_to_be_32(const uint32_t val) {
    return be_to_le_32(val);
}
260
261
262 static inline int get_channel_index(ushort_t port) {
263     if (((port & 0xfff8) == 0x1f0) ||
264         ((port & 0xfffe) == 0x3f6) || 
265         ((port & 0xfff8) == 0xc000)) {
266         return 0;
267     } else if (((port & 0xfff8) == 0x170) ||
268                ((port & 0xfffe) == 0x376) ||
269                ((port & 0xfff8) == 0xc008)) {
270         return 1;
271     }
272
273     return -1;
274 }
275
/* 
 * Map an IO port to the channel that owns it.
 * NOTE(review): get_channel_index() returns -1 for unknown ports, which
 * would index out of bounds here. Handlers are only hooked on known IDE
 * ports, but this deserves an explicit check — confirm with callers.
 */
static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
    int channel_idx = get_channel_index(port);    
    return &(ide->channels[channel_idx]);
}

/* Return the drive currently selected by the drive-select bit (0=master, 1=slave) */
static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
    return &(channel->drives[channel->drive_head.drive_sel]);
}


/* Nonzero when the guest has enabled LBA addressing on this channel */
static inline int is_lba_enabled(struct ide_channel * channel) {
    return channel->drive_head.lba_mode;
}
289
290
291 /* Drive Commands */
292 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
293     if (channel->ctrl_reg.irq_disable == 0) {
294
295         PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
296
297         channel->dma_status.int_gen = 1;
298         v3_raise_irq(ide->vm, channel->irq);
299     } else {
300         PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
301     }
302 }
303
304
305 static void drive_reset(struct ide_drive * drive) {
306     drive->sector_count = 0x01;
307     drive->sector_num = 0x01;
308
309     PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
310     
311     if (drive->drive_type == BLOCK_CDROM) {
312         drive->cylinder = 0xeb14;
313     } else {
314         drive->cylinder = 0x0000;
315         //drive->hd_state.accessed = 0;
316     }
317
318
319     memset(drive->data_buf, 0, sizeof(drive->data_buf));
320     drive->transfer_index = 0;
321
322     // Send the reset signal to the connected device callbacks
323     //     channel->drives[0].reset();
324     //    channel->drives[1].reset();
325 }
326
327 static void channel_reset(struct ide_channel * channel) {
328     
329     // set busy and seek complete flags
330     channel->status.val = 0x90;
331
332     // Clear errors
333     channel->error_reg.val = 0x01;
334
335     // clear commands
336     channel->cmd_reg = 0;  // NOP
337
338     channel->ctrl_reg.irq_disable = 0;
339 }
340
341 static void channel_reset_complete(struct ide_channel * channel) {
342     channel->status.busy = 0;
343     channel->status.ready = 1;
344
345     channel->drive_head.head_num = 0;    
346     
347     drive_reset(&(channel->drives[0]));
348     drive_reset(&(channel->drives[1]));
349 }
350
351
/* 
 * Abort the current command: set the error and ready status bits, latch
 * the error code, and interrupt the guest.
 */
static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {

    PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");

    channel->status.val = 0x41; // Error + ready
    channel->error_reg.val = 0x04; // NOTE(review): 0x04 matches the ATA ABRT (command aborted) bit — confirm against ide-types.h

    ide_raise_irq(ide, channel);
}
361
362
363 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
364 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
365
366
367 /* ATAPI functions */
368 #include "atapi.h"
369
370 /* ATA functions */
371 #include "ata.h"
372
373
374
375 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
376     struct ide_dma_prd prd_entry;
377     int index = 0;
378
379     V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
380
381     while (1) {
382         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
383         int ret = 0;
384
385         ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
386         
387         if (ret != sizeof(struct ide_dma_prd)) {
388             PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
389             return;
390         }
391
392         V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n", 
393                    prd_entry.base_addr, 
394                    (prd_entry.size == 0) ? 0x10000 : prd_entry.size, 
395                    prd_entry.end_of_table);
396
397         if (prd_entry.end_of_table) {
398             break;
399         }
400
401         index++;
402     }
403
404     return;
405 }
406
407
408 /* IO Operations */
409 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
410     struct ide_drive * drive = get_selected_drive(channel);
411     // This is at top level scope to do the EOT test at the end
412     struct ide_dma_prd prd_entry = {};
413     uint_t bytes_left = drive->transfer_length;
414
415     // Read in the data buffer....
416     // Read a sector/block at a time until the prd entry is full.
417
418 #ifdef V3_CONFIG_DEBUG_IDE
419     print_prd_table(ide, channel);
420 #endif
421
422     PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
423
424     // Loop through the disk data
425     while (bytes_left > 0) {
426         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
427         uint_t prd_bytes_left = 0;
428         uint_t prd_offset = 0;
429         int ret;
430
431         PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
432
433         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
434
435         if (ret != sizeof(struct ide_dma_prd)) {
436             PrintError(core->vm_info, core, "Could not read PRD\n");
437             return -1;
438         }
439
440         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
441                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
442
443         // loop through the PRD data....
444
445         if (prd_entry.size == 0) {
446             // a size of 0 means 64k
447             prd_bytes_left = 0x10000;
448         } else {
449             prd_bytes_left = prd_entry.size;
450         }
451
452
453         while (prd_bytes_left > 0) {
454             uint_t bytes_to_write = 0;
455
456             if (drive->drive_type == BLOCK_DISK) {
457                 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
458
459
460                 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
461                     PrintError(core->vm_info, core, "Failed to read next disk sector\n");
462                     return -1;
463                 }
464             } else if (drive->drive_type == BLOCK_CDROM) {
465                 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
466                     bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
467
468                     if (atapi_read_chunk(ide, channel) == -1) {
469                         PrintError(core->vm_info, core, "Failed to read next disk sector\n");
470                         return -1;
471                     }
472                 } else {
473                     /*
474                     PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
475                     return -1;
476                     */
477                     int cmd_ret = 0;
478
479                     //V3_Print(core->vm_info, core, "DMA of command packet\n");
480
481                     bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
482                     prd_bytes_left = bytes_to_write;
483
484
485                     // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
486                     cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, 
487                                                   bytes_to_write, drive->data_buf); 
488
489                     // check cmd_ret
490
491
492                     bytes_to_write = 0;
493                     prd_bytes_left = 0;
494                     drive->transfer_index += bytes_to_write;
495
496                     channel->status.busy = 0;
497                     channel->status.ready = 1;
498                     channel->status.data_req = 0;
499                     channel->status.error = 0;
500                     channel->status.seek_complete = 1;
501
502                     channel->dma_status.active = 0;
503                     channel->dma_status.err = 0;
504
505                     ide_raise_irq(ide, channel);
506                     
507                     return 0;
508                 }
509             }
510
511             PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n", 
512                        (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
513
514             drive->current_lba++;
515
516             ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf); 
517
518             if (ret != bytes_to_write) {
519                 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
520                 return -1;
521             }
522
523             PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
524
525             drive->transfer_index += ret;
526             prd_bytes_left -= ret;
527             prd_offset += ret;
528             bytes_left -= ret;
529         }
530
531         channel->dma_tbl_index++;
532
533         if (drive->drive_type == BLOCK_DISK) {
534             if (drive->transfer_index % HD_SECTOR_SIZE) {
535                 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
536                 return -1;
537             }
538         } else if (drive->drive_type == BLOCK_CDROM) {
539             if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
540                 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
541                     PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
542                     PrintError(core->vm_info, core, "transfer_index=%d, transfer_length=%d\n", 
543                                drive->transfer_index, drive->transfer_length);
544                     return -1;
545                 }
546             }
547         }
548
549
550         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
551             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
552             return -1;
553         }
554     }
555
556     /*
557       drive->irq_flags.io_dir = 1;
558       drive->irq_flags.c_d = 1;
559       drive->irq_flags.rel = 0;
560     */
561
562
563     // Update to the next PRD entry
564
565     // set DMA status
566
567     if (prd_entry.end_of_table) {
568         channel->status.busy = 0;
569         channel->status.ready = 1;
570         channel->status.data_req = 0;
571         channel->status.error = 0;
572         channel->status.seek_complete = 1;
573
574         channel->dma_status.active = 0;
575         channel->dma_status.err = 0;
576     }
577
578     ide_raise_irq(ide, channel);
579
580     return 0;
581 }
582
583
584 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
585     struct ide_drive * drive = get_selected_drive(channel);
586     // This is at top level scope to do the EOT test at the end
587     struct ide_dma_prd prd_entry = {};
588     uint_t bytes_left = drive->transfer_length;
589
590
591     PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
592
593     // Loop through disk data
594     while (bytes_left > 0) {
595         uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
596         uint_t prd_bytes_left = 0;
597         uint_t prd_offset = 0;
598         int ret;
599         
600         PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
601
602         ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
603
604         if (ret != sizeof(struct ide_dma_prd)) {
605             PrintError(core->vm_info, core, "Could not read PRD\n");
606             return -1;
607         }
608
609         PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
610                    prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
611
612
613         if (prd_entry.size == 0) {
614             // a size of 0 means 64k
615             prd_bytes_left = 0x10000;
616         } else {
617             prd_bytes_left = prd_entry.size;
618         }
619
620         while (prd_bytes_left > 0) {
621             uint_t bytes_to_write = 0;
622
623
624             bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
625
626
627             ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
628
629             if (ret != bytes_to_write) {
630                 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
631                 return -1;
632             }
633
634             PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
635
636
637             if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
638                 PrintError(core->vm_info, core, "Failed to write data to disk\n");
639                 return -1;
640             }
641             
642             drive->current_lba++;
643
644             drive->transfer_index += ret;
645             prd_bytes_left -= ret;
646             prd_offset += ret;
647             bytes_left -= ret;
648         }
649
650         channel->dma_tbl_index++;
651
652         if (drive->transfer_index % HD_SECTOR_SIZE) {
653             PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
654             return -1;
655         }
656
657         if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
658             PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
659             PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%u)...\n", 
660                        bytes_left, drive->transfer_length);
661             PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n", 
662                        prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
663
664             print_prd_table(ide, channel);
665             return -1;
666         }
667     }
668
669     if (prd_entry.end_of_table) {
670         channel->status.busy = 0;
671         channel->status.ready = 1;
672         channel->status.data_req = 0;
673         channel->status.error = 0;
674         channel->status.seek_complete = 1;
675
676         channel->dma_status.active = 0;
677         channel->dma_status.err = 0;
678     }
679
680     ide_raise_irq(ide, channel);
681
682     return 0;
683 }
684
685
686
687 #define DMA_CMD_PORT      0x00
688 #define DMA_STATUS_PORT   0x02
689 #define DMA_PRD_PORT0     0x04
690 #define DMA_PRD_PORT1     0x05
691 #define DMA_PRD_PORT2     0x06
692 #define DMA_PRD_PORT3     0x07
693
694 #define DMA_CHANNEL_FLAG  0x08
695
696 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
697     struct ide_internal * ide = (struct ide_internal *)private_data;
698     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
699     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
700     struct ide_channel * channel = &(ide->channels[channel_flag]);
701
702     PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n", 
703                port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
704
705     switch (port_offset) {
706         case DMA_CMD_PORT:
707             channel->dma_cmd.val = *(uint8_t *)src;
708
709             if (channel->dma_cmd.start == 0) {
710                 channel->dma_tbl_index = 0;
711             } else {
712                 channel->dma_status.active = 1;
713
714                 if (channel->dma_cmd.read == 1) {
715                     // DMA Read
716                     if (dma_read(core, ide, channel) == -1) {
717                         PrintError(core->vm_info, core, "Failed DMA Read\n");
718                         return -1;
719                     }
720                 } else {
721                     // DMA write
722                     if (dma_write(core, ide, channel) == -1) {
723                         PrintError(core->vm_info, core, "Failed DMA Write\n");
724                         return -1;
725                     }
726                 }
727
728                 channel->dma_cmd.val &= 0x09;
729             }
730
731             break;
732             
733         case DMA_STATUS_PORT: {
734             uint8_t val = *(uint8_t *)src;
735
736             if (length != 1) {
737                 PrintError(core->vm_info, core, "Invalid read length for DMA status port\n");
738                 return -1;
739             }
740
741             // weirdness
742             channel->dma_status.val = ((val & 0x60) | 
743                                        (channel->dma_status.val & 0x01) |
744                                        (channel->dma_status.val & ~val & 0x06));
745
746             break;
747         }           
748         case DMA_PRD_PORT0:
749         case DMA_PRD_PORT1:
750         case DMA_PRD_PORT2:
751         case DMA_PRD_PORT3: {
752             uint_t addr_index = port_offset & 0x3;
753             uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
754             int i = 0;
755
756             if (addr_index + length > 4) {
757                 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
758                 return -1;
759             }
760
761             for (i = 0; i < length; i++) {
762                 addr_buf[addr_index + i] = *((uint8_t *)src + i);
763             }
764
765             PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
766
767             break;
768         }
769         default:
770             PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
771             break;
772     }
773
774     return length;
775 }
776
777
778 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
779     struct ide_internal * ide = (struct ide_internal *)private_data;
780     uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
781     uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
782     struct ide_channel * channel = &(ide->channels[channel_flag]);
783
784     PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
785
786     if (port_offset + length > 16) {
787         PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
788         return -1;
789     }
790
791     memcpy(dst, channel->dma_ports + port_offset, length);
792     
793     PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
794
795     return length;
796 }
797
798
799
800 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
801     struct ide_internal * ide = priv_data;
802     struct ide_channel * channel = get_selected_channel(ide, port);
803     struct ide_drive * drive = get_selected_drive(channel);
804
805     if (length != 1) {
806         PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
807         return -1;
808     }
809
810     PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
811     
812     channel->cmd_reg = *(uint8_t *)src;
813     
814     switch (channel->cmd_reg) {
815
816         case ATA_PIDENTIFY: // ATAPI Identify Device Packet
817             if (drive->drive_type != BLOCK_CDROM) {
818                 drive_reset(drive);
819
820                 // JRL: Should we abort here?
821                 ide_abort_command(ide, channel);
822             } else {
823                 
824                 atapi_identify_device(drive);
825                 
826                 channel->error_reg.val = 0;
827                 channel->status.val = 0x58; // ready, data_req, seek_complete
828             
829                 ide_raise_irq(ide, channel);
830             }
831             break;
832         case ATA_IDENTIFY: // Identify Device
833             if (drive->drive_type != BLOCK_DISK) {
834                 drive_reset(drive);
835
836                 // JRL: Should we abort here?
837                 ide_abort_command(ide, channel);
838             } else {
839                 ata_identify_device(drive);
840
841                 channel->error_reg.val = 0;
842                 channel->status.val = 0x58;
843
844                 ide_raise_irq(ide, channel);
845             }
846             break;
847
848         case ATA_PACKETCMD: // ATAPI Command Packet
849             if (drive->drive_type != BLOCK_CDROM) {
850                 ide_abort_command(ide, channel);
851             }
852             
853             drive->sector_count = 1;
854
855             channel->status.busy = 0;
856             channel->status.write_fault = 0;
857             channel->status.data_req = 1;
858             channel->status.error = 0;
859
860             // reset the data buffer...
861             drive->transfer_length = ATAPI_PACKET_SIZE;
862             drive->transfer_index = 0;
863
864             break;
865
866         case ATA_READ: // Read Sectors with Retry
867         case ATA_READ_ONCE: // Read Sectors without Retry
868             drive->hd_state.cur_sector_num = 1;
869
870             if (ata_read_sectors(ide, channel) == -1) {
871                 PrintError(core->vm_info, core, "Error reading sectors\n");
872                 ide_abort_command(ide,channel);
873             }
874             break;
875
876         case ATA_READ_EXT: // Read Sectors Extended
877             drive->hd_state.cur_sector_num = 1;
878
879             if (ata_read_sectors_ext(ide, channel) == -1) {
880                 PrintError(core->vm_info, core, "Error reading extended sectors\n");
881                 ide_abort_command(ide,channel);
882                 
883             }
884             break;
885
886         case ATA_WRITE: 
887         case ATA_WRITE_ONCE:        {// Write Sector
888             drive->hd_state.cur_sector_num = 1;
889
890             if (ata_write_sectors(ide, channel) == -1) {
891                 PrintError(core->vm_info, core, "Error writing sectors\n");
892                 ide_abort_command(ide,channel);
893             }
894             break;
895         }
896
897         case ATA_READDMA: // Read DMA with retry
898         case ATA_READDMA_ONCE: { // Read DMA
899             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
900
901             if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
902                 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
903                 ide_abort_command(ide, channel);
904                 return length;
905             }
906             
907             drive->hd_state.cur_sector_num = 1;
908             
909             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
910             drive->transfer_index = 0;
911
912             if (channel->dma_status.active == 1) {
913                 // DMA Read
914                 if (dma_read(core, ide, channel) == -1) {
915                     PrintError(core->vm_info, core, "Failed DMA Read\n");
916                     ide_abort_command(ide, channel);
917                 }
918             } else {
919                 PrintError(core->vm_info,core,"Attempt to initiate DMA read on channel that is not active\n");
920                 ide_abort_command(ide, channel);
921             }
922             break;
923         }
924
925         case ATA_WRITEDMA: { // Write DMA
926             uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
927
928             if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
929                 PrintError(core->vm_info,core,"Cannot get lba\n");
930                 ide_abort_command(ide, channel);
931                 return length;
932             }
933
934             drive->hd_state.cur_sector_num = 1;
935
936             drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
937             drive->transfer_index = 0;
938
939             if (channel->dma_status.active == 1) {
940                 // DMA Write
941                 if (dma_write(core, ide, channel) == -1) {
942                     PrintError(core->vm_info, core, "Failed DMA Write\n");
943                     ide_abort_command(ide, channel);
944                 }
945             } else {
946                 PrintError(core->vm_info,core,"Attempt to initiate DMA write with DMA inactive\n");
947                 ide_abort_command(ide, channel);
948             }
949             break;
950         }
951         case ATA_STANDBYNOW1: // Standby Now 1
952         case ATA_IDLEIMMEDIATE: // Set Idle Immediate
953         case ATA_STANDBY: // Standby
954         case ATA_SETIDLE1: // Set Idle 1
955         case ATA_SLEEPNOW1: // Sleep Now 1
956         case ATA_STANDBYNOW2: // Standby Now 2
957         case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
958         case ATA_STANDBY2: // Standby 2
959         case ATA_SETIDLE2: // Set idle 2
960         case ATA_SLEEPNOW2: // Sleep Now 2
961             channel->status.val = 0;
962             channel->status.ready = 1;
963             ide_raise_irq(ide, channel);
964             break;
965
966         case ATA_SETFEATURES: // Set Features
967             // Prior to this the features register has been written to. 
968             // This command tells the drive to check if the new value is supported (the value is drive specific)
969             // Common is that bit0=DMA enable
970             // If valid the drive raises an interrupt, if not it aborts.
971
972             // Do some checking here...
973
974             channel->status.busy = 0;
975             channel->status.write_fault = 0;
976             channel->status.error = 0;
977             channel->status.ready = 1;
978             channel->status.seek_complete = 1;
979             
980             ide_raise_irq(ide, channel);
981             break;
982
983         case ATA_SPECIFY:  // Initialize Drive Parameters
984         case ATA_RECAL:  // recalibrate?
985             channel->status.error = 0;
986             channel->status.ready = 1;
987             channel->status.seek_complete = 1;
988             ide_raise_irq(ide, channel);
989             break;
990         case ATA_SETMULT: { // Set multiple mode (IDE Block mode) 
991             // This makes the drive transfer multiple sectors before generating an interrupt
992             uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
993
994             if (tmp_sect_num > MAX_MULT_SECTORS) {
995                 ide_abort_command(ide, channel);
996                 break;
997             }
998
999             if (drive->sector_count == 0) {
1000                 drive->hd_state.mult_sector_num= 1;
1001             } else {
1002                 drive->hd_state.mult_sector_num = drive->sector_count;
1003             }
1004
1005             channel->status.ready = 1;
1006             channel->status.error = 0;
1007
1008             ide_raise_irq(ide, channel);
1009
1010             break;
1011         }
1012
1013         case ATA_DEVICE_RESET: // Reset Device
1014             drive_reset(drive);
1015             channel->error_reg.val = 0x01;
1016             channel->status.busy = 0;
1017             channel->status.ready = 1;
1018             channel->status.seek_complete = 1;
1019             channel->status.write_fault = 0;
1020             channel->status.error = 0;
1021             break;
1022
1023         case ATA_CHECKPOWERMODE1: // Check power mode
1024             drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1025             channel->status.busy = 0;
1026             channel->status.ready = 1;
1027             channel->status.write_fault = 0;
1028             channel->status.data_req = 0;
1029             channel->status.error = 0;
1030             break;
1031             /*
1032         case ATA_MULTREAD:  // read multiple sectors
1033             drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
1034             */
1035
1036         default:
1037             PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1038             ide_abort_command(ide, channel);
1039             break;
1040     }
1041
1042     return length;
1043 }
1044
1045
1046
1047
/*
 * Service a guest PIO read from the data port for a hard disk.
 *
 * Copies 'length' bytes from the drive's sector staging buffer into 'dst',
 * fetching the next sector from the backend whenever a sector boundary is
 * crossed (the first sector was already read by ata_read_sectors when the
 * command was issued).  Raises the channel IRQ after each completed
 * "increment" (cur_sector_num sectors) and at the end of the transfer.
 *
 * Returns 'length' on success, -1 on buffer overrun or backend read failure.
 */
static int read_hd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // byte offset of this access within the current sector
    int data_offset = drive->transfer_index % HD_SECTOR_SIZE;


    PrintDebug(VM_NONE,VCORE_NONE, "Read HD data:  transfer_index %x transfer length %x current sector numer %x\n",
               drive->transfer_index, drive->transfer_length, 
               drive->hd_state.cur_sector_num);

    // Guest is reading past the end of the programmed transfer
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
                   drive->transfer_length, drive->transfer_index,
                   drive->transfer_index + length);
        return -1;
    }


    // NOTE(review): a read straddling a sector boundary is logged but not
    // rejected; the copy below would then run past the freshly-read sector
    if (data_offset + length > HD_SECTOR_SIZE) { 
       PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%d length=%u)!\n",data_offset,length);
    }
   
    // For index==0, the read has been done in ata_read_sectors
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        // advance to next sector and read it
        
        drive->current_lba++;

        if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
            return -1;
        }
    }

    /*
      PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n", 
      *(uint32_t *)(drive->data_buf + data_offset), 
      length, data_offset);
    */
    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;


    /* This is the trigger for interrupt injection.
     * For read single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
        (drive->transfer_index == drive->transfer_length)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
            channel->status.data_req = 1;
        } else {
            PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
            // This was the final read of the request
            channel->status.data_req = 0;
        }

        channel->status.ready = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }


    return length;
}
1118
/*
 * Service a guest PIO write to the data port for a hard disk.
 *
 * Accumulates 'length' bytes from 'src' into the drive's sector staging
 * buffer and flushes the buffer to the backend each time a full sector has
 * been received.  Raises the channel IRQ after each completed "increment"
 * (cur_sector_num sectors) and at the end of the transfer.
 *
 * Returns 'length' on success, -1 on buffer overrun or backend write failure.
 */
static int write_hd_data(uint8_t * src, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // byte offset of this access within the current sector
    int data_offset = drive->transfer_index % HD_SECTOR_SIZE;


    PrintDebug(VM_NONE,VCORE_NONE, "Write HD data:  transfer_index %x transfer length %x current sector numer %x\n",
               drive->transfer_index, drive->transfer_length, 
               drive->hd_state.cur_sector_num);

    // Guest is writing past the end of the programmed transfer
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
                   drive->transfer_length, drive->transfer_index,
                   drive->transfer_index + length);
        return -1;
    }

    if (data_offset + length > HD_SECTOR_SIZE) { 
       PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%d length=%u)!\n",data_offset,length);
    }

    // Copy data into our buffer - there will be room due to
    // (a) the ata_write test below is flushing sectors
    // (b) if we somehow get a sector-stradling write (an error), this will
    //     be OK since the buffer itself is >1 sector in memory
    memcpy(drive->data_buf + data_offset, src, length);

    drive->transfer_index += length;

    if ((data_offset+length) >= HD_SECTOR_SIZE) {
        // Write out the sector we just finished
        if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
            return -1;
        }

        // go onto next sector
        drive->current_lba++;
    }

    /* This is the trigger for interrupt injection.
     * For write single sector commands we interrupt after every sector
     * For multi sector writes we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) || 
        (drive->transfer_index == drive->transfer_length)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
            channel->status.data_req = 1;
        } else {
            PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
            // This was the final write of the request
            channel->status.data_req = 0;
        }

        channel->status.ready = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1184
1185
1186
/*
 * Service a guest PIO read from the data port for an ATAPI CDROM.
 *
 * Copies 'length' bytes from the drive's block staging buffer into 'dst',
 * refilling the buffer (atapi_update_data_buf) each time an ATAPI block
 * boundary is crossed.  After every access it updates the ATAPI interrupt
 * reason flags and the request-length cylinder registers, and raises the
 * channel IRQ.
 *
 * Returns 'length' on success, -1 on overrun or buffer/req-len update failure.
 */
static int read_cd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // byte offset of this access within the current ATAPI block
    int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
    //  int req_offset = drive->transfer_index % drive->req_len;
    
    // 0x28 is READ(10); suppress the (noisy) per-access debug output for it
    if (drive->cd_state.atapi_cmd != 0x28) {
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
        PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%d, transfer idx=%d\n", drive->transfer_length, drive->transfer_index);
    }

    

    // Guest is reading past the end of the programmed transfer
    if (drive->transfer_index >= drive->transfer_length) {
        PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n", 
                   drive->transfer_length, drive->transfer_index, 
                   drive->transfer_index + length);
        return -1;
    }

    // Refill the staging buffer when a new ATAPI block begins
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
        if (atapi_update_data_buf(ide, channel) == -1) {
            PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
            return -1;
        }
    }

    memcpy(dst, drive->data_buf + data_offset, length);
    
    drive->transfer_index += length;


    // Should the req_offset be recalculated here?????
    // NOTE(review): with the req_offset condition commented out, this fires
    // on every access, not just at request boundaries -- confirm intended
    if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
        if (drive->transfer_index < drive->transfer_length) {
            // An increment is complete, but there is still more data to be transferred...
            
            channel->status.data_req = 1;

            drive->irq_flags.c_d = 0;

            // Update the request length in the cylinder regs
            if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
                PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
                return -1;
            }
        } else {
            // This was the final read of the request

            drive->req_len = 0;
            channel->status.data_req = 0;
            channel->status.ready = 1;
            
            drive->irq_flags.c_d = 1;
            drive->irq_flags.rel = 0;
        }

        drive->irq_flags.io_dir = 1;
        channel->status.busy = 0;

        ide_raise_irq(ide, channel);
    }

    return length;
}
1252
1253
1254 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1255     struct ide_drive * drive = get_selected_drive(channel);
1256
1257     channel->status.busy = 0;
1258     channel->status.ready = 1;
1259     channel->status.write_fault = 0;
1260     channel->status.seek_complete = 1;
1261     channel->status.corrected = 0;
1262     channel->status.error = 0;
1263                 
1264     
1265     memcpy(dst, drive->data_buf + drive->transfer_index, length);
1266     drive->transfer_index += length;
1267     
1268     if (drive->transfer_index >= drive->transfer_length) {
1269         channel->status.data_req = 0;
1270     }
1271     
1272     return length;
1273 }
1274
1275
1276
1277 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1278     struct ide_internal * ide = priv_data;
1279     struct ide_channel * channel = get_selected_channel(ide, port);
1280     struct ide_drive * drive = get_selected_drive(channel);
1281
1282     //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
1283
1284     if ((channel->cmd_reg == ATA_IDENTIFY) ||
1285         (channel->cmd_reg == ATA_PIDENTIFY)) {
1286         return read_drive_id((uint8_t *)dst, length, ide, channel);
1287     }
1288
1289     if (drive->drive_type == BLOCK_CDROM) {
1290         if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1291             PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1292             return -1;
1293         }
1294     } else if (drive->drive_type == BLOCK_DISK) {
1295         if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1296             PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
1297             return -1;
1298         }
1299     } else {
1300         memset((uint8_t *)dst, 0, length);
1301     }
1302
1303     return length;
1304 }
1305
1306 // For the write side, we care both about
1307 // direct PIO writes to a drive as well as 
1308 // writes that pass a packet through to an CD
1309 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1310     struct ide_internal * ide = priv_data;
1311     struct ide_channel * channel = get_selected_channel(ide, port);
1312     struct ide_drive * drive = get_selected_drive(channel);
1313
1314     PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n", 
1315             port, *(uint32_t *)src, length);
1316
1317     if (drive->drive_type == BLOCK_CDROM) {
1318         if (channel->cmd_reg == ATA_PACKETCMD) { 
1319             // short command packet - no check for space... 
1320             memcpy(drive->data_buf + drive->transfer_index, src, length);
1321             drive->transfer_index += length;
1322             if (drive->transfer_index >= drive->transfer_length) {
1323                 if (atapi_handle_packet(core, ide, channel) == -1) {
1324                     PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1325                     return -1;
1326                 }
1327             }
1328         } else {
1329             PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1330             return -1;
1331         }
1332     } else if (drive->drive_type == BLOCK_DISK) {
1333         if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1334             PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1335             return -1;
1336         }
1337     } else {
1338         // nothing ... do not support writable cd
1339     }
1340
1341     return length;
1342 }
1343
1344 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1345     struct ide_internal * ide = priv_data;
1346     struct ide_channel * channel = get_selected_channel(ide, port);
1347     struct ide_drive * drive = get_selected_drive(channel);
1348             
1349     if (length != 1) {
1350         PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1351         return -1;
1352     }
1353
1354     PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1355
1356     switch (port) {
1357         // reset and interrupt enable
1358         case PRI_CTRL_PORT:
1359         case SEC_CTRL_PORT: {
1360             struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1361
1362             // only reset channel on a 0->1 reset bit transition
1363             if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1364                 channel_reset(channel);
1365             } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1366                 channel_reset_complete(channel);
1367             }
1368
1369             channel->ctrl_reg.val = tmp_ctrl->val;          
1370             break;
1371         }
1372         case PRI_FEATURES_PORT:
1373         case SEC_FEATURES_PORT:
1374             channel->features.val = *(uint8_t *)src;
1375             break;
1376
1377         case PRI_SECT_CNT_PORT:
1378         case SEC_SECT_CNT_PORT:
1379             channel->drives[0].sector_count = *(uint8_t *)src;
1380             channel->drives[1].sector_count = *(uint8_t *)src;
1381             break;
1382
1383         case PRI_SECT_NUM_PORT:
1384         case SEC_SECT_NUM_PORT:
1385             channel->drives[0].sector_num = *(uint8_t *)src;
1386             channel->drives[1].sector_num = *(uint8_t *)src;
1387             break;
1388         case PRI_CYL_LOW_PORT:
1389         case SEC_CYL_LOW_PORT:
1390             channel->drives[0].cylinder_low = *(uint8_t *)src;
1391             channel->drives[1].cylinder_low = *(uint8_t *)src;
1392             break;
1393
1394         case PRI_CYL_HIGH_PORT:
1395         case SEC_CYL_HIGH_PORT:
1396             channel->drives[0].cylinder_high = *(uint8_t *)src;
1397             channel->drives[1].cylinder_high = *(uint8_t *)src;
1398             break;
1399
1400         case PRI_DRV_SEL_PORT:
1401         case SEC_DRV_SEL_PORT: {
1402             channel->drive_head.val = *(uint8_t *)src;
1403             
1404             // make sure the reserved bits are ok..
1405             // JRL TODO: check with new ramdisk to make sure this is right...
1406             channel->drive_head.val |= 0xa0;
1407
1408             drive = get_selected_drive(channel);
1409
1410             // Selecting a non-present device is a no-no
1411             if (drive->drive_type == BLOCK_NONE) {
1412                 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1413                 channel->error_reg.abort = 1;
1414                 channel->status.error = 1;
1415             } else {
1416                 channel->status.busy = 0;
1417                 channel->status.ready = 1;
1418                 channel->status.data_req = 0;
1419                 channel->status.error = 0;
1420                 channel->status.seek_complete = 1;
1421                 
1422                 channel->dma_status.active = 0;
1423                 channel->dma_status.err = 0;
1424             }
1425
1426             break;
1427         }
1428         default:
1429             PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
1430             return -1;
1431     }
1432     return length;
1433 }
1434
1435
1436 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1437     struct ide_internal * ide = priv_data;
1438     struct ide_channel * channel = get_selected_channel(ide, port);
1439     struct ide_drive * drive = get_selected_drive(channel);
1440     
1441     if (length != 1) {
1442         PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1443         return -1;
1444     }
1445     
1446     PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1447
1448     if ((port == PRI_ADDR_REG_PORT) ||
1449         (port == SEC_ADDR_REG_PORT)) {
1450         // unused, return 0xff
1451         *(uint8_t *)dst = 0xff;
1452         return length;
1453     }
1454
1455
1456     // if no drive is present just return 0 + reserved bits
1457     if (drive->drive_type == BLOCK_NONE) {
1458         if ((port == PRI_DRV_SEL_PORT) ||
1459             (port == SEC_DRV_SEL_PORT)) {
1460             *(uint8_t *)dst = 0xa0;
1461         } else {
1462             *(uint8_t *)dst = 0;
1463         }
1464
1465         return length;
1466     }
1467
1468     switch (port) {
1469
1470         // This is really the error register.
1471         case PRI_FEATURES_PORT:
1472         case SEC_FEATURES_PORT:
1473             *(uint8_t *)dst = channel->error_reg.val;
1474             break;
1475             
1476         case PRI_SECT_CNT_PORT:
1477         case SEC_SECT_CNT_PORT:
1478             *(uint8_t *)dst = drive->sector_count;
1479             break;
1480
1481         case PRI_SECT_NUM_PORT:
1482         case SEC_SECT_NUM_PORT:
1483             *(uint8_t *)dst = drive->sector_num;
1484             break;
1485
1486         case PRI_CYL_LOW_PORT:
1487         case SEC_CYL_LOW_PORT:
1488             *(uint8_t *)dst = drive->cylinder_low;
1489             break;
1490
1491
1492         case PRI_CYL_HIGH_PORT:
1493         case SEC_CYL_HIGH_PORT:
1494             *(uint8_t *)dst = drive->cylinder_high;
1495             break;
1496
1497         case PRI_DRV_SEL_PORT:
1498         case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
1499             *(uint8_t *)dst = channel->drive_head.val;
1500             break;
1501
1502         case PRI_CTRL_PORT:
1503         case SEC_CTRL_PORT:
1504         case PRI_CMD_PORT:
1505         case SEC_CMD_PORT:
1506             // Something about lowering interrupts here....
1507             *(uint8_t *)dst = channel->status.val;
1508             break;
1509
1510         default:
1511             PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1512             return -1;
1513     }
1514
1515     PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
1516
1517     return length;
1518 }
1519
1520
1521
1522 static void init_drive(struct ide_drive * drive) {
1523
1524     drive->sector_count = 0x01;
1525     drive->sector_num = 0x01;
1526     drive->cylinder = 0x0000;
1527
1528     drive->drive_type = BLOCK_NONE;
1529
1530     memset(drive->model, 0, sizeof(drive->model));
1531
1532     drive->transfer_index = 0;
1533     drive->transfer_length = 0;
1534     memset(drive->data_buf, 0, sizeof(drive->data_buf));
1535
1536     drive->num_cylinders = 0;
1537     drive->num_heads = 0;
1538     drive->num_sectors = 0;
1539     
1540
1541     drive->private_data = NULL;
1542     drive->ops = NULL;
1543 }
1544
1545 static void init_channel(struct ide_channel * channel) {
1546     int i = 0;
1547
1548     channel->error_reg.val = 0x01;
1549
1550     //** channel->features = 0x0;
1551
1552     channel->drive_head.val = 0x00;
1553     channel->status.val = 0x00;
1554     channel->cmd_reg = 0x00;
1555     channel->ctrl_reg.val = 0x08;
1556
1557     channel->dma_cmd.val = 0;
1558     channel->dma_status.val = 0;
1559     channel->dma_prd_addr = 0;
1560     channel->dma_tbl_index = 0;
1561
1562     for (i = 0; i < 2; i++) {
1563         init_drive(&(channel->drives[i]));
1564     }
1565
1566 }
1567
1568
1569 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1570     PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1571     /*
1572     struct ide_internal * ide = (struct ide_internal *)(private_data);
1573
1574     PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1575     */
1576
1577     return 0;
1578 }
1579
1580 static int init_ide_state(struct ide_internal * ide) {
1581
1582     /* 
1583      * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry 
1584      */
1585
1586     init_channel(&(ide->channels[0]));
1587     ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1588
1589     init_channel(&(ide->channels[1]));
1590     ide->channels[1].irq = SEC_DEFAULT_IRQ ;
1591
1592
1593     return 0;
1594 }
1595
1596
1597
1598
/* Release the IDE controller state. Returns 0.
 * (PCI deregistration is still an open TODO.) */
static int ide_free(struct ide_internal * ide) {

    // deregister from PCI?
    V3_Free(ide);

    return 0;
}
1607
1608 #ifdef V3_CONFIG_CHECKPOINT
1609
1610 #include <palacios/vmm_sprintf.h>
1611
/*
 * Checkpoint callback: serialize the full IDE controller state.
 *
 * Context layout: an (currently empty) context named <id> for the controller
 * itself, then one context per channel ("<id>-<ch>") holding the channel
 * registers, and one per drive ("<id>-<ch>-<drive>") holding the task-file
 * and transfer state plus drive-type specific data (ATAPI sense/command for
 * CDROMs, multi-sector state for disks).
 *
 * Returns 0 on success, -1 on failure (partial state may have been written).
 */
static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    struct v3_chkpt_ctx *ctx=0;
    int ch_num = 0;
    int drive_num = 0;
    char buf[128];   // scratch space for the per-channel / per-drive context names
    

    ctx=v3_chkpt_open_ctx(chkpt,id);
    
    if (!ctx) { 
      PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
      goto savefailout;
    }

    // nothing saved yet
    
    v3_chkpt_close_ctx(ctx);ctx=0;
   

    for (ch_num = 0; ch_num < 2; ch_num++) {
        struct ide_channel * ch = &(ide->channels[ch_num]);

        snprintf(buf, 128, "%s-%d", id, ch_num);

        ctx = v3_chkpt_open_ctx(chkpt, buf);
        
        if (!ctx) { 
          PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
          goto savefailout;
        }

        // channel registers and bus-master DMA engine state
        V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
        V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
        V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
        V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
        V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);

        v3_chkpt_close_ctx(ctx); ctx=0;

        for (drive_num = 0; drive_num < 2; drive_num++) {
            struct ide_drive * drive = &(ch->drives[drive_num]);
            
            snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);

            ctx = v3_chkpt_open_ctx(chkpt, buf);
            
            if (!ctx) { 
              PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
              goto savefailout;
            }

            // task-file registers and in-flight transfer state
            V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
            V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
            V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);

            V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
            V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);

            V3_CHKPT_SAVE(ctx, "DATA_BUF",  drive->data_buf, savefailout);


            /* For now we'll just pack the type specific data at the end... */
            /* We should probably add a new context here in the future... */
            if (drive->drive_type == BLOCK_CDROM) {
              V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
              V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
            } else if (drive->drive_type == BLOCK_DISK) {
              V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
              V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
              V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
            } else if (drive->drive_type == BLOCK_NONE) { 
              // no drive connected, so no data
            } else {
              PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
              goto savefailout;
            }
            
            v3_chkpt_close_ctx(ctx); ctx=0;
        }
    }

// goodout:
    return 0;

 savefailout:
    PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
    // close any context left open by a failed V3_CHKPT_SAVE
    if (ctx) {v3_chkpt_close_ctx(ctx); }
    return -1;
}
1710
1711
1712
1713 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1714     struct ide_internal * ide = (struct ide_internal *)private_data;
1715     struct v3_chkpt_ctx *ctx=0;
1716     int ch_num = 0;
1717     int drive_num = 0;
1718     char buf[128];
1719     
1720     ctx=v3_chkpt_open_ctx(chkpt,id);
1721     
1722     if (!ctx) { 
1723       PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1724       goto loadfailout;
1725     }
1726
1727     // nothing saved yet
1728     
1729     v3_chkpt_close_ctx(ctx);ctx=0;
1730    
1731
1732     for (ch_num = 0; ch_num < 2; ch_num++) {
1733         struct ide_channel * ch = &(ide->channels[ch_num]);
1734
1735         snprintf(buf, 128, "%s-%d", id, ch_num);
1736
1737         ctx = v3_chkpt_open_ctx(chkpt, buf);
1738         
1739         if (!ctx) { 
1740           PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1741           goto loadfailout;
1742         }
1743
1744         V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1745         V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1746         V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1747         V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1748         V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1749         V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1750         V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1751         V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1752         V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1753         V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1754
1755         v3_chkpt_close_ctx(ctx); ctx=0;
1756
1757         for (drive_num = 0; drive_num < 2; drive_num++) {
1758             struct ide_drive * drive = &(ch->drives[drive_num]);
1759             
1760             snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1761
1762             ctx = v3_chkpt_open_ctx(chkpt, buf);
1763             
1764             if (!ctx) { 
1765               PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1766               goto loadfailout;
1767             }
1768
1769             V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1770             V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1771             V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1772             V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1773
1774             V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1775             V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1776             V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1777
1778             V3_CHKPT_LOAD(ctx, "DATA_BUF",  drive->data_buf, loadfailout);
1779
1780             
1781             /* For now we'll just pack the type specific data at the end... */
1782             /* We should probably add a new context here in the future... */
1783             if (drive->drive_type == BLOCK_CDROM) {
1784               V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1785               V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1786               V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1787             } else if (drive->drive_type == BLOCK_DISK) {
1788               V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1789               V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1790               V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1791             } else if (drive->drive_type == BLOCK_NONE) { 
1792               // no drive connected, so no data
1793             } else {
1794               PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1795               goto loadfailout;
1796             }
1797         }
1798     }
1799 // goodout:
1800     return 0;
1801
1802  loadfailout:
1803     PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1804     if (ctx) {v3_chkpt_close_ctx(ctx); }
1805     return -1;
1806
1807 }
1808
1809
1810
1811 #endif
1812
1813
/* Device-manager operations for the IDE controller.
 * Checkpoint save/load hooks are only wired in when the build
 * has checkpointing enabled. */
static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))ide_free,
#ifdef V3_CONFIG_CHECKPOINT
    .save_extended = ide_save_extended,
    .load_extended = ide_load_extended
#endif
};
1821
1822
1823
1824
1825 static int connect_fn(struct v3_vm_info * vm, 
1826                       void * frontend_data, 
1827                       struct v3_dev_blk_ops * ops, 
1828                       v3_cfg_tree_t * cfg, 
1829                       void * private_data) {
1830     struct ide_internal * ide  = (struct ide_internal *)(frontend_data);  
1831     struct ide_channel * channel = NULL;
1832     struct ide_drive * drive = NULL;
1833
1834     char * bus_str = v3_cfg_val(cfg, "bus_num");
1835     char * drive_str = v3_cfg_val(cfg, "drive_num");
1836     char * type_str = v3_cfg_val(cfg, "type");
1837     char * model_str = v3_cfg_val(cfg, "model");
1838     uint_t bus_num = 0;
1839     uint_t drive_num = 0;
1840
1841
1842     if ((!type_str) || (!drive_str) || (!bus_str)) {
1843         PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1844         return -1;
1845     }
1846
1847     bus_num = atoi(bus_str);
1848     drive_num = atoi(drive_str);
1849
1850     channel = &(ide->channels[bus_num]);
1851     drive = &(channel->drives[drive_num]);
1852
1853     if (drive->drive_type != BLOCK_NONE) {
1854         PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1855         return -1;
1856     }
1857
1858     if (model_str != NULL) {
1859         strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1860     }
1861
1862     if (strcasecmp(type_str, "cdrom") == 0) {
1863         drive->drive_type = BLOCK_CDROM;
1864
1865         while (strlen((char *)(drive->model)) < 40) {
1866             strcat((char*)(drive->model), " ");
1867         }
1868
1869     } else if (strcasecmp(type_str, "hd") == 0) {
1870         drive->drive_type = BLOCK_DISK;
1871
1872         drive->hd_state.accessed = 0;
1873         drive->hd_state.mult_sector_num = 1;
1874
1875         drive->num_sectors = 63;
1876         drive->num_heads = 16;
1877         drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
1878     } else {
1879         PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
1880         return -1;
1881     }
1882  
1883     drive->ops = ops;
1884
1885     if (ide->ide_pci) {
1886         // Hardcode this for now, but its not a good idea....
1887         ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1888     }
1889  
1890     drive->private_data = private_data;
1891
1892     return 0;
1893 }
1894
1895
1896
1897
1898 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1899     struct ide_internal * ide  = NULL;
1900     char * dev_id = v3_cfg_val(cfg, "ID");
1901     int ret = 0;
1902
1903     PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
1904
1905     ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1906
1907     if (ide == NULL) {
1908         PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
1909         return -1;
1910     }
1911
1912     memset(ide, 0, sizeof(struct ide_internal));
1913
1914     ide->vm = vm;
1915     ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1916
1917     if (ide->pci_bus != NULL) {
1918         struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1919
1920         if (!southbridge) {
1921             PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
1922             V3_Free(ide);
1923             return -1;
1924         }
1925
1926         ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1927     }
1928
1929     PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
1930
1931     struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
1932
1933     if (dev == NULL) {
1934         PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
1935         V3_Free(ide);
1936         return -1;
1937     }
1938
1939     if (init_ide_state(ide) == -1) {
1940         PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
1941         v3_remove_device(dev);
1942         return -1;
1943     }
1944
1945     PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
1946
1947     ret |= v3_dev_hook_io(dev, PRI_DATA_PORT, 
1948                           &read_data_port, &write_data_port);
1949     ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT, 
1950                           &read_port_std, &write_port_std);
1951     ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT, 
1952                           &read_port_std, &write_port_std);
1953     ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT, 
1954                           &read_port_std, &write_port_std);
1955     ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT, 
1956                           &read_port_std, &write_port_std);
1957     ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT, 
1958                           &read_port_std, &write_port_std);
1959     ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT, 
1960                           &read_port_std, &write_port_std);
1961     ret |= v3_dev_hook_io(dev, PRI_CMD_PORT, 
1962                           &read_port_std, &write_cmd_port);
1963
1964     ret |= v3_dev_hook_io(dev, SEC_DATA_PORT, 
1965                           &read_data_port, &write_data_port);
1966     ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT, 
1967                           &read_port_std, &write_port_std);
1968     ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT, 
1969                           &read_port_std, &write_port_std);
1970     ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT, 
1971                           &read_port_std, &write_port_std);
1972     ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT, 
1973                           &read_port_std, &write_port_std);
1974     ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT, 
1975                           &read_port_std, &write_port_std);
1976     ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT, 
1977                           &read_port_std, &write_port_std);
1978     ret |= v3_dev_hook_io(dev, SEC_CMD_PORT, 
1979                           &read_port_std, &write_cmd_port);
1980   
1981
1982     ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT, 
1983                           &read_port_std, &write_port_std);
1984
1985     ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT, 
1986                           &read_port_std, &write_port_std);
1987   
1988
1989     ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT, 
1990                           &read_port_std, &write_port_std);
1991
1992     ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT, 
1993                           &read_port_std, &write_port_std);
1994
1995
1996     if (ret != 0) {
1997         PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
1998         v3_remove_device(dev);
1999         return -1;
2000     }
2001
2002
2003     if (ide->pci_bus) {
2004         struct v3_pci_bar bars[6];
2005         struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
2006         struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
2007         struct pci_device * pci_dev = NULL;
2008         int i;
2009
2010         PrintDebug(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
2011
2012         for (i = 0; i < 6; i++) {
2013             bars[i].type = PCI_BAR_NONE;
2014         }
2015
2016         bars[4].type = PCI_BAR_IO;
2017         //      bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
2018         bars[4].default_base_port = -1;
2019         bars[4].num_ports = 16;
2020
2021         bars[4].io_read = read_dma_port;
2022         bars[4].io_write = write_dma_port;
2023         bars[4].private_data = ide;
2024
2025         pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1, 
2026                                          "PIIX3_IDE", bars,
2027                                          pci_config_update, NULL, NULL, NULL, ide);
2028
2029         if (pci_dev == NULL) {
2030             PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i); 
2031             v3_remove_device(dev);
2032             return -1;
2033         }
2034
2035         /* This is for CMD646 devices 
2036            pci_dev->config_header.vendor_id = 0x1095;
2037            pci_dev->config_header.device_id = 0x0646;
2038            pci_dev->config_header.revision = 0x8f07;
2039         */
2040
2041         pci_dev->config_header.vendor_id = 0x8086;
2042         pci_dev->config_header.device_id = 0x7010;
2043         pci_dev->config_header.revision = 0x00;
2044
2045         pci_dev->config_header.prog_if = 0x80; // Master IDE device
2046         pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
2047         pci_dev->config_header.class = PCI_CLASS_STORAGE;
2048
2049         pci_dev->config_header.command = 0;
2050         pci_dev->config_header.status = 0x0280;
2051
2052         ide->ide_pci = pci_dev;
2053
2054
2055     }
2056
2057     if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
2058         PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
2059         v3_remove_device(dev);
2060         return -1;
2061     }
2062     
2063
2064     PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
2065
2066     return 0;
2067 }
2068
2069
2070 device_register("IDE", ide_init)
2071
2072
2073
2074
2075 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num, 
2076                         uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2077
2078     struct ide_internal * ide  = ide_data;  
2079     struct ide_channel * channel = &(ide->channels[channel_num]);
2080     struct ide_drive * drive = &(channel->drives[drive_num]);
2081     
2082     if (drive->drive_type == BLOCK_NONE) {
2083         return -1;
2084     }
2085
2086     *cylinders = drive->num_cylinders;
2087     *heads = drive->num_heads;
2088     *sectors = drive->num_sectors;
2089
2090     return 0;
2091 }