2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
29 #ifndef V3_CONFIG_DEBUG_IDE
31 #define PrintDebug(fmt, args...)
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
38 #define PRI_DATA_PORT 0x1f0
39 #define PRI_FEATURES_PORT 0x1f1
40 #define PRI_SECT_CNT_PORT 0x1f2
41 #define PRI_SECT_NUM_PORT 0x1f3
42 #define PRI_CYL_LOW_PORT 0x1f4
43 #define PRI_CYL_HIGH_PORT 0x1f5
44 #define PRI_DRV_SEL_PORT 0x1f6
45 #define PRI_CMD_PORT 0x1f7
46 #define PRI_CTRL_PORT 0x3f6
47 #define PRI_ADDR_REG_PORT 0x3f7
49 #define SEC_DATA_PORT 0x170
50 #define SEC_FEATURES_PORT 0x171
51 #define SEC_SECT_CNT_PORT 0x172
52 #define SEC_SECT_NUM_PORT 0x173
53 #define SEC_CYL_LOW_PORT 0x174
54 #define SEC_CYL_HIGH_PORT 0x175
55 #define SEC_DRV_SEL_PORT 0x176
56 #define SEC_CMD_PORT 0x177
57 #define SEC_CTRL_PORT 0x376
58 #define SEC_ADDR_REG_PORT 0x377
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
64 #define DATA_BUFFER_SIZE 2048
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
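/* Note: the per-drive data_buf (below) only ever holds one transfer chunk at a time --
 * a 512-byte hard-disk sector, a 2048-byte ATAPI block, or a 512-byte identify page --
 * so a single ATAPI block's worth of buffer space is sufficient.
 */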
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM",
71 "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72 "PRI_CTRL", "PRI_ADDR_REG"};
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM",
76 "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77 "SEC_CTRL", "SEC_ADDR_REG"};
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80 "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
105 struct ide_cd_state {
106 struct atapi_sense_data sense;
109 struct atapi_error_recovery err_recovery;
112 struct ide_hd_state {
115 /* This is the multiple-sector transfer size as configured for the Read/Write Multiple commands */
116 uint32_t mult_sector_num;
118 /* This is the current op sector size:
119 * for multiple sector ops this equals mult_sector_num
120 * for standard ops this equals 1
122 uint32_t cur_sector_num;
128 v3_block_type_t drive_type;
130 struct v3_dev_blk_ops * ops;
133 struct ide_cd_state cd_state;
134 struct ide_hd_state hd_state;
139 // Where we are in the data transfer
140 uint32_t transfer_index;
142 // the length of a transfer
143 // calculated for easy access
144 uint32_t transfer_length;
146 uint64_t current_lba;
148 // We have a local data buffer that we use for IO port accesses
149 uint8_t data_buf[DATA_BUFFER_SIZE];
152 uint32_t num_cylinders;
154 uint32_t num_sectors;
159 uint8_t sector_count; // 0x1f2,0x172
160 struct atapi_irq_flags irq_flags;
161 } __attribute__((packed));
164 uint8_t sector_num; // 0x1f3,0x173
166 } __attribute__((packed));
173 uint8_t cylinder_low; // 0x1f4,0x174
174 uint8_t cylinder_high; // 0x1f5,0x175
175 } __attribute__((packed));
180 } __attribute__((packed));
183 // The transfer length requested by the CPU
185 } __attribute__((packed));
192 struct ide_drive drives[2];
195 struct ide_error_reg error_reg; // [read] 0x1f1,0x171
197 struct ide_features_reg features;
199 struct ide_drive_head_reg drive_head; // 0x1f6,0x176
201 struct ide_status_reg status; // [read] 0x1f7,0x177
202 uint8_t cmd_reg; // [write] 0x1f7,0x177
204 int irq; // this is temporary until we add PCI support
207 struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
209 struct ide_dma_cmd_reg dma_cmd;
210 struct ide_dma_status_reg dma_status;
211 uint32_t dma_prd_addr;
212 uint32_t dma_tbl_index;
217 struct ide_internal {
218 struct ide_channel channels[2];
220 struct v3_southbridge * southbridge;
221 struct vm_device * pci_bus;
223 struct pci_device * ide_pci;
225 struct v3_vm_info * vm;
232 /* Utility functions */
234 static inline uint16_t be_to_le_16(const uint16_t val) {
235 uint8_t * buf = (uint8_t *)&val;
236 return (buf[0] << 8) | (buf[1]);
239 static inline uint16_t le_to_be_16(const uint16_t val) {
240 return be_to_le_16(val);
244 static inline uint32_t be_to_le_32(const uint32_t val) {
245 uint8_t * buf = (uint8_t *)&val;
246 return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
249 static inline uint32_t le_to_be_32(const uint32_t val) {
250 return be_to_le_32(val);
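/* Worked example: on the little-endian x86 hosts Palacios targets, these helpers are
 * plain byte swaps, e.g.
 *   be_to_le_16(0x1234)     == 0x3412
 *   be_to_le_32(0x11223344) == 0x44332211
 * le_to_be_* simply alias the same swap, since the operation is symmetric.
 */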
254 static inline int get_channel_index(ushort_t port) {
255 if (((port & 0xfff8) == 0x1f0) ||
256 ((port & 0xfffe) == 0x3f6) ||
257 ((port & 0xfff8) == 0xc000)) {
259 } else if (((port & 0xfff8) == 0x170) ||
260 ((port & 0xfffe) == 0x376) ||
261 ((port & 0xfff8) == 0xc008)) {
268 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
269 int channel_idx = get_channel_index(port);
270 return &(ide->channels[channel_idx]);
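/* For reference, the port ranges decoded by get_channel_index() above:
 *   0x1f0-0x1f7, 0x3f6-0x3f7, 0xc000-0xc007  -> channel 0 (primary)
 *   0x170-0x177, 0x376-0x377, 0xc008-0xc00f  -> channel 1 (secondary)
 * e.g. (0x1f3 & 0xfff8) == 0x1f0, so an access to PRI_SECT_NUM_PORT selects the primary channel.
 */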
273 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
274 return &(channel->drives[channel->drive_head.drive_sel]);
278 static inline int is_lba_enabled(struct ide_channel * channel) {
279 return channel->drive_head.lba_mode;
284 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
285 if (channel->ctrl_reg.irq_disable == 0) {
286 // PrintError("Raising IDE Interrupt %d\n", channel->irq);
287 channel->dma_status.int_gen = 1;
288 v3_raise_irq(ide->vm, channel->irq);
293 static void drive_reset(struct ide_drive * drive) {
294 drive->sector_count = 0x01;
295 drive->sector_num = 0x01;
297 PrintDebug("Resetting drive %s\n", drive->model);
299 if (drive->drive_type == BLOCK_CDROM) {
300 drive->cylinder = 0xeb14;
302 drive->cylinder = 0x0000;
303 //drive->hd_state.accessed = 0;
307 memset(drive->data_buf, 0, sizeof(drive->data_buf));
308 drive->transfer_index = 0;
310 // Send the reset signal to the connected device callbacks
311 // channel->drives[0].reset();
312 // channel->drives[1].reset();
315 static void channel_reset(struct ide_channel * channel) {
317 // set busy and seek complete flags
318 channel->status.val = 0x90;
321 channel->error_reg.val = 0x01;
324 channel->cmd_reg = 0x00;
326 channel->ctrl_reg.irq_disable = 0;
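/* channel_reset() above and channel_reset_complete() below together implement the
 * soft-reset (SRST) handshake driven from write_port_std(): setting the soft_reset bit
 * in the device control register resets the channel (BSY set, diagnostic code 0x01),
 * and clearing it completes the reset -- BSY drops, DRDY is set, and both attached
 * drives are reset.
 */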
329 static void channel_reset_complete(struct ide_channel * channel) {
330 channel->status.busy = 0;
331 channel->status.ready = 1;
333 channel->drive_head.head_num = 0;
335 drive_reset(&(channel->drives[0]));
336 drive_reset(&(channel->drives[1]));
340 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
341 channel->status.val = 0x41; // Error + ready
342 channel->error_reg.val = 0x04; // ABRT (command aborted)
344 ide_raise_irq(ide, channel);
348 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
349 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
352 /* ATAPI functions */
359 #ifdef V3_CONFIG_DEBUG_IDE
360 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
361 struct ide_dma_prd prd_entry;
364 PrintDebug("Dumping PRD table\n");
367 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
370 ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
372 if (ret != sizeof(struct ide_dma_prd)) {
373 PrintError("Could not read PRD\n");
377 PrintDebug("\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
379 (prd_entry.size == 0) ? 0x10000 : prd_entry.size,
380 prd_entry.end_of_table);
382 if (prd_entry.end_of_table) {
394 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
395 struct ide_drive * drive = get_selected_drive(channel);
396 // This is at top level scope to do the EOT test at the end
397 struct ide_dma_prd prd_entry = {};
398 uint_t bytes_left = drive->transfer_length;
400 // Read in the data buffer....
401 // Read a sector/block at a time until the prd entry is full.
403 #ifdef V3_CONFIG_DEBUG_IDE
404 print_prd_table(ide, channel);
407 PrintDebug("DMA read for %d bytes\n", bytes_left);
409 // Loop through the disk data
410 while (bytes_left > 0) {
411 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
412 uint_t prd_bytes_left = 0;
413 uint_t prd_offset = 0;
416 PrintDebug("PRD table address = %x\n", channel->dma_prd_addr);
418 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
420 if (ret != sizeof(struct ide_dma_prd)) {
421 PrintError("Could not read PRD\n");
425 PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
426 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
428 // loop through the PRD data....
430 if (prd_entry.size == 0) {
431 // a size of 0 means 64k
432 prd_bytes_left = 0x10000;
434 prd_bytes_left = prd_entry.size;
438 while (prd_bytes_left > 0) {
439 uint_t bytes_to_write = 0;
441 if (drive->drive_type == BLOCK_DISK) {
442 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
445 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
446 PrintError("Failed to read next disk sector\n");
449 } else if (drive->drive_type == BLOCK_CDROM) {
450 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
451 bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
453 if (atapi_read_chunk(ide, channel) == -1) {
454 PrintError("Failed to read next disk sector\n");
459 PrintError("DMA of command packet\n");
460 PrintError("How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
465 bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
466 prd_bytes_left = bytes_to_write;
468 cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset,
469 bytes_to_write, drive->data_buf);
476 drive->transfer_index += bytes_to_write;
478 channel->status.busy = 0;
479 channel->status.ready = 1;
480 channel->status.data_req = 0;
481 channel->status.error = 0;
482 channel->status.seek_complete = 1;
484 channel->dma_status.active = 0;
485 channel->dma_status.err = 0;
487 ide_raise_irq(ide, channel);
493 PrintDebug("Writing DMA data to guest Memory ptr=%p, len=%d\n",
494 (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
496 drive->current_lba++;
498 ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
500 if (ret != bytes_to_write) {
501 PrintError("Failed to copy data into guest memory... (ret=%d)\n", ret);
505 PrintDebug("\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
507 drive->transfer_index += ret;
508 prd_bytes_left -= ret;
513 channel->dma_tbl_index++;
515 if (drive->drive_type == BLOCK_DISK) {
516 if (drive->transfer_index % HD_SECTOR_SIZE) {
517 PrintError("We currently don't handle sectors that span PRD descriptors\n");
520 } else if (drive->drive_type == BLOCK_CDROM) {
521 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
522 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
523 PrintError("We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
524 PrintError("transfer_index=%d, transfer_length=%d\n",
525 drive->transfer_index, drive->transfer_length);
532 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
533 PrintError("DMA table not large enough for data transfer...\n");
539 drive->irq_flags.io_dir = 1;
540 drive->irq_flags.c_d = 1;
541 drive->irq_flags.rel = 0;
545 // Update to the next PRD entry
549 if (prd_entry.end_of_table) {
550 channel->status.busy = 0;
551 channel->status.ready = 1;
552 channel->status.data_req = 0;
553 channel->status.error = 0;
554 channel->status.seek_complete = 1;
556 channel->dma_status.active = 0;
557 channel->dma_status.err = 0;
560 ide_raise_irq(ide, channel);
566 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
567 struct ide_drive * drive = get_selected_drive(channel);
568 // This is at top level scope to do the EOT test at the end
569 struct ide_dma_prd prd_entry = {};
570 uint_t bytes_left = drive->transfer_length;
573 PrintDebug("DMA write of %d bytes\n", bytes_left);
575 // Loop through disk data
576 while (bytes_left > 0) {
577 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
578 uint_t prd_bytes_left = 0;
579 uint_t prd_offset = 0;
582 PrintDebug("PRD Table address = %x\n", channel->dma_prd_addr);
584 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
586 if (ret != sizeof(struct ide_dma_prd)) {
587 PrintError("Could not read PRD\n");
591 PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
592 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
594 prd_bytes_left = prd_entry.size;
596 while (prd_bytes_left > 0) {
597 uint_t bytes_to_write = 0;
600 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
603 ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
605 if (ret != bytes_to_write) {
606 PrintError("Failed to copy data from guest memory... (ret=%d)\n", ret);
610 PrintDebug("\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
613 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
614 PrintError("Failed to write data to disk\n");
618 drive->current_lba++;
620 drive->transfer_index += ret;
621 prd_bytes_left -= ret;
626 channel->dma_tbl_index++;
628 if (drive->transfer_index % HD_SECTOR_SIZE) {
629 PrintError("We currently don't handle sectors that span PRD descriptors\n");
633 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
634 PrintError("DMA table not large enough for data transfer...\n");
639 if (prd_entry.end_of_table) {
640 channel->status.busy = 0;
641 channel->status.ready = 1;
642 channel->status.data_req = 0;
643 channel->status.error = 0;
644 channel->status.seek_complete = 1;
646 channel->dma_status.active = 0;
647 channel->dma_status.err = 0;
650 ide_raise_irq(ide, channel);
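/* For reference, a stripped-down sketch of the PRD-table walk that dma_read() and
 * dma_write() above share. This is illustrative only (not compiled); it assumes the
 * ide_dma_prd layout from ide-types.h (base_addr, size, end_of_table) that the code
 * above dereferences, and takes a hypothetical per-entry transfer callback in place of
 * the real sector/block copy logic.
 */
#if 0
static int walk_prd_table(struct guest_info * core, struct ide_channel * channel,
			  int (*xfer)(struct guest_info * core, uint32_t gpa, uint_t len)) {
    struct ide_dma_prd prd;
    uint32_t index = 0;

    while (1) {
	uint32_t entry_gpa = channel->dma_prd_addr + (sizeof(prd) * index);
	uint_t len = 0;

	if (v3_read_gpa_memory(core, entry_gpa, sizeof(prd), (void *)&prd) != sizeof(prd)) {
	    return -1;
	}

	// A PRD size of 0 means a full 64 KB region
	len = (prd.size == 0) ? 0x10000 : prd.size;

	if (xfer(core, prd.base_addr, len) == -1) {
	    return -1;
	}

	if (prd.end_of_table) {
	    break;
	}

	index++;
    }

    return 0;
}
#endif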
657 #define DMA_CMD_PORT 0x00
658 #define DMA_STATUS_PORT 0x02
659 #define DMA_PRD_PORT0 0x04
660 #define DMA_PRD_PORT1 0x05
661 #define DMA_PRD_PORT2 0x06
662 #define DMA_PRD_PORT3 0x07
664 #define DMA_CHANNEL_FLAG 0x08
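/* Worked example of the decoding in the handlers below, assuming the bus-master BAR sits at 0xc000:
 *   port 0xc002 -> offset 0x02 (DMA_STATUS_PORT), channel 0 (primary)
 *   port 0xc00c -> offset 0x04 (DMA_PRD_PORT0),   channel 1 (secondary)
 */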
666 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
667 struct ide_internal * ide = (struct ide_internal *)private_data;
668 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
669 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
670 struct ide_channel * channel = &(ide->channels[channel_flag]);
672 PrintDebug("IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
673 port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
675 switch (port_offset) {
677 channel->dma_cmd.val = *(uint8_t *)src;
679 if (channel->dma_cmd.start == 0) {
680 channel->dma_tbl_index = 0;
682 channel->dma_status.active = 1;
684 if (channel->dma_cmd.read == 1) {
686 if (dma_read(core, ide, channel) == -1) {
687 PrintError("Failed DMA Read\n");
692 if (dma_write(core, ide, channel) == -1) {
693 PrintError("Failed DMA Write\n");
698 channel->dma_cmd.val &= 0x09;
703 case DMA_STATUS_PORT: {
704 uint8_t val = *(uint8_t *)src;
707 PrintError("Invalid write length for DMA status port\n");
712 channel->dma_status.val = ((val & 0x60) |
713 (channel->dma_status.val & 0x01) |
714 (channel->dma_status.val & ~val & 0x06));
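/* The update above follows the usual bus-master status register semantics:
 *   bits 5-6 (drive DMA capable) take the written value,
 *   bit 0 (active) is read-only and preserved,
 *   bits 1-2 (err, interrupt) are write-1-to-clear.
 * e.g. old = 0x06 (err + interrupt set), guest writes 0x04:
 *   new = (0x04 & 0x60) | (0x06 & 0x01) | (0x06 & ~0x04 & 0x06) = 0x02
 * so the interrupt bit is cleared and the error bit is kept.
 */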
721 case DMA_PRD_PORT3: {
722 uint_t addr_index = port_offset & 0x3;
723 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
726 if (addr_index + length > 4) {
727 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
731 for (i = 0; i < length; i++) {
732 addr_buf[addr_index + i] = *((uint8_t *)src + i);
735 PrintDebug("Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
740 PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
748 static int read_dma_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * private_data) {
749 struct ide_internal * ide = (struct ide_internal *)private_data;
750 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
751 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
752 struct ide_channel * channel = &(ide->channels[channel_flag]);
754 PrintDebug("Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
756 switch (port_offset) {
758 *(uint8_t *)dst = channel->dma_cmd.val;
761 case DMA_STATUS_PORT:
763 PrintError("Invalid read length for DMA status port\n");
767 *(uint8_t *)dst = channel->dma_status.val;
773 case DMA_PRD_PORT3: {
774 uint_t addr_index = port_offset & 0x3;
775 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
778 if (addr_index + length > 4) {
779 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
783 for (i = 0; i < length; i++) {
784 *((uint8_t *)dst + i) = addr_buf[addr_index + i];
790 PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
794 PrintDebug("\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
801 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
802 struct ide_internal * ide = priv_data;
803 struct ide_channel * channel = get_selected_channel(ide, port);
804 struct ide_drive * drive = get_selected_drive(channel);
807 PrintError("Invalid Write Length on IDE command Port %x\n", port);
811 PrintDebug("IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
813 channel->cmd_reg = *(uint8_t *)src;
815 switch (channel->cmd_reg) {
817 case 0xa1: // ATAPI Identify Device Packet
818 if (drive->drive_type != BLOCK_CDROM) {
821 // JRL: Should we abort here?
822 ide_abort_command(ide, channel);
825 atapi_identify_device(drive);
827 channel->error_reg.val = 0;
828 channel->status.val = 0x58; // ready, data_req, seek_complete
830 ide_raise_irq(ide, channel);
833 case 0xec: // Identify Device
834 if (drive->drive_type != BLOCK_DISK) {
837 // JRL: Should we abort here?
838 ide_abort_command(ide, channel);
840 ata_identify_device(drive);
842 channel->error_reg.val = 0;
843 channel->status.val = 0x58;
845 ide_raise_irq(ide, channel);
849 case 0xa0: // ATAPI Command Packet
850 if (drive->drive_type != BLOCK_CDROM) {
851 ide_abort_command(ide, channel);
854 drive->sector_count = 1;
856 channel->status.busy = 0;
857 channel->status.write_fault = 0;
858 channel->status.data_req = 1;
859 channel->status.error = 0;
861 // reset the data buffer...
862 drive->transfer_length = ATAPI_PACKET_SIZE;
863 drive->transfer_index = 0;
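// The guest now writes the ATAPI_PACKET_SIZE-byte (normally 12-byte) command packet to the
// data port; once transfer_index reaches that length, write_data_port() below hands the
// packet to atapi_handle_packet().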
867 case 0x20: // Read Sectors with Retry
868 case 0x21: // Read Sectors without Retry
869 drive->hd_state.cur_sector_num = 1;
871 if (ata_read_sectors(ide, channel) == -1) {
872 PrintError("Error reading sectors\n");
877 case 0x24: // Read Sectors Extended
878 drive->hd_state.cur_sector_num = 1;
880 if (ata_read_sectors_ext(ide, channel) == -1) {
881 PrintError("Error reading extended sectors\n");
886 case 0xc8: // Read DMA with retry
887 case 0xc9: { // Read DMA
888 uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
890 if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
891 ide_abort_command(ide, channel);
895 drive->hd_state.cur_sector_num = 1;
897 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
898 drive->transfer_index = 0;
900 if (channel->dma_status.active == 1) {
902 if (dma_read(core, ide, channel) == -1) {
903 PrintError("Failed DMA Read\n");
910 case 0xca: { // Write DMA
911 uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
913 if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
914 ide_abort_command(ide, channel);
918 drive->hd_state.cur_sector_num = 1;
920 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
921 drive->transfer_index = 0;
923 if (channel->dma_status.active == 1) {
925 if (dma_write(core, ide, channel) == -1) {
926 PrintError("Failed DMA Write\n");
932 case 0xe0: // Standby Now 1
933 case 0xe1: // Set Idle Immediate
934 case 0xe2: // Standby
935 case 0xe3: // Set Idle 1
936 case 0xe6: // Sleep Now 1
937 case 0x94: // Standby Now 2
938 case 0x95: // Idle Immediate (CFA)
939 case 0x96: // Standby 2
940 case 0x97: // Set idle 2
941 case 0x99: // Sleep Now 2
942 channel->status.val = 0;
943 channel->status.ready = 1;
944 ide_raise_irq(ide, channel);
947 case 0xef: // Set Features
948 // Prior to this the features register has been written to.
949 // This command tells the drive to check if the new value is supported (the value is drive specific)
950 // A common case is bit 0 of the value enabling DMA.
951 // If the value is valid the drive raises an interrupt; if not, it aborts the command.
953 // Do some checking here...
955 channel->status.busy = 0;
956 channel->status.write_fault = 0;
957 channel->status.error = 0;
958 channel->status.ready = 1;
959 channel->status.seek_complete = 1;
961 ide_raise_irq(ide, channel);
964 case 0x91: // Initialize Drive Parameters
965 case 0x10: // recalibrate?
966 channel->status.error = 0;
967 channel->status.ready = 1;
968 channel->status.seek_complete = 1;
969 ide_raise_irq(ide, channel);
971 case 0xc6: { // Set multiple mode (IDE Block mode)
972 // This makes the drive transfer multiple sectors before generating an interrupt
973 uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
975 if (tmp_sect_num > MAX_MULT_SECTORS) {
976 ide_abort_command(ide, channel);
980 if (drive->sector_count == 0) {
981 drive->hd_state.mult_sector_num = 1;
983 drive->hd_state.mult_sector_num = drive->sector_count;
986 channel->status.ready = 1;
987 channel->status.error = 0;
989 ide_raise_irq(ide, channel);
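// e.g. a sector count of 16 here sets mult_sector_num to 16, so a later Read Multiple
// (0xc4) will transfer 16 sectors between interrupts (see read_hd_data() below).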
994 case 0x08: // Reset Device
996 channel->error_reg.val = 0x01;
997 channel->status.busy = 0;
998 channel->status.ready = 1;
999 channel->status.seek_complete = 1;
1000 channel->status.write_fault = 0;
1001 channel->status.error = 0;
1004 case 0xe5: // Check power mode
1005 drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1006 channel->status.busy = 0;
1007 channel->status.ready = 1;
1008 channel->status.write_fault = 0;
1009 channel->status.data_req = 0;
1010 channel->status.error = 0;
1013 case 0xc4: // read multiple sectors
1014 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
1016 PrintError("Unimplemented IDE command (%x)\n", channel->cmd_reg);
1024 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1025 struct ide_internal * ide = priv_data;
1026 struct ide_channel * channel = get_selected_channel(ide, port);
1027 struct ide_drive * drive = get_selected_drive(channel);
1029 // PrintDebug("IDE: Writing Data Port %x (val=%x, len=%d)\n",
1030 // port, *(uint32_t *)src, length);
1032 memcpy(drive->data_buf + drive->transfer_index, src, length);
1033 drive->transfer_index += length;
1035 // Transfer is complete, dispatch the command
1036 if (drive->transfer_index >= drive->transfer_length) {
1037 switch (channel->cmd_reg) {
1038 case 0x30: // Write Sectors
1039 PrintError("Writing Data not yet implemented\n");
1042 case 0xa0: // ATAPI packet command
1043 if (atapi_handle_packet(core, ide, channel) == -1) {
1044 PrintError("Error handling ATAPI packet\n");
1049 PrintError("Unhandled IDE Command %x\n", channel->cmd_reg);
1058 static int read_hd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1059 struct ide_drive * drive = get_selected_drive(channel);
1060 int data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1064 if (drive->transfer_index >= drive->transfer_length) {
1065 PrintError("Buffer overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n",
1066 drive->transfer_length, drive->transfer_index,
1067 drive->transfer_index + length);
1072 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1073 drive->current_lba++;
1075 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1076 PrintError("Could not read next disk sector\n");
1082 PrintDebug("Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
1083 *(uint32_t *)(drive->data_buf + data_offset),
1084 length, data_offset);
1086 memcpy(dst, drive->data_buf + data_offset, length);
1088 drive->transfer_index += length;
1091 /* This is the trigger for interrupt injection.
1092 * For read single sector commands we interrupt after every sector
1093 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1094 * cur_sector_num is configured depending on the operation we are currently running
1095 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
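 * For example, with cur_sector_num == 8 a 16-sector (8 KB) read raises an interrupt
 * after byte 4096 and a final one at byte 8192.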
1097 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1098 (drive->transfer_index == drive->transfer_length)) {
1099 if (drive->transfer_index < drive->transfer_length) {
1100 // An increment is complete, but there is still more data to be transferred...
1101 PrintDebug("Increment complete, still transferring more sectors\n");
1102 channel->status.data_req = 1;
1104 drive->irq_flags.c_d = 0;
1106 PrintDebug("Final Sector Transferred\n");
1107 // This was the final read of the request
1108 channel->status.data_req = 0;
1111 drive->irq_flags.c_d = 1;
1112 drive->irq_flags.rel = 0;
1115 channel->status.ready = 1;
1116 drive->irq_flags.io_dir = 1;
1117 channel->status.busy = 0;
1119 ide_raise_irq(ide, channel);
1128 static int read_cd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1129 struct ide_drive * drive = get_selected_drive(channel);
1130 int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1131 // int req_offset = drive->transfer_index % drive->req_len;
1133 if (drive->cd_state.atapi_cmd != 0x28) {
1134 PrintDebug("IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
1135 PrintDebug("IDE: transfer len=%d, transfer idx=%d\n", drive->transfer_length, drive->transfer_index);
1140 if (drive->transfer_index >= drive->transfer_length) {
1141 PrintError("Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n",
1142 drive->transfer_length, drive->transfer_index,
1143 drive->transfer_index + length);
1148 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1149 if (atapi_update_data_buf(ide, channel) == -1) {
1150 PrintError("Could not update CDROM data buffer\n");
1155 memcpy(dst, drive->data_buf + data_offset, length);
1157 drive->transfer_index += length;
1160 // Should the req_offset be recalculated here?????
1161 if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1162 if (drive->transfer_index < drive->transfer_length) {
1163 // An increment is complete, but there is still more data to be transferred...
1165 channel->status.data_req = 1;
1167 drive->irq_flags.c_d = 0;
1169 // Update the request length in the cylinder regs
1170 if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1171 PrintError("Could not update request length after completed increment\n");
1175 // This was the final read of the request
1178 channel->status.data_req = 0;
1179 channel->status.ready = 1;
1181 drive->irq_flags.c_d = 1;
1182 drive->irq_flags.rel = 0;
1185 drive->irq_flags.io_dir = 1;
1186 channel->status.busy = 0;
1188 ide_raise_irq(ide, channel);
1195 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1196 struct ide_drive * drive = get_selected_drive(channel);
1198 channel->status.busy = 0;
1199 channel->status.ready = 1;
1200 channel->status.write_fault = 0;
1201 channel->status.seek_complete = 1;
1202 channel->status.corrected = 0;
1203 channel->status.error = 0;
1206 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1207 drive->transfer_index += length;
1209 if (drive->transfer_index >= drive->transfer_length) {
1210 channel->status.data_req = 0;
1217 static int ide_read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1218 struct ide_internal * ide = priv_data;
1219 struct ide_channel * channel = get_selected_channel(ide, port);
1220 struct ide_drive * drive = get_selected_drive(channel);
1222 // PrintDebug("IDE: Reading Data Port %x (len=%d)\n", port, length);
1224 if ((channel->cmd_reg == 0xec) ||
1225 (channel->cmd_reg == 0xa1)) {
1226 return read_drive_id((uint8_t *)dst, length, ide, channel);
1229 if (drive->drive_type == BLOCK_CDROM) {
1230 if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1231 PrintError("IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1234 } else if (drive->drive_type == BLOCK_DISK) {
1235 if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1236 PrintError("IDE: Could not read HD Data\n");
1240 memset((uint8_t *)dst, 0, length);
1246 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1247 struct ide_internal * ide = priv_data;
1248 struct ide_channel * channel = get_selected_channel(ide, port);
1249 struct ide_drive * drive = get_selected_drive(channel);
1252 PrintError("Invalid Write length on IDE port %x\n", port);
1256 PrintDebug("IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1259 // reset and interrupt enable
1261 case SEC_CTRL_PORT: {
1262 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1264 // only reset channel on a 0->1 reset bit transition
1265 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1266 channel_reset(channel);
1267 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1268 channel_reset_complete(channel);
1271 channel->ctrl_reg.val = tmp_ctrl->val;
1274 case PRI_FEATURES_PORT:
1275 case SEC_FEATURES_PORT:
1276 channel->features.val = *(uint8_t *)src;
1279 case PRI_SECT_CNT_PORT:
1280 case SEC_SECT_CNT_PORT:
1281 channel->drives[0].sector_count = *(uint8_t *)src;
1282 channel->drives[1].sector_count = *(uint8_t *)src;
1285 case PRI_SECT_NUM_PORT:
1286 case SEC_SECT_NUM_PORT:
1287 channel->drives[0].sector_num = *(uint8_t *)src;
1288 channel->drives[1].sector_num = *(uint8_t *)src;
1290 case PRI_CYL_LOW_PORT:
1291 case SEC_CYL_LOW_PORT:
1292 channel->drives[0].cylinder_low = *(uint8_t *)src;
1293 channel->drives[1].cylinder_low = *(uint8_t *)src;
1296 case PRI_CYL_HIGH_PORT:
1297 case SEC_CYL_HIGH_PORT:
1298 channel->drives[0].cylinder_high = *(uint8_t *)src;
1299 channel->drives[1].cylinder_high = *(uint8_t *)src;
1302 case PRI_DRV_SEL_PORT:
1303 case SEC_DRV_SEL_PORT: {
1304 channel->drive_head.val = *(uint8_t *)src;
1306 // make sure the reserved bits are ok..
1307 // JRL TODO: check with new ramdisk to make sure this is right...
1308 channel->drive_head.val |= 0xa0;
1310 drive = get_selected_drive(channel);
1312 // Selecting a non-present device is a no-no
1313 if (drive->drive_type == BLOCK_NONE) {
1314 PrintDebug("Attempting to select a non-present drive\n");
1315 channel->error_reg.abort = 1;
1316 channel->status.error = 1;
1318 channel->status.busy = 0;
1319 channel->status.ready = 1;
1320 channel->status.data_req = 0;
1321 channel->status.error = 0;
1322 channel->status.seek_complete = 1;
1324 channel->dma_status.active = 0;
1325 channel->dma_status.err = 0;
1331 PrintError("IDE: Write to unknown Port %x\n", port);
1338 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1339 struct ide_internal * ide = priv_data;
1340 struct ide_channel * channel = get_selected_channel(ide, port);
1341 struct ide_drive * drive = get_selected_drive(channel);
1344 PrintError("Invalid Read length on IDE port %x\n", port);
1348 PrintDebug("IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1350 if ((port == PRI_ADDR_REG_PORT) ||
1351 (port == SEC_ADDR_REG_PORT)) {
1352 // unused, return 0xff
1353 *(uint8_t *)dst = 0xff;
1358 // if no drive is present just return 0 + reserved bits
1359 if (drive->drive_type == BLOCK_NONE) {
1360 if ((port == PRI_DRV_SEL_PORT) ||
1361 (port == SEC_DRV_SEL_PORT)) {
1362 *(uint8_t *)dst = 0xa0;
1364 *(uint8_t *)dst = 0;
1372 // This is really the error register.
1373 case PRI_FEATURES_PORT:
1374 case SEC_FEATURES_PORT:
1375 *(uint8_t *)dst = channel->error_reg.val;
1378 case PRI_SECT_CNT_PORT:
1379 case SEC_SECT_CNT_PORT:
1380 *(uint8_t *)dst = drive->sector_count;
1383 case PRI_SECT_NUM_PORT:
1384 case SEC_SECT_NUM_PORT:
1385 *(uint8_t *)dst = drive->sector_num;
1388 case PRI_CYL_LOW_PORT:
1389 case SEC_CYL_LOW_PORT:
1390 *(uint8_t *)dst = drive->cylinder_low;
1394 case PRI_CYL_HIGH_PORT:
1395 case SEC_CYL_HIGH_PORT:
1396 *(uint8_t *)dst = drive->cylinder_high;
1399 case PRI_DRV_SEL_PORT:
1400 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1401 *(uint8_t *)dst = channel->drive_head.val;
1408 // On real hardware, reading the status register clears a pending drive interrupt; that is not modeled here.
1409 *(uint8_t *)dst = channel->status.val;
1413 PrintError("Invalid Port: %x\n", port);
1417 PrintDebug("\tVal=%x\n", *(uint8_t *)dst);
1424 static void init_drive(struct ide_drive * drive) {
1426 drive->sector_count = 0x01;
1427 drive->sector_num = 0x01;
1428 drive->cylinder = 0x0000;
1430 drive->drive_type = BLOCK_NONE;
1432 memset(drive->model, 0, sizeof(drive->model));
1434 drive->transfer_index = 0;
1435 drive->transfer_length = 0;
1436 memset(drive->data_buf, 0, sizeof(drive->data_buf));
1438 drive->num_cylinders = 0;
1439 drive->num_heads = 0;
1440 drive->num_sectors = 0;
1443 drive->private_data = NULL;
1447 static void init_channel(struct ide_channel * channel) {
1450 channel->error_reg.val = 0x01;
1451 channel->drive_head.val = 0x00;
1452 channel->status.val = 0x00;
1453 channel->cmd_reg = 0x00;
1454 channel->ctrl_reg.val = 0x08;
1457 channel->dma_cmd.val = 0;
1458 channel->dma_status.val = 0;
1459 channel->dma_prd_addr = 0;
1460 channel->dma_tbl_index = 0;
1462 for (i = 0; i < 2; i++) {
1463 init_drive(&(channel->drives[i]));
1469 static int pci_config_update(uint_t reg_num, void * src, uint_t length, void * private_data) {
1470 PrintDebug("PCI Config Update\n");
1472 struct ide_internal * ide = (struct ide_internal *)(private_data);
1474 PrintDebug("\t\tInterrupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1480 static int init_ide_state(struct ide_internal * ide) {
1484 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1487 for (i = 0; i < 2; i++) {
1488 init_channel(&(ide->channels[i]));
1490 // JRL: this is a terrible hack...
1491 ide->channels[i].irq = PRI_DEFAULT_IRQ + i;
1501 static int ide_free(struct ide_internal * ide) {
1503 // deregister from PCI?
1510 #ifdef V3_CONFIG_CHECKPOINT
1512 #include <palacios/vmm_sprintf.h>
1513 static int ide_save(struct v3_chkpt_ctx * ctx, void * private_data) {
1514 struct ide_internal * ide = (struct ide_internal *)private_data;
1520 for (ch_num = 0; ch_num < 2; ch_num++) {
1521 struct v3_chkpt_ctx * ch_ctx = NULL;
1522 struct ide_channel * ch = &(ide->channels[ch_num]);
1524 snprintf(buf, 128, "channel-%d", ch_num);
1525 ch_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
1527 v3_chkpt_save_8(ch_ctx, "ERROR", &(ch->error_reg.val));
1528 v3_chkpt_save_8(ch_ctx, "FEATURES", &(ch->features.val));
1529 v3_chkpt_save_8(ch_ctx, "DRIVE_HEAD", &(ch->drive_head.val));
1530 v3_chkpt_save_8(ch_ctx, "STATUS", &(ch->status.val));
1531 v3_chkpt_save_8(ch_ctx, "CMD_REG", &(ch->cmd_reg));
1532 v3_chkpt_save_8(ch_ctx, "CTRL_REG", &(ch->ctrl_reg.val));
1533 v3_chkpt_save_8(ch_ctx, "DMA_CMD", &(ch->dma_cmd.val));
1534 v3_chkpt_save_8(ch_ctx, "DMA_STATUS", &(ch->dma_status.val));
1535 v3_chkpt_save_32(ch_ctx, "PRD_ADDR", &(ch->dma_prd_addr));
1536 v3_chkpt_save_32(ch_ctx, "DMA_TBL_IDX", &(ch->dma_tbl_index));
1539 for (drive_num = 0; drive_num < 2; drive_num++) {
1540 struct v3_chkpt_ctx * drive_ctx = NULL;
1541 struct ide_drive * drive = &(ch->drives[drive_num]);
1543 snprintf(buf, 128, "drive-%d-%d", ch_num, drive_num);
1544 drive_ctx = v3_chkpt_open_ctx(ctx->chkpt, ch_ctx, buf);
1546 v3_chkpt_save_8(drive_ctx, "DRIVE_TYPE", &(drive->drive_type));
1547 v3_chkpt_save_8(drive_ctx, "SECTOR_COUNT", &(drive->sector_count));
1548 v3_chkpt_save_8(drive_ctx, "SECTOR_NUM", &(drive->sector_num));
1549 v3_chkpt_save_16(drive_ctx, "CYLINDER", &(drive->cylinder));
1551 v3_chkpt_save_64(drive_ctx, "CURRENT_LBA", &(drive->current_lba));
1552 v3_chkpt_save_32(drive_ctx, "TRANSFER_LENGTH", &(drive->transfer_length));
1553 v3_chkpt_save_32(drive_ctx, "TRANSFER_INDEX", &(drive->transfer_index));
1555 v3_chkpt_save(drive_ctx, "DATA_BUF", DATA_BUFFER_SIZE, drive->data_buf);
1558 /* For now we'll just pack the type specific data at the end... */
1559 /* We should probably add a new context here in the future... */
1560 if (drive->drive_type == BLOCK_CDROM) {
1561 v3_chkpt_save(drive_ctx, "ATAPI_SENSE_DATA", 18, drive->cd_state.sense.buf);
1562 v3_chkpt_save_8(drive_ctx, "ATAPI_CMD", &(drive->cd_state.atapi_cmd));
1563 v3_chkpt_save(drive_ctx, "ATAPI_ERR_RECOVERY", 12, drive->cd_state.err_recovery.buf);
1564 } else if (drive->drive_type == BLOCK_DISK) {
1565 v3_chkpt_save_32(drive_ctx, "ACCESSED", &(drive->hd_state.accessed));
1566 v3_chkpt_save_32(drive_ctx, "MULT_SECT_NUM", &(drive->hd_state.mult_sector_num));
1567 v3_chkpt_save_32(drive_ctx, "CUR_SECT_NUM", &(drive->hd_state.cur_sector_num));
1577 static int ide_load(struct v3_chkpt_ctx * ctx, void * private_data) {
1578 struct ide_internal * ide = (struct ide_internal *)private_data;
1584 for (ch_num = 0; ch_num < 2; ch_num++) {
1585 struct v3_chkpt_ctx * ch_ctx = NULL;
1586 struct ide_channel * ch = &(ide->channels[ch_num]);
1588 snprintf(buf, 128, "channel-%d", ch_num);
1589 ch_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
1591 v3_chkpt_load_8(ch_ctx, "ERROR", &(ch->error_reg.val));
1592 v3_chkpt_load_8(ch_ctx, "FEATURES", &(ch->features.val));
1593 v3_chkpt_load_8(ch_ctx, "DRIVE_HEAD", &(ch->drive_head.val));
1594 v3_chkpt_load_8(ch_ctx, "STATUS", &(ch->status.val));
1595 v3_chkpt_load_8(ch_ctx, "CMD_REG", &(ch->cmd_reg));
1596 v3_chkpt_load_8(ch_ctx, "CTRL_REG", &(ch->ctrl_reg.val));
1597 v3_chkpt_load_8(ch_ctx, "DMA_CMD", &(ch->dma_cmd.val));
1598 v3_chkpt_load_8(ch_ctx, "DMA_STATUS", &(ch->dma_status.val));
1599 v3_chkpt_load_32(ch_ctx, "PRD_ADDR", &(ch->dma_prd_addr));
1600 v3_chkpt_load_32(ch_ctx, "DMA_TBL_IDX", &(ch->dma_tbl_index));
1603 for (drive_num = 0; drive_num < 2; drive_num++) {
1604 struct v3_chkpt_ctx * drive_ctx = NULL;
1605 struct ide_drive * drive = &(ch->drives[drive_num]);
1607 snprintf(buf, 128, "drive-%d-%d", ch_num, drive_num);
1608 drive_ctx = v3_chkpt_open_ctx(ctx->chkpt, ch_ctx, buf);
1610 v3_chkpt_load_8(drive_ctx, "DRIVE_TYPE", &(drive->drive_type));
1611 v3_chkpt_load_8(drive_ctx, "SECTOR_COUNT", &(drive->sector_count));
1612 v3_chkpt_load_8(drive_ctx, "SECTOR_NUM", &(drive->sector_num));
1613 v3_chkpt_load_16(drive_ctx, "CYLINDER", &(drive->cylinder));
1615 v3_chkpt_load_64(drive_ctx, "CURRENT_LBA", &(drive->current_lba));
1616 v3_chkpt_load_32(drive_ctx, "TRANSFER_LENGTH", &(drive->transfer_length));
1617 v3_chkpt_load_32(drive_ctx, "TRANSFER_INDEX", &(drive->transfer_index));
1619 v3_chkpt_load(drive_ctx, "DATA_BUF", DATA_BUFFER_SIZE, drive->data_buf);
1622 /* For now we'll just pack the type specific data at the end... */
1623 /* We should probably add a new context here in the future... */
1624 if (drive->drive_type == BLOCK_CDROM) {
1625 v3_chkpt_load(drive_ctx, "ATAPI_SENSE_DATA", 18, drive->cd_state.sense.buf);
1626 v3_chkpt_load_8(drive_ctx, "ATAPI_CMD", &(drive->cd_state.atapi_cmd));
1627 v3_chkpt_load(drive_ctx, "ATAPI_ERR_RECOVERY", 12, drive->cd_state.err_recovery.buf);
1628 } else if (drive->drive_type == BLOCK_DISK) {
1629 v3_chkpt_load_32(drive_ctx, "ACCESSED", &(drive->hd_state.accessed));
1630 v3_chkpt_load_32(drive_ctx, "MULT_SECT_NUM", &(drive->hd_state.mult_sector_num));
1631 v3_chkpt_load_32(drive_ctx, "CUR_SECT_NUM", &(drive->hd_state.cur_sector_num));
1644 static struct v3_device_ops dev_ops = {
1645 .free = (int (*)(void *))ide_free,
1646 #ifdef V3_CONFIG_CHECKPOINT
1656 static int connect_fn(struct v3_vm_info * vm,
1657 void * frontend_data,
1658 struct v3_dev_blk_ops * ops,
1659 v3_cfg_tree_t * cfg,
1660 void * private_data) {
1661 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1662 struct ide_channel * channel = NULL;
1663 struct ide_drive * drive = NULL;
1665 char * bus_str = v3_cfg_val(cfg, "bus_num");
1666 char * drive_str = v3_cfg_val(cfg, "drive_num");
1667 char * type_str = v3_cfg_val(cfg, "type");
1668 char * model_str = v3_cfg_val(cfg, "model");
1670 uint_t drive_num = 0;
1673 if ((!type_str) || (!drive_str) || (!bus_str)) {
1674 PrintError("Incomplete IDE Configuration\n");
1678 bus_num = atoi(bus_str);
1679 drive_num = atoi(drive_str);
1681 channel = &(ide->channels[bus_num]);
1682 drive = &(channel->drives[drive_num]);
1684 if (drive->drive_type != BLOCK_NONE) {
1685 PrintError("Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1689 if (model_str != NULL) {
1690 strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1693 if (strcasecmp(type_str, "cdrom") == 0) {
1694 drive->drive_type = BLOCK_CDROM;
1696 while (strlen((char *)(drive->model)) < 40) {
1697 strcat((char*)(drive->model), " ");
1700 } else if (strcasecmp(type_str, "hd") == 0) {
1701 drive->drive_type = BLOCK_DISK;
1703 drive->hd_state.accessed = 0;
1704 drive->hd_state.mult_sector_num = 1;
1706 drive->num_sectors = 63;
1707 drive->num_heads = 16;
1708 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
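// e.g. an 8 GiB backing image (16,777,216 512-byte sectors) yields
// 16,777,216 / (63 * 16) = 16,644 cylinders.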
1710 PrintError("invalid IDE drive type\n");
1717 // Hardcode this for now, but it's not a good idea....
1718 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1721 drive->private_data = private_data;
1729 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1730 struct ide_internal * ide = NULL;
1731 char * dev_id = v3_cfg_val(cfg, "ID");
1734 PrintDebug("IDE: Initializing IDE\n");
1736 ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1739 PrintError("Error allocating IDE state\n");
1743 memset(ide, 0, sizeof(struct ide_internal));
1746 ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1748 if (ide->pci_bus != NULL) {
1749 struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1752 PrintError("Could not find southbridge\n");
1757 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1760 PrintDebug("IDE: Creating IDE bus x 2\n");
1762 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
1765 PrintError("Could not attach device %s\n", dev_id);
1770 if (init_ide_state(ide) == -1) {
1771 PrintError("Failed to initialize IDE state\n");
1772 v3_remove_device(dev);
1776 PrintDebug("Connecting to IDE IO ports\n");
1778 ret |= v3_dev_hook_io(dev, PRI_DATA_PORT,
1779 &ide_read_data_port, &write_data_port);
1780 ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT,
1781 &read_port_std, &write_port_std);
1782 ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
1783 &read_port_std, &write_port_std);
1784 ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
1785 &read_port_std, &write_port_std);
1786 ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
1787 &read_port_std, &write_port_std);
1788 ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
1789 &read_port_std, &write_port_std);
1790 ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
1791 &read_port_std, &write_port_std);
1792 ret |= v3_dev_hook_io(dev, PRI_CMD_PORT,
1793 &read_port_std, &write_cmd_port);
1795 ret |= v3_dev_hook_io(dev, SEC_DATA_PORT,
1796 &ide_read_data_port, &write_data_port);
1797 ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT,
1798 &read_port_std, &write_port_std);
1799 ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
1800 &read_port_std, &write_port_std);
1801 ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
1802 &read_port_std, &write_port_std);
1803 ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
1804 &read_port_std, &write_port_std);
1805 ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
1806 &read_port_std, &write_port_std);
1807 ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
1808 &read_port_std, &write_port_std);
1809 ret |= v3_dev_hook_io(dev, SEC_CMD_PORT,
1810 &read_port_std, &write_cmd_port);
1813 ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT,
1814 &read_port_std, &write_port_std);
1816 ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT,
1817 &read_port_std, &write_port_std);
1820 ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
1821 &read_port_std, &write_port_std);
1823 ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
1824 &read_port_std, &write_port_std);
1828 PrintError("Error hooking IDE IO port\n");
1829 v3_remove_device(dev);
1835 struct v3_pci_bar bars[6];
1836 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1837 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1838 struct pci_device * pci_dev = NULL;
1841 PrintDebug("Connecting IDE to PCI bus\n");
1843 for (i = 0; i < 6; i++) {
1844 bars[i].type = PCI_BAR_NONE;
1847 bars[4].type = PCI_BAR_IO;
1848 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
1849 bars[4].default_base_port = -1;
1850 bars[4].num_ports = 16;
1852 bars[4].io_read = read_dma_port;
1853 bars[4].io_write = write_dma_port;
1854 bars[4].private_data = ide;
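// BAR4 is the standard bus-master IDE register block: 16 I/O ports total, 8 per channel
// (command, status, and PRD pointer), with the secondary channel at base + 8 -- the same
// layout that read_dma_port()/write_dma_port() decode via DMA_CHANNEL_FLAG.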
1856 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
1858 pci_config_update, NULL, NULL, ide);
1860 if (pci_dev == NULL) {
1861 PrintError("Failed to register IDE BUS %d with PCI\n", i);
1862 v3_remove_device(dev);
1866 /* This is for CMD646 devices
1867 pci_dev->config_header.vendor_id = 0x1095;
1868 pci_dev->config_header.device_id = 0x0646;
1869 pci_dev->config_header.revision = 0x8f07;
1872 pci_dev->config_header.vendor_id = 0x8086;
1873 pci_dev->config_header.device_id = 0x7010;
1874 pci_dev->config_header.revision = 0x00;
1876 pci_dev->config_header.prog_if = 0x80; // Master IDE device
1877 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1878 pci_dev->config_header.class = PCI_CLASS_STORAGE;
1880 pci_dev->config_header.command = 0;
1881 pci_dev->config_header.status = 0x0280;
1883 ide->ide_pci = pci_dev;
1888 if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
1889 PrintError("Could not register %s as frontend\n", dev_id);
1890 v3_remove_device(dev);
1895 PrintDebug("IDE Initialized\n");
1901 device_register("IDE", ide_init)
1906 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num,
1907 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
1909 struct ide_internal * ide = ide_data;
1910 struct ide_channel * channel = &(ide->channels[channel_num]);
1911 struct ide_drive * drive = &(channel->drives[drive_num]);
1913 if (drive->drive_type == BLOCK_NONE) {
1917 *cylinders = drive->num_cylinders;
1918 *heads = drive->num_heads;
1919 *sectors = drive->num_sectors;