2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
29 #ifndef V3_CONFIG_DEBUG_IDE
31 #define PrintDebug(fmt, args...)
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
38 #define PRI_DATA_PORT 0x1f0
39 #define PRI_FEATURES_PORT 0x1f1
40 #define PRI_SECT_CNT_PORT 0x1f2
41 #define PRI_SECT_NUM_PORT 0x1f3
42 #define PRI_CYL_LOW_PORT 0x1f4
43 #define PRI_CYL_HIGH_PORT 0x1f5
44 #define PRI_DRV_SEL_PORT 0x1f6
45 #define PRI_CMD_PORT 0x1f7
46 #define PRI_CTRL_PORT 0x3f6
47 #define PRI_ADDR_REG_PORT 0x3f7
49 #define SEC_DATA_PORT 0x170
50 #define SEC_FEATURES_PORT 0x171
51 #define SEC_SECT_CNT_PORT 0x172
52 #define SEC_SECT_NUM_PORT 0x173
53 #define SEC_CYL_LOW_PORT 0x174
54 #define SEC_CYL_HIGH_PORT 0x175
55 #define SEC_DRV_SEL_PORT 0x176
56 #define SEC_CMD_PORT 0x177
57 #define SEC_CTRL_PORT 0x376
58 #define SEC_ADDR_REG_PORT 0x377
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
64 #define DATA_BUFFER_SIZE 2048
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
/* Printable names for the primary-channel I/O ports used by debug output.
 * Entries 0-7 correspond to the base registers 0x1f0-0x1f7; entries 8-9
 * correspond to the control (0x3f6) and address (0x3f7) registers. */
static const char * ide_pri_port_strs[] = {
    "PRI_DATA",
    "PRI_FEATURES",
    "PRI_SECT_CNT",
    "PRI_SECT_NUM",
    "PRI_CYL_LOW",
    "PRI_CYL_HIGH",
    "PRI_DRV_SEL",
    "PRI_CMD",
    "PRI_CTRL",
    "PRI_ADDR_REG"
};
/* Printable names for the secondary-channel I/O ports used by debug output.
 * Entries 0-7 correspond to the base registers 0x170-0x177; entries 8-9
 * correspond to the control (0x376) and address (0x377) registers. */
static const char * ide_sec_port_strs[] = {
    "SEC_DATA",
    "SEC_FEATURES",
    "SEC_SECT_CNT",
    "SEC_SECT_NUM",
    "SEC_CYL_LOW",
    "SEC_CYL_HIGH",
    "SEC_DRV_SEL",
    "SEC_CMD",
    "SEC_CTRL",
    "SEC_ADDR_REG"
};
/* Printable names for the bus-master DMA register offsets (low 3 bits of
 * the DMA port).  Offsets 1 and 3 are reserved and therefore unnamed. */
static const char * ide_dma_port_strs[] = {
    "DMA_CMD",  NULL,       "DMA_STATUS", NULL,
    "DMA_PRD0", "DMA_PRD1", "DMA_PRD2",   "DMA_PRD3"
};
// Type of block-device backend attached to a drive slot (NONE = empty slot).
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
/* Map a primary/secondary ATA I/O port number to a printable name for
 * debug output.  Ports 0x1f0-0x1f7 / 0x170-0x177 index the first eight
 * table entries; the control/addr-reg ports map to entries 8 and 9. */
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
// +8 skips the eight base registers to reach "PRI_CTRL"/"PRI_ADDR_REG"
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
/* NOTE(review): the fallthrough case and closing brace are elided in
 * this listing (original line numbering jumps here). */
/* Map a bus-master DMA register offset (low 3 bits of the port) to a
 * printable name for debug output; reserved offsets yield NULL. */
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
/* Per-drive ATAPI (CD-ROM) state.
 * NOTE(review): additional members are elided in this listing. */
105 struct ide_cd_state {
// Sense data reported back to the guest (REQUEST SENSE)
106 struct atapi_sense_data sense;
// Error-recovery mode-page settings
109 struct atapi_error_recovery err_recovery;
/* Per-drive ATA hard-disk state.
 * NOTE(review): additional members are elided in this listing. */
112 struct ide_hd_state {
115 /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116 uint32_t mult_sector_num;
118 /* This is the current op sector size:
119 * for multiple sector ops this equals mult_sector_num
120 * for standard ops this equals 1
122 uint32_t cur_sector_num;
/* Interior of struct ide_drive and its packed task-file register
 * overlays.  NOTE(review): the struct/union headers and several field
 * declarations are elided in this listing; comments below describe only
 * the visible members. */
128 v3_block_type_t drive_type;
// Backend block-device callbacks used to reach the host image
130 struct v3_dev_blk_ops * ops;
133 struct ide_cd_state cd_state;
134 struct ide_hd_state hd_state;
139 // Where we are in the data transfer
140 uint32_t transfer_index;
142 // the length of a transfer
143 // calculated for easy access
144 uint32_t transfer_length;
// Current logical block address for the in-flight operation
146 uint64_t current_lba;
148 // We have a local data buffer that we use for IO port accesses
149 uint8_t data_buf[DATA_BUFFER_SIZE];
// Disk geometry reported to the guest
152 uint32_t num_cylinders;
154 uint32_t num_sectors;
159 uint8_t sector_count; // 0x1f2,0x172
// ATAPI interrupt-reason bits overlay the sector-count register
160 struct atapi_irq_flags irq_flags;
161 } __attribute__((packed));
164 uint8_t sector_num; // 0x1f3,0x173
166 } __attribute__((packed));
173 uint8_t cylinder_low; // 0x1f4,0x174
174 uint8_t cylinder_high; // 0x1f5,0x175
175 } __attribute__((packed));
180 } __attribute__((packed));
183 // The transfer length requested by the CPU
185 } __attribute__((packed));
/* Interior of struct ide_channel.  NOTE(review): the struct header,
 * union wrappers, and some fields are elided in this listing. */
192 struct ide_drive drives[2];
195 struct ide_error_reg error_reg; // [read] 0x1f1,0x171
197 struct ide_features_reg features;
199 struct ide_drive_head_reg drive_head; // 0x1f6,0x176
201 struct ide_status_reg status; // [read] 0x1f7,0x177
202 uint8_t cmd_reg; // [write] 0x1f7,0x177
204 int irq; // this is temporary until we add PCI support
207 struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
// Raw byte view of the bus-master DMA register block
210 uint8_t dma_ports[8];
212 struct ide_dma_cmd_reg dma_cmd;
214 struct ide_dma_status_reg dma_status;
// Guest-physical base address of the PRD (physical region descriptor) table
216 uint32_t dma_prd_addr;
217 } __attribute__((packed));
218 } __attribute__((packed));
// Index of the PRD entry currently being processed
220 uint32_t dma_tbl_index;
/* Top-level device state: the two IDE channels plus PCI/southbridge
 * linkage and the owning VM.
 * NOTE(review): the closing brace is elided in this listing. */
225 struct ide_internal {
226 struct ide_channel channels[2];
228 struct v3_southbridge * southbridge;
229 struct vm_device * pci_bus;
231 struct pci_device * ide_pci;
233 struct v3_vm_info * vm;
240 /* Utility functions */
/* Endianness conversion helpers.
 *
 * IDENTIFY/ATAPI fields arrive big-endian while the emulated registers
 * are little-endian, so each conversion is a plain byte swap.  These are
 * implemented with shifts/masks so the result is host-endianness
 * independent (the previous versions read the value through a uint8_t*,
 * which only performed the swap on a little-endian host).  On the x86
 * hosts Palacios targets the behavior is unchanged.
 */
static inline uint16_t be_to_le_16(const uint16_t val) {
    return (uint16_t)((val << 8) | (val >> 8));
}

static inline uint16_t le_to_be_16(const uint16_t val) {
    // A byte swap is its own inverse
    return be_to_le_16(val);
}

static inline uint32_t be_to_le_32(const uint32_t val) {
    return ((val & 0x000000ffU) << 24) |
           ((val & 0x0000ff00U) <<  8) |
           ((val & 0x00ff0000U) >>  8) |
           ((val & 0xff000000U) >> 24);
}

static inline uint32_t le_to_be_32(const uint32_t val) {
    // A byte swap is its own inverse
    return be_to_le_32(val);
}
/* Return 0 for primary-channel ports, 1 for secondary-channel ports,
 * matching the legacy I/O ranges (0x1f0-0x1f7/0x3f6-0x3f7 and
 * 0x170-0x177/0x376-0x377) and the default bus-master DMA ranges.
 * NOTE(review): the return statements and closing brace are elided in
 * this listing. */
262 static inline int get_channel_index(ushort_t port) {
263 if (((port & 0xfff8) == 0x1f0) ||
264 ((port & 0xfffe) == 0x3f6) ||
265 ((port & 0xfff8) == 0xc000)) {
267 } else if (((port & 0xfff8) == 0x170) ||
268 ((port & 0xfffe) == 0x376) ||
269 ((port & 0xfff8) == 0xc008)) {
/* Resolve which channel (primary/secondary) a guest I/O port addresses.
 * NOTE(review): assumes get_channel_index() recognizes the port; an
 * unrecognized port would index out of bounds — verify callers. */
276 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
277 int channel_idx = get_channel_index(port);
278 return &(ide->channels[channel_idx]);
/* Return the drive currently selected by the drive-select bit of the
 * drive/head register. */
281 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
282 return &(channel->drives[channel->drive_head.drive_sel]);
/* Nonzero when the guest has enabled LBA addressing on this channel. */
286 static inline int is_lba_enabled(struct ide_channel * channel) {
287 return channel->drive_head.lba_mode;
/* Raise the channel's IRQ unless the guest has masked interrupts via
 * the control register; also latches the DMA "interrupt generated"
 * status bit so the bus-master status register reflects it. */
292 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
293 if (channel->ctrl_reg.irq_disable == 0) {
295 //PrintError("Raising IDE Interrupt %d\n", channel->irq);
297 channel->dma_status.int_gen = 1;
298 v3_raise_irq(ide->vm, channel->irq);
/* Reset one drive's task-file registers and local transfer buffer to
 * power-on defaults.  ATAPI drives present the 0xeb14 signature in the
 * cylinder registers; ATA drives present 0x0000.
 * NOTE(review): the else branch brace and closing brace are elided in
 * this listing. */
303 static void drive_reset(struct ide_drive * drive) {
304 drive->sector_count = 0x01;
305 drive->sector_num = 0x01;
307 PrintDebug("Resetting drive %s\n", drive->model);
309 if (drive->drive_type == BLOCK_CDROM) {
// ATAPI device signature
310 drive->cylinder = 0xeb14;
312 drive->cylinder = 0x0000;
313 //drive->hd_state.accessed = 0;
317 memset(drive->data_buf, 0, sizeof(drive->data_buf));
318 drive->transfer_index = 0;
320 // Send the reset signal to the connected device callbacks
321 // channel->drives[0].reset();
322 // channel->drives[1].reset();
/* Begin a channel soft reset (soft_reset bit set by the guest):
 * status = BSY|DSC, error = 0x01 (diagnostic pass code), command
 * register cleared, interrupts unmasked. */
325 static void channel_reset(struct ide_channel * channel) {
327 // set busy and seek complete flags
328 channel->status.val = 0x90;
331 channel->error_reg.val = 0x01;
334 channel->cmd_reg = 0x00;
336 channel->ctrl_reg.irq_disable = 0;
/* Finish a channel soft reset (soft_reset bit dropped by the guest):
 * clear BSY, set RDY, reset head selection, and reset both drives. */
339 static void channel_reset_complete(struct ide_channel * channel) {
340 channel->status.busy = 0;
341 channel->status.ready = 1;
343 channel->drive_head.head_num = 0;
345 drive_reset(&(channel->drives[0]));
346 drive_reset(&(channel->drives[1]));
/* Abort the current command: status = ERR|RDY, error = 0x04 (the ATA
 * ABRT bit), then signal the guest. */
350 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
351 channel->status.val = 0x41; // Error + ready
352 channel->error_reg.val = 0x04; // ABRT (command aborted) bit
354 ide_raise_irq(ide, channel);
// Forward declarations for the bus-master DMA engine (defined below);
// needed because write_cmd_port references them before their definitions.
358 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
359 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
362 /* ATAPI functions */
/* Debug helper: walk the PRD table in guest-physical memory for this
 * channel, printing each entry until the end-of-table flag is found.
 * NOTE(review): the loop header, index variable, and early-exit lines
 * are elided in this listing. */
370 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
371 struct ide_dma_prd prd_entry;
374 V3_Print("Dumping PRD table\n");
377 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
380 ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
382 if (ret != sizeof(struct ide_dma_prd)) {
383 PrintError("Could not read PRD\n");
387 V3_Print("\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
// A PRD size of 0 encodes the maximum 64KB region
389 (prd_entry.size == 0) ? 0x10000 : prd_entry.size,
390 prd_entry.end_of_table);
392 if (prd_entry.end_of_table) {
/* Bus-master DMA read: transfer drive->transfer_length bytes from the
 * backing device (disk sectors or ATAPI blocks) into guest memory,
 * following the guest-programmed PRD table one entry at a time.  On
 * completion (end-of-table) the channel status/DMA status are set to
 * idle/ready and an IRQ is raised.
 * NOTE(review): many lines (braces, returns, loop bookkeeping) are
 * elided in this listing; comments describe only the visible code. */
404 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
405 struct ide_drive * drive = get_selected_drive(channel);
406 // This is at top level scope to do the EOT test at the end
407 struct ide_dma_prd prd_entry = {};
408 uint_t bytes_left = drive->transfer_length;
410 // Read in the data buffer....
411 // Read a sector/block at a time until the prd entry is full.
413 #ifdef V3_CONFIG_DEBUG_IDE
414 print_prd_table(ide, channel);
417 PrintDebug("DMA read for %d bytes\n", bytes_left);
419 // Loop through the disk data
420 while (bytes_left > 0) {
// Address of the current PRD entry in the guest-physical table
421 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
422 uint_t prd_bytes_left = 0;
423 uint_t prd_offset = 0;
426 PrintDebug("PRD table address = %x\n", channel->dma_prd_addr);
428 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
430 if (ret != sizeof(struct ide_dma_prd)) {
431 PrintError("Could not read PRD\n");
435 PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
436 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
438 // loop through the PRD data....
440 if (prd_entry.size == 0) {
441 // a size of 0 means 64k
442 prd_bytes_left = 0x10000;
444 prd_bytes_left = prd_entry.size;
// Fill this PRD region one sector/block at a time
448 while (prd_bytes_left > 0) {
449 uint_t bytes_to_write = 0;
451 if (drive->drive_type == BLOCK_DISK) {
452 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
// Pull the next sector from the backend into data_buf
455 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
456 PrintError("Failed to read next disk sector\n");
459 } else if (drive->drive_type == BLOCK_CDROM) {
460 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
461 bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
463 if (atapi_read_chunk(ide, channel) == -1) {
464 PrintError("Failed to read next disk sector\n");
// Non-data ATAPI command reached the DMA path
469 PrintError("How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
474 //V3_Print("DMA of command packet\n");
476 bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
477 prd_bytes_left = bytes_to_write;
480 // V3_Print("Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
481 cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset,
482 bytes_to_write, drive->data_buf);
489 drive->transfer_index += bytes_to_write;
// Mark the channel idle and complete for the non-data ATAPI path
491 channel->status.busy = 0;
492 channel->status.ready = 1;
493 channel->status.data_req = 0;
494 channel->status.error = 0;
495 channel->status.seek_complete = 1;
497 channel->dma_status.active = 0;
498 channel->dma_status.err = 0;
500 ide_raise_irq(ide, channel);
506 PrintDebug("Writing DMA data to guest Memory ptr=%p, len=%d\n",
507 (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
509 drive->current_lba++;
// Copy the staged sector/block into the guest's PRD region
511 ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
513 if (ret != bytes_to_write) {
514 PrintError("Failed to copy data into guest memory... (ret=%d)\n", ret);
518 PrintDebug("\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
520 drive->transfer_index += ret;
521 prd_bytes_left -= ret;
// This PRD entry is exhausted; advance to the next one
526 channel->dma_tbl_index++;
528 if (drive->drive_type == BLOCK_DISK) {
529 if (drive->transfer_index % HD_SECTOR_SIZE) {
530 PrintError("We currently don't handle sectors that span PRD descriptors\n");
533 } else if (drive->drive_type == BLOCK_CDROM) {
534 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
535 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
536 PrintError("We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
537 PrintError("transfer_index=%d, transfer_length=%d\n",
538 drive->transfer_index, drive->transfer_length);
// Table ended before the full transfer completed: guest programming error
545 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
546 PrintError("DMA table not large enough for data transfer...\n");
// ATAPI interrupt reason: data from device, command complete
552 drive->irq_flags.io_dir = 1;
553 drive->irq_flags.c_d = 1;
554 drive->irq_flags.rel = 0;
558 // Update to the next PRD entry
// Transfer finished: report idle/ready status and signal the guest
562 if (prd_entry.end_of_table) {
563 channel->status.busy = 0;
564 channel->status.ready = 1;
565 channel->status.data_req = 0;
566 channel->status.error = 0;
567 channel->status.seek_complete = 1;
569 channel->dma_status.active = 0;
570 channel->dma_status.err = 0;
573 ide_raise_irq(ide, channel);
/* Bus-master DMA write: transfer drive->transfer_length bytes from
 * guest memory (per the PRD table) into the backing disk, one sector at
 * a time.  On end-of-table the channel is marked idle and an IRQ is
 * raised.
 * NOTE(review): many lines (braces, returns, loop bookkeeping) are
 * elided in this listing; comments describe only the visible code. */
579 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
580 struct ide_drive * drive = get_selected_drive(channel);
581 // This is at top level scope to do the EOT test at the end
582 struct ide_dma_prd prd_entry = {};
583 uint_t bytes_left = drive->transfer_length;
586 PrintDebug("DMA write from %d bytes\n", bytes_left);
588 // Loop through disk data
589 while (bytes_left > 0) {
590 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
591 uint_t prd_bytes_left = 0;
592 uint_t prd_offset = 0;
595 PrintDebug("PRD Table address = %x\n", channel->dma_prd_addr);
597 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
599 if (ret != sizeof(struct ide_dma_prd)) {
600 PrintError("Could not read PRD\n");
604 PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
605 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
608 if (prd_entry.size == 0) {
609 // a size of 0 means 64k
610 prd_bytes_left = 0x10000;
612 prd_bytes_left = prd_entry.size;
// Drain this PRD region one sector at a time
615 while (prd_bytes_left > 0) {
616 uint_t bytes_to_write = 0;
619 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
// Stage a sector's worth of guest data in data_buf
622 ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
624 if (ret != bytes_to_write) {
625 PrintError("Faild to copy data from guest memory... (ret=%d)\n", ret);
629 PrintDebug("\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
// Commit the staged sector to the backend
632 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
633 PrintError("Failed to write data to disk\n");
637 drive->current_lba++;
639 drive->transfer_index += ret;
640 prd_bytes_left -= ret;
// This PRD entry is exhausted; advance to the next one
645 channel->dma_tbl_index++;
647 if (drive->transfer_index % HD_SECTOR_SIZE) {
648 PrintError("We currently don't handle sectors that span PRD descriptors\n");
652 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
653 PrintError("DMA table not large enough for data transfer...\n");
654 PrintError("\t(bytes_left=%u) (transfer_length=%u)...\n",
655 bytes_left, drive->transfer_length);
656 PrintError("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
657 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
659 print_prd_table(ide, channel);
// Transfer finished: report idle/ready status and signal the guest
664 if (prd_entry.end_of_table) {
665 channel->status.busy = 0;
666 channel->status.ready = 1;
667 channel->status.data_req = 0;
668 channel->status.error = 0;
669 channel->status.seek_complete = 1;
671 channel->dma_status.active = 0;
672 channel->dma_status.err = 0;
675 ide_raise_irq(ide, channel);
682 #define DMA_CMD_PORT 0x00
683 #define DMA_STATUS_PORT 0x02
684 #define DMA_PRD_PORT0 0x04
685 #define DMA_PRD_PORT1 0x05
686 #define DMA_PRD_PORT2 0x06
687 #define DMA_PRD_PORT3 0x07
689 #define DMA_CHANNEL_FLAG 0x08
/* Guest write handler for the bus-master DMA register block.  Bit 3 of
 * the port selects the channel; the low 3 bits select the register.
 * Writing the command register with start=1 kicks off the DMA engine in
 * the requested direction; the status register uses write-1-to-clear
 * semantics for its error/interrupt bits; the PRD registers assemble
 * the 32-bit PRD table base a byte at a time.
 * NOTE(review): several case labels, breaks, and braces are elided in
 * this listing. */
691 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
692 struct ide_internal * ide = (struct ide_internal *)private_data;
693 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
694 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
695 struct ide_channel * channel = &(ide->channels[channel_flag]);
697 PrintDebug("IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
698 port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
700 switch (port_offset) {
702 channel->dma_cmd.val = *(uint8_t *)src;
704 if (channel->dma_cmd.start == 0) {
// Stopping DMA rewinds the PRD table cursor
705 channel->dma_tbl_index = 0;
707 channel->dma_status.active = 1;
709 if (channel->dma_cmd.read == 1) {
711 if (dma_read(core, ide, channel) == -1) {
712 PrintError("Failed DMA Read\n");
717 if (dma_write(core, ide, channel) == -1) {
718 PrintError("Failed DMA Write\n");
// Only the start and read-direction bits are writable
723 channel->dma_cmd.val &= 0x09;
728 case DMA_STATUS_PORT: {
729 uint8_t val = *(uint8_t *)src;
732 PrintError("Invalid read length for DMA status port\n");
// Bits 5-6 are read/write; bit 0 is read-only; bits 1-2 clear on write of 1
737 channel->dma_status.val = ((val & 0x60) |
738 (channel->dma_status.val & 0x01) |
739 (channel->dma_status.val & ~val & 0x06));
746 case DMA_PRD_PORT3: {
747 uint_t addr_index = port_offset & 0x3;
748 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
751 if (addr_index + length > 4) {
752 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
// Patch the addressed bytes of the 32-bit PRD base address
756 for (i = 0; i < length; i++) {
757 addr_buf[addr_index + i] = *((uint8_t *)src + i);
760 PrintDebug("Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
765 PrintError("IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
/* Guest read handler for the bus-master DMA register block: returns the
 * raw register bytes starting at the addressed offset.
 * NOTE(review): the bound check allows port_offset + length up to 16
 * while dma_ports is declared as uint8_t[8] — presumably relies on the
 * union overlay of the following registers; verify against the full
 * struct definition. */
773 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
774 struct ide_internal * ide = (struct ide_internal *)private_data;
775 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
776 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
777 struct ide_channel * channel = &(ide->channels[channel_flag]);
779 PrintDebug("Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
781 if (port_offset + length > 16) {
782 PrintError("DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
786 memcpy(dst, channel->dma_ports + port_offset, length);
788 PrintDebug("\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
/* Guest write handler for the ATA command register (0x1f7/0x177).
 * Latches the command byte and dispatches on it: identify commands
 * stage a response buffer for PIO readout, ATAPI packet commands arm a
 * 12-byte packet transfer, PIO/DMA read-write commands program the
 * transfer state and (for DMA) kick the engine if it is already active,
 * and power-management/housekeeping commands just set status and raise
 * an IRQ.  Unknown commands are logged as unimplemented.
 * NOTE(review): many break statements, braces, and return paths are
 * elided in this listing. */
795 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
796 struct ide_internal * ide = priv_data;
797 struct ide_channel * channel = get_selected_channel(ide, port);
798 struct ide_drive * drive = get_selected_drive(channel);
801 PrintError("Invalid Write Length on IDE command Port %x\n", port);
805 PrintDebug("IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
807 channel->cmd_reg = *(uint8_t *)src;
809 switch (channel->cmd_reg) {
811 case 0xa1: // ATAPI Identify Device Packet
812 if (drive->drive_type != BLOCK_CDROM) {
815 // JRL: Should we abort here?
816 ide_abort_command(ide, channel);
// Fill data_buf with the ATAPI IDENTIFY response for PIO readout
819 atapi_identify_device(drive);
821 channel->error_reg.val = 0;
822 channel->status.val = 0x58; // ready, data_req, seek_complete
824 ide_raise_irq(ide, channel);
827 case 0xec: // Identify Device
828 if (drive->drive_type != BLOCK_DISK) {
831 // JRL: Should we abort here?
832 ide_abort_command(ide, channel);
834 ata_identify_device(drive);
836 channel->error_reg.val = 0;
837 channel->status.val = 0x58;
839 ide_raise_irq(ide, channel);
843 case 0xa0: // ATAPI Command Packet
844 if (drive->drive_type != BLOCK_CDROM) {
845 ide_abort_command(ide, channel);
848 drive->sector_count = 1;
// DRQ set: the guest will now write the 12-byte packet to the data port
850 channel->status.busy = 0;
851 channel->status.write_fault = 0;
852 channel->status.data_req = 1;
853 channel->status.error = 0;
855 // reset the data buffer...
856 drive->transfer_length = ATAPI_PACKET_SIZE;
857 drive->transfer_index = 0;
861 case 0x20: // Read Sectors with Retry
862 case 0x21: // Read Sectors without Retry
// Single-sector ops interrupt after every sector
863 drive->hd_state.cur_sector_num = 1;
865 if (ata_read_sectors(ide, channel) == -1) {
866 PrintError("Error reading sectors\n");
871 case 0x24: // Read Sectors Extended
872 drive->hd_state.cur_sector_num = 1;
874 if (ata_read_sectors_ext(ide, channel) == -1) {
875 PrintError("Error reading extended sectors\n");
880 case 0xc8: // Read DMA with retry
881 case 0xc9: { // Read DMA
// A sector count of 0 means 256 sectors (ATA convention)
882 uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
884 if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
885 ide_abort_command(ide, channel);
889 drive->hd_state.cur_sector_num = 1;
891 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
892 drive->transfer_index = 0;
// If the bus-master engine is already started, run the transfer now
894 if (channel->dma_status.active == 1) {
896 if (dma_read(core, ide, channel) == -1) {
897 PrintError("Failed DMA Read\n");
904 case 0xca: { // Write DMA
905 uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
907 if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
908 ide_abort_command(ide, channel);
912 drive->hd_state.cur_sector_num = 1;
914 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
915 drive->transfer_index = 0;
917 if (channel->dma_status.active == 1) {
919 if (dma_write(core, ide, channel) == -1) {
920 PrintError("Failed DMA Write\n");
// Power-management commands: acknowledge with ready status + IRQ
926 case 0xe0: // Standby Now 1
927 case 0xe1: // Set Idle Immediate
928 case 0xe2: // Standby
929 case 0xe3: // Set Idle 1
930 case 0xe6: // Sleep Now 1
931 case 0x94: // Standby Now 2
932 case 0x95: // Idle Immediate (CFA)
933 case 0x96: // Standby 2
934 case 0x97: // Set idle 2
935 case 0x99: // Sleep Now 2
936 channel->status.val = 0;
937 channel->status.ready = 1;
938 ide_raise_irq(ide, channel);
941 case 0xef: // Set Features
942 // Prior to this the features register has been written to.
943 // This command tells the drive to check if the new value is supported (the value is drive specific)
944 // Common is that bit0=DMA enable
945 // If valid the drive raises an interrupt, if not it aborts.
947 // Do some checking here...
949 channel->status.busy = 0;
950 channel->status.write_fault = 0;
951 channel->status.error = 0;
952 channel->status.ready = 1;
953 channel->status.seek_complete = 1;
955 ide_raise_irq(ide, channel);
958 case 0x91: // Initialize Drive Parameters
959 case 0x10: // recalibrate?
960 channel->status.error = 0;
961 channel->status.ready = 1;
962 channel->status.seek_complete = 1;
963 ide_raise_irq(ide, channel);
965 case 0xc6: { // Set multiple mode (IDE Block mode)
966 // This makes the drive transfer multiple sectors before generating an interrupt
// NOTE(review): validates sector_num rather than sector_count here — confirm intent
967 uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
969 if (tmp_sect_num > MAX_MULT_SECTORS) {
970 ide_abort_command(ide, channel);
974 if (drive->sector_count == 0) {
975 drive->hd_state.mult_sector_num= 1;
977 drive->hd_state.mult_sector_num = drive->sector_count;
980 channel->status.ready = 1;
981 channel->status.error = 0;
983 ide_raise_irq(ide, channel);
988 case 0x08: // Reset Device
990 channel->error_reg.val = 0x01;
991 channel->status.busy = 0;
992 channel->status.ready = 1;
993 channel->status.seek_complete = 1;
994 channel->status.write_fault = 0;
995 channel->status.error = 0;
998 case 0xe5: // Check power mode
999 drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1000 channel->status.busy = 0;
1001 channel->status.ready = 1;
1002 channel->status.write_fault = 0;
1003 channel->status.data_req = 0;
1004 channel->status.error = 0;
1007 case 0xc4: // read multiple sectors
// Multi-sector ops interrupt once per mult_sector_num sectors
1008 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
1010 PrintError("Unimplemented IDE command (%x)\n", channel->cmd_reg);
/* Guest write handler for the ATA data port: accumulate the written
 * bytes into the drive's staging buffer, and once transfer_length bytes
 * have arrived, dispatch the pending command (currently only ATAPI
 * packets are handled; sector writes are unimplemented).
 * NOTE(review): the memcpy below is not bounds-checked against
 * DATA_BUFFER_SIZE — a guest writing more than transfer_length could
 * overrun data_buf; verify against the elided lines. */
1018 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1019 struct ide_internal * ide = priv_data;
1020 struct ide_channel * channel = get_selected_channel(ide, port);
1021 struct ide_drive * drive = get_selected_drive(channel);
1023 // PrintDebug("IDE: Writing Data Port %x (val=%x, len=%d)\n",
1024 // port, *(uint32_t *)src, length);
1026 memcpy(drive->data_buf + drive->transfer_index, src, length);
1027 drive->transfer_index += length;
1029 // Transfer is complete, dispatch the command
1030 if (drive->transfer_index >= drive->transfer_length) {
1031 switch (channel->cmd_reg) {
1032 case 0x30: // Write Sectors
1033 PrintError("Writing Data not yet implemented\n");
1036 case 0xa0: // ATAPI packet command
1037 if (atapi_handle_packet(core, ide, channel) == -1) {
1038 PrintError("Error handling ATAPI packet\n");
1043 PrintError("Unhandld IDE Command %x\n", channel->cmd_reg);
/* PIO read path for hard disks: copy `length` bytes from the staged
 * sector buffer to the guest, refilling the buffer from the backend at
 * each sector boundary, and raise an IRQ at each cur_sector_num-sized
 * increment or at the end of the transfer.
 * NOTE(review): several braces/returns are elided in this listing. */
1052 static int read_hd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1053 struct ide_drive * drive = get_selected_drive(channel);
// Offset into the currently staged sector
1054 int data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1058 if (drive->transfer_index >= drive->transfer_length) {
1059 PrintError("Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
1060 drive->transfer_length, drive->transfer_index,
1061 drive->transfer_index + length);
// At a sector boundary (after the first sector): stage the next sector
1066 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1067 drive->current_lba++;
1069 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1070 PrintError("Could not read next disk sector\n");
1076 PrintDebug("Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
1077 *(uint32_t *)(drive->data_buf + data_offset),
1078 length, data_offset);
1080 memcpy(dst, drive->data_buf + data_offset, length);
1082 drive->transfer_index += length;
1085 /* This is the trigger for interrupt injection.
1086 * For read single sector commands we interrupt after every sector
1087 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1088 * cur_sector_num is configured depending on the operation we are currently running
1089 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1091 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1092 (drive->transfer_index == drive->transfer_length)) {
1093 if (drive->transfer_index < drive->transfer_length) {
1094 // An increment is complete, but there is still more data to be transferred...
1095 PrintDebug("Integral Complete, still transferring more sectors\n");
1096 channel->status.data_req = 1;
1098 drive->irq_flags.c_d = 0;
1100 PrintDebug("Final Sector Transferred\n");
1101 // This was the final read of the request
1102 channel->status.data_req = 0;
1105 drive->irq_flags.c_d = 1;
1106 drive->irq_flags.rel = 0;
1109 channel->status.ready = 1;
1110 drive->irq_flags.io_dir = 1;
1111 channel->status.busy = 0;
1113 ide_raise_irq(ide, channel);
/* PIO read path for ATAPI CD-ROM data: copy `length` bytes from the
 * staged block buffer to the guest, refilling the buffer at each
 * ATAPI block boundary, updating the byte-count (cylinder) registers,
 * and raising an IRQ per increment / at completion.
 * NOTE(review): several braces/returns are elided in this listing. */
1122 static int read_cd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1123 struct ide_drive * drive = get_selected_drive(channel);
1124 int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1125 // int req_offset = drive->transfer_index % drive->req_len;
// Suppress noisy logging for READ(10) (0x28), the common data command
1127 if (drive->cd_state.atapi_cmd != 0x28) {
1128 PrintDebug("IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
1129 PrintDebug("IDE: transfer len=%d, transfer idx=%d\n", drive->transfer_length, drive->transfer_index);
1134 if (drive->transfer_index >= drive->transfer_length) {
1135 PrintError("Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n",
1136 drive->transfer_length, drive->transfer_index,
1137 drive->transfer_index + length);
// At a block boundary (after the first block): stage the next block
1142 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1143 if (atapi_update_data_buf(ide, channel) == -1) {
1144 PrintError("Could not update CDROM data buffer\n");
1149 memcpy(dst, drive->data_buf + data_offset, length);
1151 drive->transfer_index += length;
1154 // Should the req_offset be recalculated here?????
1155 if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1156 if (drive->transfer_index < drive->transfer_length) {
1157 // An increment is complete, but there is still more data to be transferred...
1159 channel->status.data_req = 1;
1161 drive->irq_flags.c_d = 0;
1163 // Update the request length in the cylinder regs
1164 if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1165 PrintError("Could not update request length after completed increment\n");
1169 // This was the final read of the request
1172 channel->status.data_req = 0;
1173 channel->status.ready = 1;
1175 drive->irq_flags.c_d = 1;
1176 drive->irq_flags.rel = 0;
1179 drive->irq_flags.io_dir = 1;
1180 channel->status.busy = 0;
1182 ide_raise_irq(ide, channel);
/* PIO readout of a previously staged IDENTIFY response: return the next
 * `length` bytes of data_buf and drop DRQ when the whole response has
 * been consumed.
 * NOTE(review): closing braces/return are elided in this listing. */
1189 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1190 struct ide_drive * drive = get_selected_drive(channel);
1192 channel->status.busy = 0;
1193 channel->status.ready = 1;
1194 channel->status.write_fault = 0;
1195 channel->status.seek_complete = 1;
1196 channel->status.corrected = 0;
1197 channel->status.error = 0;
1200 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1201 drive->transfer_index += length;
// Response fully consumed: clear DRQ
1203 if (drive->transfer_index >= drive->transfer_length) {
1204 channel->status.data_req = 0;
/* Guest read handler for the ATA data port: route to the identify
 * readout if an identify command is latched, otherwise to the CD or HD
 * PIO read path; reads from an empty slot return zeros.
 * NOTE(review): return statements and braces are elided in this
 * listing. */
1211 static int ide_read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1212 struct ide_internal * ide = priv_data;
1213 struct ide_channel * channel = get_selected_channel(ide, port);
1214 struct ide_drive * drive = get_selected_drive(channel);
1216 // PrintDebug("IDE: Reading Data Port %x (len=%d)\n", port, length);
// 0xec = IDENTIFY DEVICE, 0xa1 = IDENTIFY PACKET DEVICE
1218 if ((channel->cmd_reg == 0xec) ||
1219 (channel->cmd_reg == 0xa1)) {
1220 return read_drive_id((uint8_t *)dst, length, ide, channel);
1223 if (drive->drive_type == BLOCK_CDROM) {
1224 if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1225 PrintError("IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1228 } else if (drive->drive_type == BLOCK_DISK) {
1229 if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1230 PrintError("IDE: Could not read HD Data\n");
// No device attached: reads return zero
1234 memset((uint8_t *)dst, 0, length);
/* Guest write handler for the remaining ATA task-file ports.  Control
 * writes drive soft-reset edge handling; the addressing registers
 * (sector count/number, cylinders) are mirrored into BOTH drives, since
 * the guest may program them before selecting a drive; drive-select
 * writes re-resolve the active drive and reject empty slots.
 * NOTE(review): the switch header, breaks, and braces are elided in
 * this listing. */
1240 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1241 struct ide_internal * ide = priv_data;
1242 struct ide_channel * channel = get_selected_channel(ide, port);
1243 struct ide_drive * drive = get_selected_drive(channel);
1246 PrintError("Invalid Write length on IDE port %x\n", port);
1250 PrintDebug("IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1253 // reset and interrupt enable
1255 case SEC_CTRL_PORT: {
1256 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1258 // only reset channel on a 0->1 reset bit transition
1259 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1260 channel_reset(channel);
1261 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1262 channel_reset_complete(channel);
1265 channel->ctrl_reg.val = tmp_ctrl->val;
1268 case PRI_FEATURES_PORT:
1269 case SEC_FEATURES_PORT:
1270 channel->features.val = *(uint8_t *)src;
1273 case PRI_SECT_CNT_PORT:
1274 case SEC_SECT_CNT_PORT:
// Mirrored into both drives (drive selection may change afterwards)
1275 channel->drives[0].sector_count = *(uint8_t *)src;
1276 channel->drives[1].sector_count = *(uint8_t *)src;
1279 case PRI_SECT_NUM_PORT:
1280 case SEC_SECT_NUM_PORT:
1281 channel->drives[0].sector_num = *(uint8_t *)src;
1282 channel->drives[1].sector_num = *(uint8_t *)src;
1284 case PRI_CYL_LOW_PORT:
1285 case SEC_CYL_LOW_PORT:
1286 channel->drives[0].cylinder_low = *(uint8_t *)src;
1287 channel->drives[1].cylinder_low = *(uint8_t *)src;
1290 case PRI_CYL_HIGH_PORT:
1291 case SEC_CYL_HIGH_PORT:
1292 channel->drives[0].cylinder_high = *(uint8_t *)src;
1293 channel->drives[1].cylinder_high = *(uint8_t *)src;
1296 case PRI_DRV_SEL_PORT:
1297 case SEC_DRV_SEL_PORT: {
1298 channel->drive_head.val = *(uint8_t *)src;
1300 // make sure the reserved bits are ok..
1301 // JRL TODO: check with new ramdisk to make sure this is right...
1302 channel->drive_head.val |= 0xa0;
// Selection may have changed: re-resolve the active drive
1304 drive = get_selected_drive(channel);
1306 // Selecting a non-present device is a no-no
1307 if (drive->drive_type == BLOCK_NONE) {
1308 PrintDebug("Attempting to select a non-present drive\n");
1309 channel->error_reg.abort = 1;
1310 channel->status.error = 1;
// Present drive: report idle/ready status
1312 channel->status.busy = 0;
1313 channel->status.ready = 1;
1314 channel->status.data_req = 0;
1315 channel->status.error = 0;
1316 channel->status.seek_complete = 1;
1318 channel->dma_status.active = 0;
1319 channel->dma_status.err = 0;
1325 PrintError("IDE: Write to unknown Port %x\n", port);
/*
 * Handle guest reads of the "standard" IDE I/O ports (error/features,
 * sector count/num, cylinders, drive select, status, addr reg) for both
 * channels, copying one byte into dst.
 *
 * Presumably returns the access length on success and -1 on error —
 * TODO confirm; the return statements fall in elided lines.
 */
1332 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1333 struct ide_internal * ide = priv_data;
1334 struct ide_channel * channel = get_selected_channel(ide, port);
1335 struct ide_drive * drive = get_selected_drive(channel);
// Byte-wide ports only.
1338 PrintError("Invalid Read length on IDE port %x\n", port);
1342 PrintDebug("IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
// The legacy "address" register is not emulated at all.
1344 if ((port == PRI_ADDR_REG_PORT) ||
1345 (port == SEC_ADDR_REG_PORT)) {
1346 // unused, return 0xff
1347 *(uint8_t *)dst = 0xff;
1352 // if no drive is present just return 0 + reserved bits
1353 if (drive->drive_type == BLOCK_NONE) {
1354 if ((port == PRI_DRV_SEL_PORT) ||
1355 (port == SEC_DRV_SEL_PORT)) {
// 0xa0 keeps the always-one reserved bits of the drive/head register set.
1356 *(uint8_t *)dst = 0xa0;
1358 *(uint8_t *)dst = 0;
1366 // This is really the error register.
1367 case PRI_FEATURES_PORT:
1368 case SEC_FEATURES_PORT:
1369 *(uint8_t *)dst = channel->error_reg.val;
1372 case PRI_SECT_CNT_PORT:
1373 case SEC_SECT_CNT_PORT:
1374 *(uint8_t *)dst = drive->sector_count;
1377 case PRI_SECT_NUM_PORT:
1378 case SEC_SECT_NUM_PORT:
1379 *(uint8_t *)dst = drive->sector_num;
1382 case PRI_CYL_LOW_PORT:
1383 case SEC_CYL_LOW_PORT:
1384 *(uint8_t *)dst = drive->cylinder_low;
1388 case PRI_CYL_HIGH_PORT:
1389 case SEC_CYL_HIGH_PORT:
1390 *(uint8_t *)dst = drive->cylinder_high;
1393 case PRI_DRV_SEL_PORT:
1394 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1395 *(uint8_t *)dst = channel->drive_head.val;
// NOTE(review): a real status read also deasserts INTRQ — the comment
// below suggests that was known but not implemented here.
1402 // Something about lowering interrupts here....
1403 *(uint8_t *)dst = channel->status.val;
1407 PrintError("Invalid Port: %x\n", port);
1411 PrintDebug("\tVal=%x\n", *(uint8_t *)dst);
/*
 * Reset one drive slot to its power-on defaults: task-file registers at
 * their spec-defined initial values, no media type, empty model string,
 * cleared transfer state, and zeroed CHS geometry. The backend hookup
 * (private_data/ops) is cleared; connect_fn fills it in later.
 */
1418 static void init_drive(struct ide_drive * drive) {
// ATA spec initial values for sector count / sector number.
1420 drive->sector_count = 0x01;
1421 drive->sector_num = 0x01;
1422 drive->cylinder = 0x0000;
// No media attached until a frontend connects.
1424 drive->drive_type = BLOCK_NONE;
1426 memset(drive->model, 0, sizeof(drive->model));
1428 drive->transfer_index = 0;
1429 drive->transfer_length = 0;
1430 memset(drive->data_buf, 0, sizeof(drive->data_buf));
1432 drive->num_cylinders = 0;
1433 drive->num_heads = 0;
1434 drive->num_sectors = 0;
1437 drive->private_data = NULL;
/*
 * Reset one IDE channel to its power-on register state and initialize
 * both of its drive slots.
 */
1441 static void init_channel(struct ide_channel * channel) {
// error_reg = 0x01 is the post-diagnostic "no error" code; ctrl = 0x08
// keeps the (historically always-one) bit 3 set.
1444 channel->error_reg.val = 0x01;
1445 channel->drive_head.val = 0x00;
1446 channel->status.val = 0x00;
1447 channel->cmd_reg = 0x00;
1448 channel->ctrl_reg.val = 0x08;
// Bus-master DMA state starts idle.
1450 channel->dma_cmd.val = 0;
1451 channel->dma_status.val = 0;
1452 channel->dma_prd_addr = 0;
1453 channel->dma_tbl_index = 0;
// Reset master (0) and slave (1).
1455 for (i = 0; i < 2; i++) {
1456 init_drive(&(channel->drives[i]));
/*
 * PCI config-space write callback registered with the PCI bus.
 * Currently only logs the interrupt line; no emulated state changes.
 *
 * NOTE(review): when V3_CONFIG_DEBUG_IDE is off, PrintDebug expands to
 * nothing, leaving 'ide' unused — may trigger an unused-variable
 * warning. Confirm against the build flags.
 */
1462 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1463 PrintDebug("PCI Config Update\n");
1465 struct ide_internal * ide = (struct ide_internal *)(private_data);
1467 PrintDebug("\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1473 static int init_ide_state(struct ide_internal * ide) {
1477 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1480 for (i = 0; i < 1; i++) {
1481 init_channel(&(ide->channels[i]));
1483 // JRL: this is a terrible hack...
1484 ide->channels[i].irq = PRI_DEFAULT_IRQ + i;
/*
 * Device teardown callback (registered in dev_ops.free).
 * Presumably releases the ide state allocated in ide_init — TODO
 * confirm; the body beyond the comment below is elided in this excerpt,
 * and PCI deregistration is acknowledged as missing.
 */
1494 static int ide_free(struct ide_internal * ide) {
1496 // deregister from PCI?
1503 #ifdef V3_CONFIG_CHECKPOINT
1505 #include <palacios/vmm_sprintf.h>
/*
 * Checkpoint-save callback: serialize both channels' register state and
 * each drive's task-file/transfer state into nested checkpoint contexts
 * named "channel-N" and "drive-N-M".
 *
 * NOTE(review): the v3_chkpt_open_ctx() results are used without a NULL
 * check — confirm the checkpoint API tolerates that or add checks.
 */
1506 static int ide_save(struct v3_chkpt_ctx * ctx, void * private_data) {
1507 struct ide_internal * ide = (struct ide_internal *)private_data;
1513 for (ch_num = 0; ch_num < 2; ch_num++) {
1514 struct v3_chkpt_ctx * ch_ctx = NULL;
1515 struct ide_channel * ch = &(ide->channels[ch_num]);
// Per-channel subcontext, keyed by channel index.
1517 snprintf(buf, 128, "channel-%d", ch_num);
1518 ch_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
// Channel register file.
1520 v3_chkpt_save_8(ch_ctx, "ERROR", &(ch->error_reg.val));
1521 v3_chkpt_save_8(ch_ctx, "FEATURES", &(ch->features.val));
1522 v3_chkpt_save_8(ch_ctx, "DRIVE_HEAD", &(ch->drive_head.val));
1523 v3_chkpt_save_8(ch_ctx, "STATUS", &(ch->status.val));
1524 v3_chkpt_save_8(ch_ctx, "CMD_REG", &(ch->cmd_reg));
1525 v3_chkpt_save_8(ch_ctx, "CTRL_REG", &(ch->ctrl_reg.val));
// Bus-master DMA engine state.
1526 v3_chkpt_save_8(ch_ctx, "DMA_CMD", &(ch->dma_cmd.val));
1527 v3_chkpt_save_8(ch_ctx, "DMA_STATUS", &(ch->dma_status.val));
1528 v3_chkpt_save_32(ch_ctx, "PRD_ADDR", &(ch->dma_prd_addr));
1529 v3_chkpt_save_32(ch_ctx, "DMA_TBL_IDX", &(ch->dma_tbl_index));
1532 for (drive_num = 0; drive_num < 2; drive_num++) {
1533 struct v3_chkpt_ctx * drive_ctx = NULL;
1534 struct ide_drive * drive = &(ch->drives[drive_num]);
1536 snprintf(buf, 128, "drive-%d-%d", ch_num, drive_num);
1537 drive_ctx = v3_chkpt_open_ctx(ctx->chkpt, ch_ctx, buf);
// Per-drive task file and in-flight PIO transfer state.
// NOTE(review): DRIVE_TYPE is saved as 8 bits; confirm the enum fits.
1539 v3_chkpt_save_8(drive_ctx, "DRIVE_TYPE", &(drive->drive_type));
1540 v3_chkpt_save_8(drive_ctx, "SECTOR_COUNT", &(drive->sector_count));
1541 v3_chkpt_save_8(drive_ctx, "SECTOR_NUM", &(drive->sector_num));
1542 v3_chkpt_save_16(drive_ctx, "CYLINDER", &(drive->cylinder));
1544 v3_chkpt_save_64(drive_ctx, "CURRENT_LBA", &(drive->current_lba));
1545 v3_chkpt_save_32(drive_ctx, "TRANSFER_LENGTH", &(drive->transfer_length));
1546 v3_chkpt_save_32(drive_ctx, "TRANSFER_INDEX", &(drive->transfer_index));
1548 v3_chkpt_save(drive_ctx, "DATA_BUF", DATA_BUFFER_SIZE, drive->data_buf);
1551 /* For now we'll just pack the type specific data at the end... */
1552 /* We should probably add a new context here in the future... */
1553 if (drive->drive_type == BLOCK_CDROM) {
1554 v3_chkpt_save(drive_ctx, "ATAPI_SENSE_DATA", 18, drive->cd_state.sense.buf);
1555 v3_chkpt_save_8(drive_ctx, "ATAPI_CMD", &(drive->cd_state.atapi_cmd));
1556 v3_chkpt_save(drive_ctx, "ATAPI_ERR_RECOVERY", 12, drive->cd_state.err_recovery.buf);
1557 } else if (drive->drive_type == BLOCK_DISK) {
1558 v3_chkpt_save_32(drive_ctx, "ACCESSED", &(drive->hd_state.accessed));
1559 v3_chkpt_save_32(drive_ctx, "MULT_SECT_NUM", &(drive->hd_state.mult_sector_num));
1560 v3_chkpt_save_32(drive_ctx, "CUR_SECT_NUM", &(drive->hd_state.cur_sector_num));
/*
 * Checkpoint-load callback: mirror of ide_save — restore both channels'
 * register state and each drive's task-file/transfer state from the
 * "channel-N" / "drive-N-M" contexts. Keys, sizes, and ordering must
 * stay in lock-step with ide_save.
 *
 * NOTE(review): v3_chkpt_open_ctx() results are used without a NULL
 * check, same as in ide_save — confirm or harden.
 */
1570 static int ide_load(struct v3_chkpt_ctx * ctx, void * private_data) {
1571 struct ide_internal * ide = (struct ide_internal *)private_data;
1577 for (ch_num = 0; ch_num < 2; ch_num++) {
1578 struct v3_chkpt_ctx * ch_ctx = NULL;
1579 struct ide_channel * ch = &(ide->channels[ch_num]);
1581 snprintf(buf, 128, "channel-%d", ch_num);
1582 ch_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
// Channel register file.
1584 v3_chkpt_load_8(ch_ctx, "ERROR", &(ch->error_reg.val));
1585 v3_chkpt_load_8(ch_ctx, "FEATURES", &(ch->features.val));
1586 v3_chkpt_load_8(ch_ctx, "DRIVE_HEAD", &(ch->drive_head.val));
1587 v3_chkpt_load_8(ch_ctx, "STATUS", &(ch->status.val));
1588 v3_chkpt_load_8(ch_ctx, "CMD_REG", &(ch->cmd_reg));
1589 v3_chkpt_load_8(ch_ctx, "CTRL_REG", &(ch->ctrl_reg.val));
// Bus-master DMA engine state.
1590 v3_chkpt_load_8(ch_ctx, "DMA_CMD", &(ch->dma_cmd.val));
1591 v3_chkpt_load_8(ch_ctx, "DMA_STATUS", &(ch->dma_status.val));
1592 v3_chkpt_load_32(ch_ctx, "PRD_ADDR", &(ch->dma_prd_addr));
1593 v3_chkpt_load_32(ch_ctx, "DMA_TBL_IDX", &(ch->dma_tbl_index));
1596 for (drive_num = 0; drive_num < 2; drive_num++) {
1597 struct v3_chkpt_ctx * drive_ctx = NULL;
1598 struct ide_drive * drive = &(ch->drives[drive_num]);
1600 snprintf(buf, 128, "drive-%d-%d", ch_num, drive_num);
1601 drive_ctx = v3_chkpt_open_ctx(ctx->chkpt, ch_ctx, buf);
// Per-drive task file and in-flight PIO transfer state.
1603 v3_chkpt_load_8(drive_ctx, "DRIVE_TYPE", &(drive->drive_type));
1604 v3_chkpt_load_8(drive_ctx, "SECTOR_COUNT", &(drive->sector_count));
1605 v3_chkpt_load_8(drive_ctx, "SECTOR_NUM", &(drive->sector_num));
1606 v3_chkpt_load_16(drive_ctx, "CYLINDER", &(drive->cylinder));
1608 v3_chkpt_load_64(drive_ctx, "CURRENT_LBA", &(drive->current_lba));
1609 v3_chkpt_load_32(drive_ctx, "TRANSFER_LENGTH", &(drive->transfer_length));
1610 v3_chkpt_load_32(drive_ctx, "TRANSFER_INDEX", &(drive->transfer_index));
1612 v3_chkpt_load(drive_ctx, "DATA_BUF", DATA_BUFFER_SIZE, drive->data_buf);
1615 /* For now we'll just pack the type specific data at the end... */
1616 /* We should probably add a new context here in the future... */
// Type-specific payload keyed off the freshly loaded drive_type.
1617 if (drive->drive_type == BLOCK_CDROM) {
1618 v3_chkpt_load(drive_ctx, "ATAPI_SENSE_DATA", 18, drive->cd_state.sense.buf);
1619 v3_chkpt_load_8(drive_ctx, "ATAPI_CMD", &(drive->cd_state.atapi_cmd));
1620 v3_chkpt_load(drive_ctx, "ATAPI_ERR_RECOVERY", 12, drive->cd_state.err_recovery.buf);
1621 } else if (drive->drive_type == BLOCK_DISK) {
1622 v3_chkpt_load_32(drive_ctx, "ACCESSED", &(drive->hd_state.accessed));
1623 v3_chkpt_load_32(drive_ctx, "MULT_SECT_NUM", &(drive->hd_state.mult_sector_num));
1624 v3_chkpt_load_32(drive_ctx, "CUR_SECT_NUM", &(drive->hd_state.cur_sector_num));
/*
 * Device operations table registered with the device manager. The
 * checkpoint save/load hooks are added only when V3_CONFIG_CHECKPOINT
 * is enabled (entries elided in this excerpt).
 */
1637 static struct v3_device_ops dev_ops = {
1638 .free = (int (*)(void *))ide_free,
1639 #ifdef V3_CONFIG_CHECKPOINT
1649 static int connect_fn(struct v3_vm_info * vm,
1650 void * frontend_data,
1651 struct v3_dev_blk_ops * ops,
1652 v3_cfg_tree_t * cfg,
1653 void * private_data) {
1654 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1655 struct ide_channel * channel = NULL;
1656 struct ide_drive * drive = NULL;
1658 char * bus_str = v3_cfg_val(cfg, "bus_num");
1659 char * drive_str = v3_cfg_val(cfg, "drive_num");
1660 char * type_str = v3_cfg_val(cfg, "type");
1661 char * model_str = v3_cfg_val(cfg, "model");
1663 uint_t drive_num = 0;
1666 if ((!type_str) || (!drive_str) || (!bus_str)) {
1667 PrintError("Incomplete IDE Configuration\n");
1671 bus_num = atoi(bus_str);
1672 drive_num = atoi(drive_str);
1674 channel = &(ide->channels[bus_num]);
1675 drive = &(channel->drives[drive_num]);
1677 if (drive->drive_type != BLOCK_NONE) {
1678 PrintError("Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1682 if (model_str != NULL) {
1683 strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1686 if (strcasecmp(type_str, "cdrom") == 0) {
1687 drive->drive_type = BLOCK_CDROM;
1689 while (strlen((char *)(drive->model)) < 40) {
1690 strcat((char*)(drive->model), " ");
1693 } else if (strcasecmp(type_str, "hd") == 0) {
1694 drive->drive_type = BLOCK_DISK;
1696 drive->hd_state.accessed = 0;
1697 drive->hd_state.mult_sector_num = 1;
1699 drive->num_sectors = 63;
1700 drive->num_heads = 16;
1701 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
1703 PrintError("invalid IDE drive type\n");
1710 // Hardcode this for now, but its not a good idea....
1711 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1714 drive->private_data = private_data;
/*
 * Device-manager init entry point for the IDE controller. Allocates the
 * device state, optionally binds to a PCI bus + southbridge, registers
 * the device, hooks all legacy I/O ports for both channels, and (when
 * PCI is configured) registers a PIIX3-compatible PCI function with a
 * bus-master DMA BAR. Error paths below return -1 (return statements
 * are elided in this excerpt — TODO confirm).
 */
1722 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1723 struct ide_internal * ide = NULL;
1724 char * dev_id = v3_cfg_val(cfg, "ID");
1727 PrintDebug("IDE: Initializing IDE\n");
1729 ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1732 PrintError("Error allocating IDE state\n");
1736 memset(ide, 0, sizeof(struct ide_internal));
// PCI attachment is optional: only wired up if the config names a bus.
1739 ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1741 if (ide->pci_bus != NULL) {
1742 struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1745 PrintError("Could not find southbridge\n");
1750 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1753 PrintDebug("IDE: Creating IDE bus x 2\n");
1755 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
1758 PrintError("Could not attach device %s\n", dev_id);
1763 if (init_ide_state(ide) == -1) {
1764 PrintError("Failed to initialize IDE state\n");
1765 v3_remove_device(dev);
1769 PrintDebug("Connecting to IDE IO ports\n");
// Hook the full legacy port set for the primary channel (0x1f0-0x1f7).
// Failures are accumulated in ret and checked once below.
1771 ret |= v3_dev_hook_io(dev, PRI_DATA_PORT,
1772 &ide_read_data_port, &write_data_port);
1773 ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT,
1774 &read_port_std, &write_port_std);
1775 ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
1776 &read_port_std, &write_port_std);
1777 ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
1778 &read_port_std, &write_port_std);
1779 ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
1780 &read_port_std, &write_port_std);
1781 ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
1782 &read_port_std, &write_port_std);
1783 ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
1784 &read_port_std, &write_port_std);
1785 ret |= v3_dev_hook_io(dev, PRI_CMD_PORT,
1786 &read_port_std, &write_cmd_port);
// Secondary channel ports (0x170-0x177).
1788 ret |= v3_dev_hook_io(dev, SEC_DATA_PORT,
1789 &ide_read_data_port, &write_data_port);
1790 ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT,
1791 &read_port_std, &write_port_std);
1792 ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
1793 &read_port_std, &write_port_std);
1794 ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
1795 &read_port_std, &write_port_std);
1796 ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
1797 &read_port_std, &write_port_std);
1798 ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
1799 &read_port_std, &write_port_std);
1800 ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
1801 &read_port_std, &write_port_std);
1802 ret |= v3_dev_hook_io(dev, SEC_CMD_PORT,
1803 &read_port_std, &write_cmd_port);
// Control and (unused) address registers for both channels.
1806 ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT,
1807 &read_port_std, &write_port_std);
1809 ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT,
1810 &read_port_std, &write_port_std);
1813 ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
1814 &read_port_std, &write_port_std);
1816 ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
1817 &read_port_std, &write_port_std);
1821 PrintError("Error hooking IDE IO port\n");
1822 v3_remove_device(dev);
// PCI registration path: one PIIX3-style function with only BAR4 (the
// bus-master DMA register block) populated.
1828 struct v3_pci_bar bars[6];
1829 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1830 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1831 struct pci_device * pci_dev = NULL;
1834 PrintDebug("Connecting IDE to PCI bus\n");
1836 for (i = 0; i < 6; i++) {
1837 bars[i].type = PCI_BAR_NONE;
1840 bars[4].type = PCI_BAR_IO;
1841 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
// -1 lets the guest/BIOS assign the BAR base rather than hardcoding it.
1842 bars[4].default_base_port = -1;
1843 bars[4].num_ports = 16;
1845 bars[4].io_read = read_dma_port;
1846 bars[4].io_write = write_dma_port;
1847 bars[4].private_data = ide;
// Register as function 1 of the southbridge's PCI device number.
1849 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
1851 pci_config_update, NULL, NULL, NULL, ide);
1853 if (pci_dev == NULL) {
// NOTE(review): 'i' is the leftover BAR-loop index (always 6 here), so
// this message does not name a meaningful bus number.
1854 PrintError("Failed to register IDE BUS %d with PCI\n", i);
1855 v3_remove_device(dev);
1859 /* This is for CMD646 devices
1860 pci_dev->config_header.vendor_id = 0x1095;
1861 pci_dev->config_header.device_id = 0x0646;
1862 pci_dev->config_header.revision = 0x8f07;
// Identify as an Intel PIIX3 IDE function (8086:7010).
1865 pci_dev->config_header.vendor_id = 0x8086;
1866 pci_dev->config_header.device_id = 0x7010;
1867 pci_dev->config_header.revision = 0x00;
1869 pci_dev->config_header.prog_if = 0x80; // Master IDE device
1870 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1871 pci_dev->config_header.class = PCI_CLASS_STORAGE;
1873 pci_dev->config_header.command = 0;
1874 pci_dev->config_header.status = 0x0280;
1876 ide->ide_pci = pci_dev;
// Expose this device as a block frontend so backends can connect drives.
1881 if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
1882 PrintError("Could not register %s as frontend\n", dev_id);
1883 v3_remove_device(dev);
1888 PrintDebug("IDE Initialized\n");
// Register this driver with the device manager under the name "IDE",
// using ide_init as its config-driven constructor.
1894 device_register("IDE", ide_init)
1899 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num,
1900 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
1902 struct ide_internal * ide = ide_data;
1903 struct ide_channel * channel = &(ide->channels[channel_num]);
1904 struct ide_drive * drive = &(channel->drives[drive_num]);
1906 if (drive->drive_type == BLOCK_NONE) {
1910 *cylinders = drive->num_cylinders;
1911 *heads = drive->num_heads;
1912 *sectors = drive->num_sectors;