2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
29 #ifndef V3_CONFIG_DEBUG_IDE
31 #define PrintDebug(fmt, args...)
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
38 #define PRI_DATA_PORT 0x1f0
39 #define PRI_FEATURES_PORT 0x1f1
40 #define PRI_SECT_CNT_PORT 0x1f2
41 #define PRI_SECT_NUM_PORT 0x1f3
42 #define PRI_CYL_LOW_PORT 0x1f4
43 #define PRI_CYL_HIGH_PORT 0x1f5
44 #define PRI_DRV_SEL_PORT 0x1f6
45 #define PRI_CMD_PORT 0x1f7
46 #define PRI_CTRL_PORT 0x3f6
47 #define PRI_ADDR_REG_PORT 0x3f7
49 #define SEC_DATA_PORT 0x170
50 #define SEC_FEATURES_PORT 0x171
51 #define SEC_SECT_CNT_PORT 0x172
52 #define SEC_SECT_NUM_PORT 0x173
53 #define SEC_CYL_LOW_PORT 0x174
54 #define SEC_CYL_HIGH_PORT 0x175
55 #define SEC_DRV_SEL_PORT 0x176
56 #define SEC_CMD_PORT 0x177
57 #define SEC_CTRL_PORT 0x376
58 #define SEC_ADDR_REG_PORT 0x377
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
64 #define DATA_BUFFER_SIZE 2048
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
// Human-readable names for the primary channel's I/O ports, indexed by
// offset from PRI_DATA_PORT (0x1f0-0x1f7); the last two entries (index
// 8,9) name the control ports at 0x3f6-0x3f7.
static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM",
					   "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
					   "PRI_CTRL", "PRI_ADDR_REG"};

// Same layout for the secondary channel (0x170-0x177 and 0x376-0x377).
static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM",
					   "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
					   "SEC_CTRL", "SEC_ADDR_REG"};

// Bus-master DMA register names indexed by (port & 0x7); the NULL
// entries are reserved offsets with no register.
static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
					   "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};

// What kind of backend is attached to a drive slot (NONE = empty slot).
typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
// Per-drive state used only when the slot holds a CDROM backend.
struct ide_cd_state {
    // Sense data reported back to the guest after an ATAPI error
    struct atapi_sense_data sense;
    // ATAPI error-recovery parameters (set via mode select)
    struct atapi_error_recovery err_recovery;

// Per-drive state used only when the slot holds a hard-disk backend.
struct ide_hd_state {

    /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
    uint32_t mult_sector_num;

    /* This is the current op sector size:
     * for multiple sector ops this equals mult_sector_num
     * for standard ops this equals 1
     */
    uint32_t cur_sector_num;

    // BLOCK_NONE / BLOCK_DISK / BLOCK_CDROM for this slot
    v3_block_type_t drive_type;

    // Backend block-device callbacks used to service reads/writes
    struct v3_dev_blk_ops * ops;

    // Presumably only one of these is active, selected by drive_type
    struct ide_cd_state cd_state;
    struct ide_hd_state hd_state;

    // Where we are in the data transfer
    uint32_t transfer_index;

    // the length of a transfer
    // calculated for easy access
    uint32_t transfer_length;

    // Current logical block address on the backing device
    uint64_t current_lba;

    // We have a local data buffer that we use for IO port accesses
    uint8_t data_buf[DATA_BUFFER_SIZE];

    // Drive geometry advertised to the guest
    uint32_t num_cylinders;
    uint32_t num_sectors;

    uint8_t sector_count;               // 0x1f2,0x172 (ATA)
    struct atapi_irq_flags irq_flags;   // (ATAPI ONLY)
    } __attribute__((packed));

    uint8_t sector_num;                 // 0x1f3,0x173
    } __attribute__((packed));

    uint8_t cylinder_low;               // 0x1f4,0x174
    uint8_t cylinder_high;              // 0x1f5,0x175
    } __attribute__((packed));

    } __attribute__((packed));

    // The transfer length requested by the CPU
    } __attribute__((packed));

    // Master (index 0) and slave (index 1) drives on this channel
    struct ide_drive drives[2];

    // Registers
    struct ide_error_reg error_reg;        // [read] 0x1f1,0x171
    struct ide_features_reg features;
    struct ide_drive_head_reg drive_head;  // 0x1f6,0x176
    struct ide_status_reg status;          // [read] 0x1f7,0x177
    uint8_t cmd_reg;                       // [write] 0x1f7,0x177

    int irq; // this is temporary until we add PCI support

    struct ide_ctrl_reg ctrl_reg;          // [write] 0x3f6,0x376

    // Raw backing store for the bus-master DMA register block
    uint8_t dma_ports[8];
    struct ide_dma_cmd_reg dma_cmd;
    struct ide_dma_status_reg dma_status;
    // Guest-physical address of the PRD (scatter/gather) table
    uint32_t dma_prd_addr;
    } __attribute__((packed));
    } __attribute__((packed));

    // Which PRD entry the in-progress DMA transfer is on
    uint32_t dma_tbl_index;

// Top-level controller state: two channels plus PCI attachment info.
struct ide_internal {
    struct ide_channel channels[2];

    struct v3_southbridge * southbridge;
    struct vm_device * pci_bus;

    struct pci_device * ide_pci;

    struct v3_vm_info * vm;
240 /* Utility functions */
/*
 * Swap the two bytes of a 16-bit value (big-endian <-> host order on a
 * little-endian host).  Byte swapping is its own inverse.
 */
static inline uint16_t be_to_le_16(const uint16_t val) {
    uint8_t bytes[2];

    memcpy(bytes, &val, sizeof(bytes));
    return (uint16_t)(((uint16_t)bytes[0] << 8) | (uint16_t)bytes[1]);
}
/*
 * Convert a 16-bit value from little-endian to big-endian.  This is the
 * same byte swap as be_to_le_16(); spelled out here so the helper is
 * self-contained.
 */
static inline uint16_t le_to_be_16(const uint16_t val) {
    uint8_t b[2];

    memcpy(b, &val, sizeof(b));
    return (uint16_t)(((uint16_t)b[0] << 8) | (uint16_t)b[1]);
}
/*
 * Swap the four bytes of a 32-bit value (big-endian <-> host order on a
 * little-endian host).  Byte swapping is its own inverse.
 */
static inline uint32_t be_to_le_32(const uint32_t val) {
    uint8_t b[4];

    memcpy(b, &val, sizeof(b));
    return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	   ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}
/*
 * Convert a 32-bit value from little-endian to big-endian — the same
 * byte swap as be_to_le_32(), written out so the helper stands alone.
 */
static inline uint32_t le_to_be_32(const uint32_t val) {
    uint8_t b[4];

    memcpy(b, &val, sizeof(b));
    return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	   ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}
262 static inline int get_channel_index(ushort_t port) {
263 if (((port & 0xfff8) == 0x1f0) ||
264 ((port & 0xfffe) == 0x3f6) ||
265 ((port & 0xfff8) == 0xc000)) {
267 } else if (((port & 0xfff8) == 0x170) ||
268 ((port & 0xfffe) == 0x376) ||
269 ((port & 0xfff8) == 0xc008)) {
276 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
277 int channel_idx = get_channel_index(port);
278 return &(ide->channels[channel_idx]);
281 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
282 return &(channel->drives[channel->drive_head.drive_sel]);
286 static inline int is_lba_enabled(struct ide_channel * channel) {
287 return channel->drive_head.lba_mode;
292 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
293 if (channel->ctrl_reg.irq_disable == 0) {
295 PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
297 channel->dma_status.int_gen = 1;
298 v3_raise_irq(ide->vm, channel->irq);
300 PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
305 static void drive_reset(struct ide_drive * drive) {
306 drive->sector_count = 0x01;
307 drive->sector_num = 0x01;
309 PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
311 if (drive->drive_type == BLOCK_CDROM) {
312 drive->cylinder = 0xeb14;
314 drive->cylinder = 0x0000;
315 //drive->hd_state.accessed = 0;
319 memset(drive->data_buf, 0, sizeof(drive->data_buf));
320 drive->transfer_index = 0;
322 // Send the reset signal to the connected device callbacks
323 // channel->drives[0].reset();
324 // channel->drives[1].reset();
327 static void channel_reset(struct ide_channel * channel) {
329 // set busy and seek complete flags
330 channel->status.val = 0x90;
333 channel->error_reg.val = 0x01;
336 channel->cmd_reg = 0; // NOP
338 channel->ctrl_reg.irq_disable = 0;
341 static void channel_reset_complete(struct ide_channel * channel) {
342 channel->status.busy = 0;
343 channel->status.ready = 1;
345 channel->drive_head.head_num = 0;
347 drive_reset(&(channel->drives[0]));
348 drive_reset(&(channel->drives[1]));
352 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
354 PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");
356 channel->status.val = 0x41; // Error + ready
357 channel->error_reg.val = 0x04; // No idea...
359 ide_raise_irq(ide, channel);
363 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
364 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
367 /* ATAPI functions */
375 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
376 struct ide_dma_prd prd_entry;
379 V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
382 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
385 ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
387 if (ret != sizeof(struct ide_dma_prd)) {
388 PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
392 V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
394 (prd_entry.size == 0) ? 0x10000 : prd_entry.size,
395 prd_entry.end_of_table);
397 if (prd_entry.end_of_table) {
/*
 * Bus-master DMA read: move data from the selected drive (hard-disk
 * sectors via ata_read, or ATAPI data via atapi_read_chunk) into guest
 * memory, walking the guest's PRD scatter/gather table entry by entry
 * until transfer_length bytes have been delivered.  On the final PRD
 * entry the status/dma_status registers are updated and an interrupt
 * is raised.  Returns 0 on success, -1 on failure.
 *
 * NOTE(review): several control-flow lines (braces, error returns,
 * variable declarations such as 'ret'/'cmd_ret') appear to have been
 * lost from this listing — restore from upstream before building.
 */
static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // This is at top level scope to do the EOT test at the end
    struct ide_dma_prd prd_entry = {};
    uint_t bytes_left = drive->transfer_length;

    // Read in the data buffer....
    // Read a sector/block at a time until the prd entry is full.

#ifdef V3_CONFIG_DEBUG_IDE
    print_prd_table(ide, channel);

    PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);

    // Loop through the disk data
    while (bytes_left > 0) {
	uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
	uint_t prd_bytes_left = 0;
	uint_t prd_offset = 0;

	PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);

	// Fetch the next PRD entry from guest physical memory
	ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);

	if (ret != sizeof(struct ide_dma_prd)) {
	    PrintError(core->vm_info, core, "Could not read PRD\n");

	PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
		   prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);

	// loop through the PRD data....

	if (prd_entry.size == 0) {
	    // a size of 0 means 64k
	    prd_bytes_left = 0x10000;
	    prd_bytes_left = prd_entry.size;

	// Fill this PRD region one sector/block at a time
	while (prd_bytes_left > 0) {
	    uint_t bytes_to_write = 0;

	    if (drive->drive_type == BLOCK_DISK) {
		bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;

		if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
		    PrintError(core->vm_info, core, "Failed to read next disk sector\n");
	    } else if (drive->drive_type == BLOCK_CDROM) {
		if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
		    bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;

		    if (atapi_read_chunk(ide, channel) == -1) {
			PrintError(core->vm_info, core, "Failed to read next disk sector\n");

		    // Non-data ATAPI op: DMA just returns the response bytes
		    PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);

		    //V3_Print(core->vm_info, core, "DMA of command packet\n");

		    bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
		    prd_bytes_left = bytes_to_write;

		    // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
		    cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset,
						  bytes_to_write, drive->data_buf);

		    drive->transfer_index += bytes_to_write;

		    // Transfer complete: clear BSY/DRQ, set DRDY, stop DMA
		    channel->status.busy = 0;
		    channel->status.ready = 1;
		    channel->status.data_req = 0;
		    channel->status.error = 0;
		    channel->status.seek_complete = 1;

		    channel->dma_status.active = 0;
		    channel->dma_status.err = 0;

		    ide_raise_irq(ide, channel);

	    PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n",
		       (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);

	    drive->current_lba++;

	    // Copy the freshly-read sector/block into the guest's buffer
	    ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);

	    if (ret != bytes_to_write) {
		PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);

	    PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);

	    drive->transfer_index += ret;
	    prd_bytes_left -= ret;

	channel->dma_tbl_index++;

	// Sector/block transfers must not straddle a PRD boundary
	if (drive->drive_type == BLOCK_DISK) {
	    if (drive->transfer_index % HD_SECTOR_SIZE) {
		PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
	} else if (drive->drive_type == BLOCK_CDROM) {
	    if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
		if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
		    PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
		    PrintError(core->vm_info, core, "transfer_index=%d, transfer_length=%d\n",
			       drive->transfer_index, drive->transfer_length);

	if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
	    PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");

    // ATAPI interrupt reason: I/O to host, command/data = command, no release
    drive->irq_flags.io_dir = 1;
    drive->irq_flags.c_d = 1;
    drive->irq_flags.rel = 0;

    // Update to the next PRD entry

    if (prd_entry.end_of_table) {
	// Full transfer done: report ready and stop the DMA engine
	channel->status.busy = 0;
	channel->status.ready = 1;
	channel->status.data_req = 0;
	channel->status.error = 0;
	channel->status.seek_complete = 1;

	channel->dma_status.active = 0;
	channel->dma_status.err = 0;

    ide_raise_irq(ide, channel);
/*
 * Bus-master DMA write: move data from guest memory to the selected
 * hard disk, walking the guest's PRD table and flushing one sector at
 * a time via ata_write.  Mirrors dma_read in structure.  Returns 0 on
 * success, -1 on failure.
 *
 * NOTE(review): braces, error returns, and the 'ret' declaration look
 * elided from this listing — restore from upstream before building.
 */
static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // This is at top level scope to do the EOT test at the end
    struct ide_dma_prd prd_entry = {};
    uint_t bytes_left = drive->transfer_length;

    PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);

    // Loop through disk data
    while (bytes_left > 0) {
	uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
	uint_t prd_bytes_left = 0;
	uint_t prd_offset = 0;

	PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);

	// Fetch the next PRD entry from guest physical memory
	ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);

	if (ret != sizeof(struct ide_dma_prd)) {
	    PrintError(core->vm_info, core, "Could not read PRD\n");

	PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
		   prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);

	if (prd_entry.size == 0) {
	    // a size of 0 means 64k
	    prd_bytes_left = 0x10000;
	    prd_bytes_left = prd_entry.size;

	// Drain this PRD region one sector at a time
	while (prd_bytes_left > 0) {
	    uint_t bytes_to_write = 0;

	    bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;

	    // Pull the next sector's worth of data out of guest memory
	    ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);

	    if (ret != bytes_to_write) {
		PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);

	    PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);

	    // Flush the sector to the backing device
	    if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
		PrintError(core->vm_info, core, "Failed to write data to disk\n");

	    drive->current_lba++;

	    drive->transfer_index += ret;
	    prd_bytes_left -= ret;

	channel->dma_tbl_index++;

	// Sector writes must not straddle a PRD boundary
	if (drive->transfer_index % HD_SECTOR_SIZE) {
	    PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");

	if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
	    PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
	    PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%u)...\n",
		       bytes_left, drive->transfer_length);
	    PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
		       prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);

	    print_prd_table(ide, channel);

    if (prd_entry.end_of_table) {
	// Full transfer done: report ready and stop the DMA engine
	channel->status.busy = 0;
	channel->status.ready = 1;
	channel->status.data_req = 0;
	channel->status.error = 0;
	channel->status.seek_complete = 1;

	channel->dma_status.active = 0;
	channel->dma_status.err = 0;

    ide_raise_irq(ide, channel);
687 #define DMA_CMD_PORT 0x00
688 #define DMA_STATUS_PORT 0x02
689 #define DMA_PRD_PORT0 0x04
690 #define DMA_PRD_PORT1 0x05
691 #define DMA_PRD_PORT2 0x06
692 #define DMA_PRD_PORT3 0x07
694 #define DMA_CHANNEL_FLAG 0x08
/*
 * I/O hook for guest writes to the bus-master DMA register block.
 * Bit 3 of the port selects the channel; the low 3 bits select the
 * register.  Writing the command register with start=1 kicks off the
 * queued dma_read/dma_write; the PRD ports assemble the 32-bit PRD
 * table address byte by byte.
 *
 * NOTE(review): several case labels, braces, and returns appear elided
 * from this listing — restore from upstream before building.
 */
static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
    uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
    struct ide_channel * channel = &(ide->channels[channel_flag]);

    PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
	       port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);

    switch (port_offset) {
	    channel->dma_cmd.val = *(uint8_t *)src;

	    if (channel->dma_cmd.start == 0) {
		channel->dma_tbl_index = 0;

		channel->dma_status.active = 1;

		// direction bit: 1 = device-to-memory (read)
		if (channel->dma_cmd.read == 1) {
		    if (dma_read(core, ide, channel) == -1) {
			PrintError(core->vm_info, core, "Failed DMA Read\n");

		    if (dma_write(core, ide, channel) == -1) {
			PrintError(core->vm_info, core, "Failed DMA Write\n");

		// keep only the read-direction and start bits
		channel->dma_cmd.val &= 0x09;

	case DMA_STATUS_PORT: {
	    uint8_t val = *(uint8_t *)src;

		PrintError(core->vm_info, core, "Invalid read length for DMA status port\n");

	    // bits 5-6 writable; bit 0 read-only; bits 1-2 are
	    // write-1-to-clear
	    channel->dma_status.val = ((val & 0x60) |
				       (channel->dma_status.val & 0x01) |
				       (channel->dma_status.val & ~val & 0x06));

	case DMA_PRD_PORT3: {
	    uint_t addr_index = port_offset & 0x3;
	    uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);

	    if (addr_index + length > 4) {
		PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);

	    // Patch the selected bytes of the PRD table address
	    for (i = 0; i < length; i++) {
		addr_buf[addr_index + i] = *((uint8_t *)src + i);

	    PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);

	    PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
778 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
779 struct ide_internal * ide = (struct ide_internal *)private_data;
780 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
781 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
782 struct ide_channel * channel = &(ide->channels[channel_flag]);
784 PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
786 if (port_offset + length > 16) {
787 PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
791 memcpy(dst, channel->dma_ports + port_offset, length);
793 PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
/*
 * I/O hook for guest writes to the ATA command register (0x1f7/0x177).
 * Dispatches on the command byte: identify, packet, PIO/DMA reads and
 * writes, power management, set-features, set-multiple, reset, etc.
 * Unknown commands are aborted.  Returns length on success, -1 on
 * failure.
 *
 * NOTE(review): many braces, 'break's, returns, and some statements
 * (e.g. the length check, drive_reset calls, 'else' arms) appear
 * elided from this listing — restore from upstream before building.
 */
static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
    struct ide_internal * ide = priv_data;
    struct ide_channel * channel = get_selected_channel(ide, port);
    struct ide_drive * drive = get_selected_drive(channel);

	PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);

    PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);

    channel->cmd_reg = *(uint8_t *)src;

    switch (channel->cmd_reg) {

	case ATA_PIDENTIFY: // ATAPI Identify Device Packet
	    // Only valid for CDROM drives; otherwise abort
	    if (drive->drive_type != BLOCK_CDROM) {

		// JRL: Should we abort here?
		ide_abort_command(ide, channel);

		atapi_identify_device(drive);

		channel->error_reg.val = 0;
		channel->status.val = 0x58; // ready, data_req, seek_complete

		ide_raise_irq(ide, channel);

	case ATA_IDENTIFY: // Identify Device
	    // Only valid for hard disks; otherwise abort
	    if (drive->drive_type != BLOCK_DISK) {

		// JRL: Should we abort here?
		ide_abort_command(ide, channel);

		ata_identify_device(drive);

		channel->error_reg.val = 0;
		channel->status.val = 0x58;

		ide_raise_irq(ide, channel);

	case ATA_PACKETCMD: // ATAPI Command Packet
	    if (drive->drive_type != BLOCK_CDROM) {
		ide_abort_command(ide, channel);

	    drive->sector_count = 1;

	    channel->status.busy = 0;
	    channel->status.write_fault = 0;
	    channel->status.data_req = 1;
	    channel->status.error = 0;

	    // reset the data buffer...
	    drive->transfer_length = ATAPI_PACKET_SIZE;
	    drive->transfer_index = 0;

	case ATA_READ: // Read Sectors with Retry
	case ATA_READ_ONCE: // Read Sectors without Retry
	    drive->hd_state.cur_sector_num = 1;

	    if (ata_read_sectors(ide, channel) == -1) {
		PrintError(core->vm_info, core, "Error reading sectors\n");
		ide_abort_command(ide,channel);

	case ATA_READ_EXT: // Read Sectors Extended
	    drive->hd_state.cur_sector_num = 1;

	    if (ata_read_sectors_ext(ide, channel) == -1) {
		PrintError(core->vm_info, core, "Error reading extended sectors\n");
		ide_abort_command(ide,channel);

	case ATA_WRITE_ONCE: {// Write Sector
	    drive->hd_state.cur_sector_num = 1;

	    if (ata_write_sectors(ide, channel) == -1) {
		PrintError(core->vm_info, core, "Error writing sectors\n");
		ide_abort_command(ide,channel);

	case ATA_READDMA: // Read DMA with retry
	case ATA_READDMA_ONCE: { // Read DMA
	    // sector_count of 0 encodes 256 sectors
	    uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;

	    if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
		PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
		ide_abort_command(ide, channel);

	    drive->hd_state.cur_sector_num = 1;

	    drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
	    drive->transfer_index = 0;

	    if (channel->dma_status.active == 1) {
		if (dma_read(core, ide, channel) == -1) {
		    PrintError(core->vm_info, core, "Failed DMA Read\n");
		    ide_abort_command(ide, channel);
		PrintError(core->vm_info,core,"Attempt to initiate DMA read on channel that is not active\n");
		ide_abort_command(ide, channel);

	case ATA_WRITEDMA: { // Write DMA
	    uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;

	    if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
		PrintError(core->vm_info,core,"Cannot get lba\n");
		ide_abort_command(ide, channel);

	    drive->hd_state.cur_sector_num = 1;

	    drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
	    drive->transfer_index = 0;

	    if (channel->dma_status.active == 1) {
		if (dma_write(core, ide, channel) == -1) {
		    PrintError(core->vm_info, core, "Failed DMA Write\n");
		    ide_abort_command(ide, channel);
		PrintError(core->vm_info,core,"Attempt to initiate DMA write with DMA inactive\n");
		ide_abort_command(ide, channel);

	// Power-management commands: treated as no-ops that just report ready
	case ATA_STANDBYNOW1: // Standby Now 1
	case ATA_IDLEIMMEDIATE: // Set Idle Immediate
	case ATA_STANDBY: // Standby
	case ATA_SETIDLE1: // Set Idle 1
	case ATA_SLEEPNOW1: // Sleep Now 1
	case ATA_STANDBYNOW2: // Standby Now 2
	case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
	case ATA_STANDBY2: // Standby 2
	case ATA_SETIDLE2: // Set idle 2
	case ATA_SLEEPNOW2: // Sleep Now 2
	    channel->status.val = 0;
	    channel->status.ready = 1;
	    ide_raise_irq(ide, channel);

	case ATA_SETFEATURES: // Set Features
	    // Prior to this the features register has been written to.
	    // This command tells the drive to check if the new value is supported (the value is drive specific)
	    // Common is that bit0=DMA enable
	    // If valid the drive raises an interrupt, if not it aborts.

	    // Do some checking here...

	    channel->status.busy = 0;
	    channel->status.write_fault = 0;
	    channel->status.error = 0;
	    channel->status.ready = 1;
	    channel->status.seek_complete = 1;

	    ide_raise_irq(ide, channel);

	case ATA_SPECIFY: // Initialize Drive Parameters
	case ATA_RECAL: // recalibrate?
	    channel->status.error = 0;
	    channel->status.ready = 1;
	    channel->status.seek_complete = 1;
	    ide_raise_irq(ide, channel);

	case ATA_SETMULT: { // Set multiple mode (IDE Block mode)
	    // This makes the drive transfer multiple sectors before generating an interrupt
	    uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS

	    if (tmp_sect_num > MAX_MULT_SECTORS) {
		ide_abort_command(ide, channel);

	    if (drive->sector_count == 0) {
		drive->hd_state.mult_sector_num= 1;
		drive->hd_state.mult_sector_num = drive->sector_count;

	    channel->status.ready = 1;
	    channel->status.error = 0;

	    ide_raise_irq(ide, channel);

	case ATA_DEVICE_RESET: // Reset Device
	    channel->error_reg.val = 0x01;
	    channel->status.busy = 0;
	    channel->status.ready = 1;
	    channel->status.seek_complete = 1;
	    channel->status.write_fault = 0;
	    channel->status.error = 0;

	case ATA_CHECKPOWERMODE1: // Check power mode
	    drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
	    channel->status.busy = 0;
	    channel->status.ready = 1;
	    channel->status.write_fault = 0;
	    channel->status.data_req = 0;
	    channel->status.error = 0;

	case ATA_MULTREAD: // read multiple sectors
	    // interrupt once per mult_sector_num-sized cluster
	    drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;

	    PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
	    ide_abort_command(ide, channel);
/*
 * PIO data-port read for a hard disk: copy up to one sector's worth of
 * bytes from the drive's local buffer into the guest, refilling the
 * buffer from the backend at each sector boundary, and raising an
 * interrupt at the end of each configured sector cluster or at the end
 * of the whole transfer.
 *
 * NOTE(review): closing braces / error returns appear elided from this
 * listing — restore from upstream before building.
 */
static int read_hd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Offset of this access within the current sector
    int data_offset = drive->transfer_index % HD_SECTOR_SIZE;

    PrintDebug(VM_NONE,VCORE_NONE, "Read HD data: transfer_index %x transfer length %x current sector numer %x\n",
	       drive->transfer_index, drive->transfer_length,
	       drive->hd_state.cur_sector_num);

    if (drive->transfer_index >= drive->transfer_length) {
	PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
		   drive->transfer_length, drive->transfer_index,
		   drive->transfer_index + length);

    if (data_offset + length > HD_SECTOR_SIZE) {
	PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%d length=%u)!\n",data_offset,length);

    // For index==0, the read has been done in ata_read_sectors
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
	// advance to next sector and read it
	drive->current_lba++;

	if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
	    PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");

    PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
	       *(uint32_t *)(drive->data_buf + data_offset),
	       length, data_offset);

    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;

    /* This is the trigger for interrupt injection.
     * For read single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
	(drive->transfer_index == drive->transfer_length)) {
	if (drive->transfer_index < drive->transfer_length) {
	    // An increment is complete, but there is still more data to be transferred...
	    PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
	    channel->status.data_req = 1;
	    PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
	    // This was the final read of the request
	    channel->status.data_req = 0;

	channel->status.ready = 1;
	channel->status.busy = 0;

	ide_raise_irq(ide, channel);
/*
 * PIO data-port write for a hard disk: accumulate guest bytes in the
 * drive's local buffer and flush a full sector to the backend whenever
 * the buffer reaches a sector boundary; interrupt at the end of each
 * sector cluster or at the end of the transfer (mirror of
 * read_hd_data).
 *
 * NOTE(review): closing braces / error returns appear elided from this
 * listing — restore from upstream before building.
 */
static int write_hd_data(uint8_t * src, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Offset of this access within the current sector
    int data_offset = drive->transfer_index % HD_SECTOR_SIZE;

    PrintDebug(VM_NONE,VCORE_NONE, "Write HD data: transfer_index %x transfer length %x current sector numer %x\n",
	       drive->transfer_index, drive->transfer_length,
	       drive->hd_state.cur_sector_num);

    if (drive->transfer_index >= drive->transfer_length) {
	PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
		   drive->transfer_length, drive->transfer_index,
		   drive->transfer_index + length);

    if (data_offset + length > HD_SECTOR_SIZE) {
	PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%d length=%u)!\n",data_offset,length);

    // Copy data into our buffer - there will be room due to
    // (a) the ata_write test below is flushing sectors
    // (b) if we somehow get a sector-stradling write (an error), this will
    //     be OK since the buffer itself is >1 sector in memory
    memcpy(drive->data_buf + data_offset, src, length);

    drive->transfer_index += length;

    if ((data_offset+length) >= HD_SECTOR_SIZE) {
	// Write out the sector we just finished
	if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
	    PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");

	// go onto next sector
	drive->current_lba++;

    /* This is the trigger for interrupt injection.
     * For write single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
	(drive->transfer_index == drive->transfer_length)) {
	if (drive->transfer_index < drive->transfer_length) {
	    // An increment is complete, but there is still more data to be transferred...
	    PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
	    channel->status.data_req = 1;
	    PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
	    // This was the final read of the request
	    channel->status.data_req = 0;

	channel->status.ready = 1;
	channel->status.busy = 0;

	ide_raise_irq(ide, channel);
/*
 * PIO data-port read for a CDROM (ATAPI): copy bytes from the drive's
 * buffer into the guest, refilling the buffer at each ATAPI block
 * boundary, then update the ATAPI interrupt-reason flags and the
 * request length in the cylinder registers before interrupting.
 *
 * NOTE(review): closing braces / error returns appear elided from this
 * listing — restore from upstream before building.
 */
static int read_cd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // Offset of this access within the current ATAPI block
    int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
    //  int req_offset = drive->transfer_index % drive->req_len;

    // 0x28 is ATAPI READ(10); skip the verbose logging for bulk reads
    if (drive->cd_state.atapi_cmd != 0x28) {
	PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
	PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%d, transfer idx=%d\n", drive->transfer_length, drive->transfer_index);

    if (drive->transfer_index >= drive->transfer_length) {
	PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n",
		   drive->transfer_length, drive->transfer_index,
		   drive->transfer_index + length);

    // Refill the local buffer at each block boundary (first block was
    // filled when the command started)
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
	if (atapi_update_data_buf(ide, channel) == -1) {
	    PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");

    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;

    // Should the req_offset be recalculated here?????
    if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
	if (drive->transfer_index < drive->transfer_length) {
	    // An increment is complete, but there is still more data to be transferred...

	    channel->status.data_req = 1;

	    drive->irq_flags.c_d = 0;

	    // Update the request length in the cylinder regs
	    if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
		PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");

	    // This was the final read of the request

	    channel->status.data_req = 0;
	    channel->status.ready = 1;

	    drive->irq_flags.c_d = 1;
	    drive->irq_flags.rel = 0;

	drive->irq_flags.io_dir = 1;
	channel->status.busy = 0;

	ide_raise_irq(ide, channel);
/*
 * Service a data-port read while an IDENTIFY / IDENTIFY PACKET command is
 * active: stream the pre-built 512-byte ID block from data_buf into `dst`.
 * Clears DRQ once the whole ID structure has been consumed.
 */
1254 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1255 struct ide_drive * drive = get_selected_drive(channel);
// Status for an in-progress IDENTIFY: not busy, ready, no faults/errors.
1257 channel->status.busy = 0;
1258 channel->status.ready = 1;
1259 channel->status.write_fault = 0;
1260 channel->status.seek_complete = 1;
1261 channel->status.corrected = 0;
1262 channel->status.error = 0;
1265 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1266 drive->transfer_index += length;
// Entire ID block delivered -> drop DRQ so the guest stops reading.
1268 if (drive->transfer_index >= drive->transfer_length) {
1269 channel->status.data_req = 0;
/*
 * I/O hook for guest reads of the 16-bit data port (0x1f0 / 0x170).
 * Dispatches on the pending command / drive type:
 *   - IDENTIFY or IDENTIFY PACKET -> stream the drive ID block
 *   - CD-ROM drive                -> ATAPI data read
 *   - hard disk                   -> PIO sector read
 *   - no drive                    -> return zeros
 */
1277 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1278 struct ide_internal * ide = priv_data;
1279 struct ide_channel * channel = get_selected_channel(ide, port);
1280 struct ide_drive * drive = get_selected_drive(channel);
1282 //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
// IDENTIFY data takes priority regardless of drive type.
1284 if ((channel->cmd_reg == ATA_IDENTIFY) ||
1285 (channel->cmd_reg == ATA_PIDENTIFY)) {
1286 return read_drive_id((uint8_t *)dst, length, ide, channel);
1289 if (drive->drive_type == BLOCK_CDROM) {
1290 if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1291 PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1294 } else if (drive->drive_type == BLOCK_DISK) {
1295 if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1296 PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
// No drive present: hand back zeroed data rather than failing the read.
1300 memset((uint8_t *)dst, 0, length);
1306 // For the write side, we care both about
1307 // direct PIO writes to a drive as well as
1308 // writes that pass a packet through to an CD
/*
 * I/O hook for guest writes to the 16-bit data port.
 *   - CD-ROM + ATA PACKET cmd -> accumulate the 12-byte ATAPI command packet,
 *     then dispatch it once complete
 *   - hard disk               -> PIO sector write
 *   - otherwise               -> silently ignored (no writable CD support)
 */
1309 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1310 struct ide_internal * ide = priv_data;
1311 struct ide_channel * channel = get_selected_channel(ide, port);
1312 struct ide_drive * drive = get_selected_drive(channel);
1314 PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n",
1315 port, *(uint32_t *)src, length);
1317 if (drive->drive_type == BLOCK_CDROM) {
1318 if (channel->cmd_reg == ATA_PACKETCMD) {
1319 // short command packet - no check for space...
1320 memcpy(drive->data_buf + drive->transfer_index, src, length);
1321 drive->transfer_index += length;
// Full packet received -> hand it to the ATAPI layer.
1322 if (drive->transfer_index >= drive->transfer_length) {
1323 if (atapi_handle_packet(core, ide, channel) == -1) {
1324 PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1329 PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1332 } else if (drive->drive_type == BLOCK_DISK) {
1333 if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1334 PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1338 // nothing ... do not support writable cd
/*
 * I/O hook for guest writes to the non-data task-file and control registers
 * of either channel. Each case stores the value into the channel/drive
 * shadow state; most task-file writes are mirrored into BOTH drives since
 * the registers are shared until drive select resolves them.
 * NOTE(review): elided view — break statements and the switch framing are
 * not visible here.
 */
1344 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1345 struct ide_internal * ide = priv_data;
1346 struct ide_channel * channel = get_selected_channel(ide, port);
1347 struct ide_drive * drive = get_selected_drive(channel);
// Only byte-sized accesses are legal on these registers.
1350 PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1354 PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1357 // reset and interrupt enable
1359 case SEC_CTRL_PORT: {
1360 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1362 // only reset channel on a 0->1 reset bit transition
1363 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1364 channel_reset(channel);
1365 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
// 1->0 transition finishes the reset sequence.
1366 channel_reset_complete(channel);
1369 channel->ctrl_reg.val = tmp_ctrl->val;
1372 case PRI_FEATURES_PORT:
1373 case SEC_FEATURES_PORT:
1374 channel->features.val = *(uint8_t *)src;
// Task-file registers below are written to both drives on the channel.
1377 case PRI_SECT_CNT_PORT:
1378 case SEC_SECT_CNT_PORT:
1379 channel->drives[0].sector_count = *(uint8_t *)src;
1380 channel->drives[1].sector_count = *(uint8_t *)src;
1383 case PRI_SECT_NUM_PORT:
1384 case SEC_SECT_NUM_PORT:
1385 channel->drives[0].sector_num = *(uint8_t *)src;
1386 channel->drives[1].sector_num = *(uint8_t *)src;
1388 case PRI_CYL_LOW_PORT:
1389 case SEC_CYL_LOW_PORT:
1390 channel->drives[0].cylinder_low = *(uint8_t *)src;
1391 channel->drives[1].cylinder_low = *(uint8_t *)src;
1394 case PRI_CYL_HIGH_PORT:
1395 case SEC_CYL_HIGH_PORT:
1396 channel->drives[0].cylinder_high = *(uint8_t *)src;
1397 channel->drives[1].cylinder_high = *(uint8_t *)src;
1400 case PRI_DRV_SEL_PORT:
1401 case SEC_DRV_SEL_PORT: {
1402 channel->drive_head.val = *(uint8_t *)src;
1404 // make sure the reserved bits are ok..
1405 // JRL TODO: check with new ramdisk to make sure this is right...
// Bits 5 and 7 are fixed to 1 in the drive/head register.
1406 channel->drive_head.val |= 0xa0;
// Re-resolve the selected drive now that drive-select may have changed.
1408 drive = get_selected_drive(channel);
1410 // Selecting a non-present device is a no-no
1411 if (drive->drive_type == BLOCK_NONE) {
1412 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1413 channel->error_reg.abort = 1;
1414 channel->status.error = 1;
// Present drive selected: report a clean, ready status.
1416 channel->status.busy = 0;
1417 channel->status.ready = 1;
1418 channel->status.data_req = 0;
1419 channel->status.error = 0;
1420 channel->status.seek_complete = 1;
1422 channel->dma_status.active = 0;
1423 channel->dma_status.err = 0;
1429 PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
/*
 * I/O hook for guest reads of the non-data task-file and status registers.
 * Returns the selected drive's shadow register values; absent drives read
 * as 0 (or 0xa0 for drive-select, preserving the fixed reserved bits).
 * NOTE(review): elided view — break statements and the switch framing are
 * not visible here.
 */
1436 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1437 struct ide_internal * ide = priv_data;
1438 struct ide_channel * channel = get_selected_channel(ide, port);
1439 struct ide_drive * drive = get_selected_drive(channel);
// Only byte-sized accesses are legal on these registers.
1442 PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1446 PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1448 if ((port == PRI_ADDR_REG_PORT) ||
1449 (port == SEC_ADDR_REG_PORT)) {
1450 // unused, return 0xff
1451 *(uint8_t *)dst = 0xff;
1456 // if no drive is present just return 0 + reserved bits
1457 if (drive->drive_type == BLOCK_NONE) {
1458 if ((port == PRI_DRV_SEL_PORT) ||
1459 (port == SEC_DRV_SEL_PORT)) {
1460 *(uint8_t *)dst = 0xa0;
1462 *(uint8_t *)dst = 0;
1470 // This is really the error register.
1471 case PRI_FEATURES_PORT:
1472 case SEC_FEATURES_PORT:
1473 *(uint8_t *)dst = channel->error_reg.val;
1476 case PRI_SECT_CNT_PORT:
1477 case SEC_SECT_CNT_PORT:
1478 *(uint8_t *)dst = drive->sector_count;
1481 case PRI_SECT_NUM_PORT:
1482 case SEC_SECT_NUM_PORT:
1483 *(uint8_t *)dst = drive->sector_num;
1486 case PRI_CYL_LOW_PORT:
1487 case SEC_CYL_LOW_PORT:
1488 *(uint8_t *)dst = drive->cylinder_low;
1492 case PRI_CYL_HIGH_PORT:
1493 case SEC_CYL_HIGH_PORT:
1494 *(uint8_t *)dst = drive->cylinder_high;
1497 case PRI_DRV_SEL_PORT:
1498 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1499 *(uint8_t *)dst = channel->drive_head.val;
1506 // Something about lowering interrupts here....
1507 *(uint8_t *)dst = channel->status.val;
1511 PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1515 PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
/*
 * Reset a single drive slot to its power-on defaults: ATA reset signature
 * in the task-file shadow (sector_count/sector_num = 1), no backing device,
 * empty model string / data buffer, and zeroed geometry.
 */
1522 static void init_drive(struct ide_drive * drive) {
// Task-file reset signature values per ATA device reset.
1524 drive->sector_count = 0x01;
1525 drive->sector_num = 0x01;
1526 drive->cylinder = 0x0000;
// No backing block device attached until connect_fn runs.
1528 drive->drive_type = BLOCK_NONE;
1530 memset(drive->model, 0, sizeof(drive->model));
1532 drive->transfer_index = 0;
1533 drive->transfer_length = 0;
1534 memset(drive->data_buf, 0, sizeof(drive->data_buf));
1536 drive->num_cylinders = 0;
1537 drive->num_heads = 0;
1538 drive->num_sectors = 0;
1541 drive->private_data = NULL;
/*
 * Reset one IDE channel: default register values (error=0x01 diagnostic
 * pass, ctrl=0x08), cleared DMA state, and both drive slots re-initialized.
 */
1545 static void init_channel(struct ide_channel * channel) {
// 0x01 = diagnostic passed (default post-reset error register value).
1548 channel->error_reg.val = 0x01;
1550 //** channel->features = 0x0;
1552 channel->drive_head.val = 0x00;
1553 channel->status.val = 0x00;
1554 channel->cmd_reg = 0x00;
1555 channel->ctrl_reg.val = 0x08;
// Bus-master DMA engine state starts idle.
1557 channel->dma_cmd.val = 0;
1558 channel->dma_status.val = 0;
1559 channel->dma_prd_addr = 0;
1560 channel->dma_tbl_index = 0;
// Both master and slave slots start empty.
1562 for (i = 0; i < 2; i++) {
1563 init_drive(&(channel->drives[i]));
/*
 * PCI config-space write callback registered with the PCI bus; currently
 * only logs the update (no emulated side effects visible in this view).
 */
1569 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1570 PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1572 struct ide_internal * ide = (struct ide_internal *)(private_data);
// NOTE(review): stray `info` argument here shifts the format string out of
// position in the PrintDebug call — looks like a leftover from an older
// logging macro; confirm against the PrintDebug signature and remove.
1574 PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
/*
 * Initialize both IDE channels to power-on defaults and assign the legacy
 * ISA IRQ lines (14 primary, 15 secondary).
 */
1580 static int init_ide_state(struct ide_internal * ide) {
1583 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1586 init_channel(&(ide->channels[0]));
1587 ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1589 init_channel(&(ide->channels[1]));
1590 ide->channels[1].irq = SEC_DEFAULT_IRQ ;
/*
 * Device teardown hook (installed as dev_ops.free). Currently a stub; PCI
 * deregistration is an acknowledged TODO.
 */
1599 static int ide_free(struct ide_internal * ide) {
1601 // deregister from PCI?
1608 #ifdef V3_CONFIG_CHECKPOINT
1610 #include <palacios/vmm_sprintf.h>
/*
 * Checkpoint-save callback: serializes the whole IDE device into the
 * checkpoint store. Layout: a (currently empty) top-level context named
 * `id`, then one context per channel ("<id>-<ch>") with the channel
 * registers, then one per drive ("<id>-<ch>-<drv>") with drive state plus
 * type-specific (CD vs HD) fields. On any failure, falls through to the
 * savefailout label which closes any open context and reports the error.
 */
1612 static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1613 struct ide_internal * ide = (struct ide_internal *)private_data;
1614 struct v3_chkpt_ctx *ctx=0;
1620 ctx=v3_chkpt_open_ctx(chkpt,id);
1623 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
1627 // nothing saved yet
1629 v3_chkpt_close_ctx(ctx);ctx=0;
1632 for (ch_num = 0; ch_num < 2; ch_num++) {
1633 struct ide_channel * ch = &(ide->channels[ch_num]);
// Per-channel context name: "<id>-<channel>".
1635 snprintf(buf, 128, "%s-%d", id, ch_num);
1637 ctx = v3_chkpt_open_ctx(chkpt, buf);
1640 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
1644 V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
1645 V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
1646 V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
1647 V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
1648 V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
1649 V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
1650 V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
1651 V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
1652 V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
1653 V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);
1655 v3_chkpt_close_ctx(ctx); ctx=0;
1657 for (drive_num = 0; drive_num < 2; drive_num++) {
1658 struct ide_drive * drive = &(ch->drives[drive_num]);
// Per-drive context name: "<id>-<channel>-<drive>".
1660 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1662 ctx = v3_chkpt_open_ctx(chkpt, buf);
1665 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
1669 V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
1670 V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
1671 V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
1672 V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);
1674 V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
1675 V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
1676 V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);
1678 V3_CHKPT_SAVE(ctx, "DATA_BUF", drive->data_buf, savefailout);
1681 /* For now we'll just pack the type specific data at the end... */
1682 /* We should probably add a new context here in the future... */
1683 if (drive->drive_type == BLOCK_CDROM) {
1684 V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
1685 V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
1686 V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
1687 } else if (drive->drive_type == BLOCK_DISK) {
1688 V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
1689 V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
1690 V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
1691 } else if (drive->drive_type == BLOCK_NONE) {
1692 // no drive connected, so no data
1694 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1698 v3_chkpt_close_ctx(ctx); ctx=0;
// savefailout path: close any context still open, report failure.
1706 PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
1707 if (ctx) {v3_chkpt_close_ctx(ctx); }
/*
 * Checkpoint-load callback: exact mirror of ide_save_extended, restoring
 * channel registers and per-drive state from the same context layout
 * ("<id>", "<id>-<ch>", "<id>-<ch>-<drv>"). Failures jump to loadfailout,
 * which closes any open context and reports the error.
 */
1713 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1714 struct ide_internal * ide = (struct ide_internal *)private_data;
1715 struct v3_chkpt_ctx *ctx=0;
1720 ctx=v3_chkpt_open_ctx(chkpt,id);
1723 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1727 // nothing saved yet
1729 v3_chkpt_close_ctx(ctx);ctx=0;
1732 for (ch_num = 0; ch_num < 2; ch_num++) {
1733 struct ide_channel * ch = &(ide->channels[ch_num]);
// Per-channel context name must match the save side: "<id>-<channel>".
1735 snprintf(buf, 128, "%s-%d", id, ch_num);
1737 ctx = v3_chkpt_open_ctx(chkpt, buf);
1740 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1744 V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1745 V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1746 V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1747 V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1748 V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1749 V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1750 V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1751 V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1752 V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1753 V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1755 v3_chkpt_close_ctx(ctx); ctx=0;
1757 for (drive_num = 0; drive_num < 2; drive_num++) {
1758 struct ide_drive * drive = &(ch->drives[drive_num]);
// Per-drive context name must match the save side: "<id>-<ch>-<drv>".
1760 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1762 ctx = v3_chkpt_open_ctx(chkpt, buf);
1765 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1769 V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1770 V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1771 V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1772 V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1774 V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1775 V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1776 V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1778 V3_CHKPT_LOAD(ctx, "DATA_BUF", drive->data_buf, loadfailout);
1781 /* For now we'll just pack the type specific data at the end... */
1782 /* We should probably add a new context here in the future... */
1783 if (drive->drive_type == BLOCK_CDROM) {
1784 V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1785 V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1786 V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1787 } else if (drive->drive_type == BLOCK_DISK) {
1788 V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1789 V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1790 V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1791 } else if (drive->drive_type == BLOCK_NONE) {
1792 // no drive connected, so no data
1794 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
// loadfailout path: close any context still open, report failure.
1803 PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1804 if (ctx) {v3_chkpt_close_ctx(ctx); }
/*
 * Device-manager operations table for the IDE device. Checkpoint hooks are
 * only compiled in when V3_CONFIG_CHECKPOINT is enabled.
 */
1814 static struct v3_device_ops dev_ops = {
1815 .free = (int (*)(void *))ide_free,
1816 #ifdef V3_CONFIG_CHECKPOINT
1817 .save_extended = ide_save_extended,
1818 .load_extended = ide_load_extended
/*
 * Block-frontend connect callback: attaches a backend block device (from
 * the VM config tree) to a specific channel/drive slot. Required config
 * attributes: bus_num, drive_num, type ("cdrom" or "hd"); optional: model.
 * For hard disks, CHS geometry is derived from the backend capacity with
 * fixed 63 sectors/track and 16 heads.
 * NOTE(review): elided view — bus_num/drive_num range checks, if any, are
 * not visible here; assumed validated elsewhere — confirm.
 */
1825 static int connect_fn(struct v3_vm_info * vm,
1826 void * frontend_data,
1827 struct v3_dev_blk_ops * ops,
1828 v3_cfg_tree_t * cfg,
1829 void * private_data) {
1830 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1831 struct ide_channel * channel = NULL;
1832 struct ide_drive * drive = NULL;
1834 char * bus_str = v3_cfg_val(cfg, "bus_num");
1835 char * drive_str = v3_cfg_val(cfg, "drive_num");
1836 char * type_str = v3_cfg_val(cfg, "type");
1837 char * model_str = v3_cfg_val(cfg, "model");
1839 uint_t drive_num = 0;
1842 if ((!type_str) || (!drive_str) || (!bus_str)) {
1843 PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1847 bus_num = atoi(bus_str);
1848 drive_num = atoi(drive_str);
1850 channel = &(ide->channels[bus_num]);
1851 drive = &(channel->drives[drive_num]);
// Refuse to attach over an already-connected drive slot.
1853 if (drive->drive_type != BLOCK_NONE) {
1854 PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1858 if (model_str != NULL) {
1859 strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1862 if (strcasecmp(type_str, "cdrom") == 0) {
1863 drive->drive_type = BLOCK_CDROM;
// ATAPI IDENTIFY model field is space-padded to 40 characters.
1865 while (strlen((char *)(drive->model)) < 40) {
1866 strcat((char*)(drive->model), " ");
1869 } else if (strcasecmp(type_str, "hd") == 0) {
1870 drive->drive_type = BLOCK_DISK;
1872 drive->hd_state.accessed = 0;
1873 drive->hd_state.mult_sector_num = 1;
// Fixed translation geometry: 63 sectors/track, 16 heads; cylinders derived
// from backend capacity in 512-byte sectors.
1875 drive->num_sectors = 63;
1876 drive->num_heads = 16;
1877 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
1879 PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
1886 // Hardcode this for now, but its not a good idea....
// 0x80 into PCI config 0x41/0x43 — presumably the PIIX IDE timing-enable
// bits for the channel; TODO confirm against the PIIX datasheet.
1887 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1890 drive->private_data = private_data;
/*
 * Device-manager init entry point for the "IDE" device. Allocates the
 * controller state, resolves the optional PCI bus and required southbridge
 * (when PCI-attached), registers the device, hooks all legacy ATA I/O
 * ports for both channels, registers the PCI function (PIIX3 IDE,
 * vendor 0x8086 / device 0x7010) with a bus-master DMA BAR, and finally
 * registers the block frontend so backends can connect via connect_fn.
 * NOTE(review): elided view — error returns and closing braces for several
 * branches are not visible here.
 */
1898 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1899 struct ide_internal * ide = NULL;
1900 char * dev_id = v3_cfg_val(cfg, "ID");
1903 PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
1905 ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1908 PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
1912 memset(ide, 0, sizeof(struct ide_internal));
// PCI attachment is optional; when a bus is given a southbridge is required.
1915 ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1917 if (ide->pci_bus != NULL) {
1918 struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1921 PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
1926 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1929 PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
1931 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
1934 PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
1939 if (init_ide_state(ide) == -1) {
1940 PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
1941 v3_remove_device(dev);
1945 PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
// Hook the full legacy task-file port ranges for both channels; hook
// failures are OR-ed into `ret` and checked once at the end.
1947 ret |= v3_dev_hook_io(dev, PRI_DATA_PORT,
1948 &read_data_port, &write_data_port);
1949 ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT,
1950 &read_port_std, &write_port_std);
1951 ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
1952 &read_port_std, &write_port_std);
1953 ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
1954 &read_port_std, &write_port_std);
1955 ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
1956 &read_port_std, &write_port_std);
1957 ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
1958 &read_port_std, &write_port_std);
1959 ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
1960 &read_port_std, &write_port_std);
1961 ret |= v3_dev_hook_io(dev, PRI_CMD_PORT,
1962 &read_port_std, &write_cmd_port);
1964 ret |= v3_dev_hook_io(dev, SEC_DATA_PORT,
1965 &read_data_port, &write_data_port);
1966 ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT,
1967 &read_port_std, &write_port_std);
1968 ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
1969 &read_port_std, &write_port_std);
1970 ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
1971 &read_port_std, &write_port_std);
1972 ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
1973 &read_port_std, &write_port_std);
1974 ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
1975 &read_port_std, &write_port_std);
1976 ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
1977 &read_port_std, &write_port_std);
1978 ret |= v3_dev_hook_io(dev, SEC_CMD_PORT,
1979 &read_port_std, &write_cmd_port);
// Control and (unused) address registers for both channels.
1982 ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT,
1983 &read_port_std, &write_port_std);
1985 ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT,
1986 &read_port_std, &write_port_std);
1989 ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
1990 &read_port_std, &write_port_std);
1992 ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
1993 &read_port_std, &write_port_std);
1997 PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
1998 v3_remove_device(dev);
// PCI registration (only reached when attached to a PCI bus).
2004 struct v3_pci_bar bars[6];
2005 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
2006 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
2007 struct pci_device * pci_dev = NULL;
2010 PrintDebug(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
2012 for (i = 0; i < 6; i++) {
2013 bars[i].type = PCI_BAR_NONE;
// BAR4: 16-port bus-master DMA register block, base assigned by the guest.
2016 bars[4].type = PCI_BAR_IO;
2017 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
2018 bars[4].default_base_port = -1;
2019 bars[4].num_ports = 16;
2021 bars[4].io_read = read_dma_port;
2022 bars[4].io_write = write_dma_port;
2023 bars[4].private_data = ide;
// Register as function 1 of the southbridge's PCI device number.
2025 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
2027 pci_config_update, NULL, NULL, NULL, ide);
2029 if (pci_dev == NULL) {
2030 PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i);
2031 v3_remove_device(dev);
2035 /* This is for CMD646 devices
2036 pci_dev->config_header.vendor_id = 0x1095;
2037 pci_dev->config_header.device_id = 0x0646;
2038 pci_dev->config_header.revision = 0x8f07;
// Identify as an Intel PIIX3 IDE function (8086:7010).
2041 pci_dev->config_header.vendor_id = 0x8086;
2042 pci_dev->config_header.device_id = 0x7010;
2043 pci_dev->config_header.revision = 0x00;
2045 pci_dev->config_header.prog_if = 0x80; // Master IDE device
2046 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
2047 pci_dev->config_header.class = PCI_CLASS_STORAGE;
2049 pci_dev->config_header.command = 0;
2050 pci_dev->config_header.status = 0x0280;
2052 ide->ide_pci = pci_dev;
// Finally expose the device as a block frontend target.
2057 if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
2058 PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
2059 v3_remove_device(dev);
2064 PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
// Register this device with the Palacios device manager under the name "IDE",
// making ide_init the config-driven constructor.
2070 device_register("IDE", ide_init)
2075 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num,
2076 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2078 struct ide_internal * ide = ide_data;
2079 struct ide_channel * channel = &(ide->channels[channel_num]);
2080 struct ide_drive * drive = &(channel->drives[drive_num]);
2082 if (drive->drive_type == BLOCK_NONE) {
2086 *cylinders = drive->num_cylinders;
2087 *heads = drive->num_heads;
2088 *sectors = drive->num_sectors;