2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
/* When IDE debugging is not configured, PrintDebug compiles away to nothing.
 * NOTE(review): the matching #else/#endif is not visible in this chunk. */
29 #ifndef V3_CONFIG_DEBUG_IDE
31 #define PrintDebug(fmt, args...)
/* Default legacy IRQ lines for the two IDE channels. */
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
/* Primary channel legacy I/O ports: command block 0x1f0-0x1f7,
 * control block 0x3f6-0x3f7. */
38 #define PRI_DATA_PORT 0x1f0
39 #define PRI_FEATURES_PORT 0x1f1
40 #define PRI_SECT_CNT_PORT 0x1f2
41 #define PRI_SECT_NUM_PORT 0x1f3
42 #define PRI_CYL_LOW_PORT 0x1f4
43 #define PRI_CYL_HIGH_PORT 0x1f5
44 #define PRI_DRV_SEL_PORT 0x1f6
45 #define PRI_CMD_PORT 0x1f7
46 #define PRI_CTRL_PORT 0x3f6
47 #define PRI_ADDR_REG_PORT 0x3f7
/* Secondary channel legacy I/O ports: 0x170-0x177, 0x376-0x377. */
49 #define SEC_DATA_PORT 0x170
50 #define SEC_FEATURES_PORT 0x171
51 #define SEC_SECT_CNT_PORT 0x172
52 #define SEC_SECT_NUM_PORT 0x173
53 #define SEC_CYL_LOW_PORT 0x174
54 #define SEC_CYL_HIGH_PORT 0x175
55 #define SEC_DRV_SEL_PORT 0x176
56 #define SEC_CMD_PORT 0x177
57 #define SEC_CTRL_PORT 0x376
58 #define SEC_ADDR_REG_PORT 0x377
/* Default base ports for the PCI busmaster (DMA) register blocks. */
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
/* Per-drive staging buffer size; also the ATAPI logical block size.
 * Hard-disk sectors are the classic 512 bytes. */
64 #define DATA_BUFFER_SIZE 2048
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
/* Debug names for the primary channel's registers.  Entries 0-7 are the
 * command-block ports 0x1f0-0x1f7; entries 8-9 are the control and
 * address-register ports at 0x3f6/0x3f7 (see io_port_to_str()). */
static const char * ide_pri_port_strs[] = {
    "PRI_DATA",
    "PRI_FEATURES",
    "PRI_SECT_CNT",
    "PRI_SECT_NUM",
    "PRI_CYL_LOW",
    "PRI_CYL_HIGH",
    "PRI_DRV_SEL",
    "PRI_CMD",
    "PRI_CTRL",
    "PRI_ADDR_REG"
};
/* Debug names for the secondary channel's registers.  Entries 0-7 are the
 * command-block ports 0x170-0x177; entries 8-9 are the control and
 * address-register ports at 0x376/0x377 (see io_port_to_str()). */
static const char * ide_sec_port_strs[] = {
    "SEC_DATA",
    "SEC_FEATURES",
    "SEC_SECT_CNT",
    "SEC_SECT_NUM",
    "SEC_CYL_LOW",
    "SEC_CYL_HIGH",
    "SEC_DRV_SEL",
    "SEC_CMD",
    "SEC_CTRL",
    "SEC_ADDR_REG"
};
/* Debug names for the busmaster (DMA) registers, indexed by the low three
 * bits of the port (see dma_port_to_str()).  Offsets 1 and 3 hold no
 * register, hence the NULL placeholders. */
static const char * ide_dma_port_strs[] = {
    "DMA_CMD",
    NULL,
    "DMA_STATUS",
    NULL,
    "DMA_PRD0",
    "DMA_PRD1",
    "DMA_PRD2",
    "DMA_PRD3"
};
/* Kind of media attached to a drive slot (or none). */
typedef enum {
    BLOCK_NONE,    /* no drive attached */
    BLOCK_DISK,    /* ATA hard disk     */
    BLOCK_CDROM    /* ATAPI CD-ROM      */
} v3_block_type_t;
/*
 * io_port_to_str - map a legacy ATA I/O port number to a printable
 * register name for debug output.  Command-block ports index entries
 * 0-7 of the per-channel name tables; control/addr-reg ports map to
 * entries 8-9.
 * NOTE(review): this chunk is truncated - the fallback return for an
 * unrecognized port is not visible in this view.
 */
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
/* +8 skips the eight command-block entries to reach "PRI_CTRL"/"PRI_ADDR_REG" */
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
/* dma_port_to_str - name a busmaster (DMA) register from the low three
 * bits of its port offset.  Offsets 1 and 3 yield NULL (no register). */
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
/* Per-drive CD-ROM (ATAPI) state: last sense data and the configured
 * error-recovery mode page.
 * NOTE(review): this whole struct region is truncated - many member
 * declarations and closing braces are not visible in this view. */
105 struct ide_cd_state {
106 struct atapi_sense_data sense;
109 struct atapi_error_recovery err_recovery;
/* Per-drive hard-disk (ATA) state. */
112 struct ide_hd_state {
115 /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116 uint32_t mult_sector_num;
118 /* This is the current op sector size:
119 * for multiple sector ops this equals mult_sector_num
120 * for standard ops this equals 1
122 uint64_t cur_sector_num;
/* Per-drive state (struct ide_drive, presumably - the struct header is
 * not visible here): media type, backend block-device callbacks, and
 * media-specific sub-state. */
128 v3_block_type_t drive_type;
130 struct v3_dev_blk_ops * ops;
133 struct ide_cd_state cd_state;
134 struct ide_hd_state hd_state;
139 // Where we are in the data transfer
140 uint64_t transfer_index;
142 // the length of a transfer
143 // calculated for easy access
144 uint64_t transfer_length;
/* Current logical block address of the in-progress operation. */
146 uint64_t current_lba;
148 // We have a local data buffer that we use for IO port accesses
149 uint8_t data_buf[DATA_BUFFER_SIZE];
/* Drive geometry (CHS). */
152 uint32_t num_cylinders;
154 uint32_t num_sectors;
/* LBA48 taskfile shadow state: registers 0x1f2-0x1f5 take two-step
 * writes (high byte first), tracked by the *_state flags below. */
160 uint16_t sector_count; // for LBA48
161 uint8_t sector_count_state; // two step write to 1f2/172 (high first)
162 uint8_t lba41_state; // two step write to 1f3
163 uint8_t lba52_state; // two step write to 1f4
164 uint8_t lba63_state; // two step write to 15
/* Taskfile register overlays (packed to match the hardware layout). */
170 uint8_t sector_count; // 0x1f2,0x172 (ATA)
171 struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
172 } __attribute__((packed));
176 uint8_t sector_num; // 0x1f3,0x173
178 } __attribute__((packed));
185 uint8_t cylinder_low; // 0x1f4,0x174
186 uint8_t cylinder_high; // 0x1f5,0x175
187 } __attribute__((packed));
192 } __attribute__((packed));
195 // The transfer length requested by the CPU
197 } __attribute__((packed));
/* Per-channel state (struct ide_channel, presumably): two drives plus
 * the shared command-block, control, and busmaster registers. */
204 struct ide_drive drives[2];
207 struct ide_error_reg error_reg; // [read] 0x1f1,0x171
209 struct ide_features_reg features;
211 struct ide_drive_head_reg drive_head; // 0x1f6,0x176
213 struct ide_status_reg status; // [read] 0x1f7,0x177
214 uint8_t cmd_reg; // [write] 0x1f7,0x177
216 int irq; // this is temporary until we add PCI support
219 struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
/* Busmaster (DMA) registers; dma_ports appears to overlay the typed
 * register structs below (see read_dma_port) - TODO confirm the union
 * layout, which is not visible in this view. */
222 uint8_t dma_ports[8];
224 struct ide_dma_cmd_reg dma_cmd;
226 struct ide_dma_status_reg dma_status;
228 uint32_t dma_prd_addr;
229 } __attribute__((packed));
230 } __attribute__((packed));
/* Index of the next PRD entry to process during a DMA transfer. */
232 uint32_t dma_tbl_index;
/* Top-level device state: both channels plus links to the PCI bus,
 * southbridge, and owning VM. */
237 struct ide_internal {
238 struct ide_channel channels[2];
240 struct v3_southbridge * southbridge;
241 struct vm_device * pci_bus;
243 struct pci_device * ide_pci;
245 struct v3_vm_info * vm;
252 /* Utility functions */
/* be_to_le_16 - byte-swap a 16-bit value by reinterpreting it as bytes.
 * Used for endianness conversion of identify/ATAPI wire fields. */
254 static inline uint16_t be_to_le_16(const uint16_t val) {
255 uint8_t * buf = (uint8_t *)&val;
256 return (buf[0] << 8) | (buf[1]) ;
/* le_to_be_16 - same swap in the other direction; delegates because the
 * operation is its own inverse. */
259 static inline uint16_t le_to_be_16(const uint16_t val) {
260 return be_to_le_16(val);
/* be_to_le_32 - byte-swap a 32-bit value. */
264 static inline uint32_t be_to_le_32(const uint32_t val) {
265 uint8_t * buf = (uint8_t *)&val;
266 return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
/* le_to_be_32 - symmetric 32-bit swap; delegates like the 16-bit case. */
269 static inline uint32_t le_to_be_32(const uint32_t val) {
270 return be_to_le_32(val);
/* is_lba28 - drive-head register selects LBA mode with both legacy
 * "always one" bits (rsvd1/rsvd2) set. */
274 static inline int is_lba28(struct ide_channel * channel) {
275 return channel->drive_head.lba_mode && channel->drive_head.rsvd1 && channel->drive_head.rsvd2;
/* is_lba48 - LBA mode with both legacy bits clear. */
278 static inline int is_lba48(struct ide_channel * channel) {
279 return channel->drive_head.lba_mode && !channel->drive_head.rsvd1 && !channel->drive_head.rsvd2;
/* is_chs - cylinder/head/sector addressing (LBA bit clear). */
282 static inline int is_chs(struct ide_channel * channel) {
283 return !channel->drive_head.lba_mode;
/* get_channel_index - classify an I/O port as belonging to the primary
 * or secondary channel by matching the command-block (0x1f0/0x170),
 * control (0x3f6/0x376), and busmaster (0xc000/0xc008) port ranges.
 * NOTE(review): the return statements for each arm (and the
 * no-match case) are not visible in this truncated view. */
286 static inline int get_channel_index(ushort_t port) {
287 if (((port & 0xfff8) == 0x1f0) ||
288 ((port & 0xfffe) == 0x3f6) ||
289 ((port & 0xfff8) == 0xc000)) {
291 } else if (((port & 0xfff8) == 0x170) ||
292 ((port & 0xfffe) == 0x376) ||
293 ((port & 0xfff8) == 0xc008)) {
/* get_selected_channel - resolve the channel struct addressed by an I/O
 * port via get_channel_index(); logs an error when the port matches
 * neither channel. */
300 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
301 int channel_idx = get_channel_index(port);
302 if (channel_idx >= 0) {
303 return &(ide->channels[channel_idx]);
305 PrintError(VM_NONE,VCORE_NONE,"ide: Cannot Determine Selected Channel\n");
/* get_selected_drive - drive currently addressed by the drive-select
 * bit of the channel's drive/head register. */
310 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
311 return &(channel->drives[channel->drive_head.drive_sel]);
/* ide_raise_irq - deliver the channel's IRQ to the guest unless the
 * guest has masked it via the control register's irq_disable bit.
 * Latches the busmaster "interrupt generated" status bit before
 * raising so the guest's DMA status reads see it. */
318 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
319 if (channel->ctrl_reg.irq_disable == 0) {
321 PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
323 channel->dma_status.int_gen = 1;
324 v3_raise_irq(ide->vm, channel->irq);
326 PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
/* drive_reset - restore a drive's taskfile state to post-reset
 * defaults: sector count/number = 1, cylinder regs hold the device
 * signature, and the staging buffer is cleared. */
331 static void drive_reset(struct ide_drive * drive) {
332 drive->sector_count = 0x01;
333 drive->sector_num = 0x01;
335 PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
/* 0xeb14 in the cylinder registers is the ATAPI device signature;
 * plain ATA disks report 0. */
337 if (drive->drive_type == BLOCK_CDROM) {
338 drive->cylinder = 0xeb14;
340 drive->cylinder = 0x0000;
341 //drive->hd_state.accessed = 0;
345 memset(drive->data_buf, 0, sizeof(drive->data_buf));
346 drive->transfer_index = 0;
348 // Send the reset signal to the connected device callbacks
349 // channel->drives[0].reset();
350 // channel->drives[1].reset();
/* channel_reset - begin a soft reset of the channel: status 0x90
 * (busy + seek complete), diagnostic code 0x01 in the error register,
 * command register cleared to NOP, interrupts re-enabled.  Completion
 * is handled by channel_reset_complete(). */
353 static void channel_reset(struct ide_channel * channel) {
355 // set busy and seek complete flags
356 channel->status.val = 0x90;
359 channel->error_reg.val = 0x01;
362 channel->cmd_reg = 0; // NOP
364 channel->ctrl_reg.irq_disable = 0;
/* channel_reset_complete - finish a soft reset: clear busy, set ready,
 * zero the head number, and reset both attached drives. */
367 static void channel_reset_complete(struct ide_channel * channel) {
368 channel->status.busy = 0;
369 channel->status.ready = 1;
371 channel->drive_head.head_num = 0;
373 drive_reset(&(channel->drives[0]));
374 drive_reset(&(channel->drives[1]));
/* ide_abort_command - signal command abort to the guest: status 0x41
 * (ready + error), error register 0x04 (presumably the ATA ABRT bit -
 * the original author was unsure, see comment below), then raise the
 * channel IRQ. */
378 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
380 PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");
382 channel->status.val = 0x41; // Error + ready
383 channel->error_reg.val = 0x04; // No idea...
385 ide_raise_irq(ide, channel);
389 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
390 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
393 /* ATAPI functions */
/* print_prd_table - debug dump of the guest's PRD (physical region
 * descriptor) table: reads entries from guest physical memory starting
 * at dma_prd_addr until the end-of-table flag, printing each entry's
 * base address, size (0 displayed as 64KB per busmaster convention),
 * and EOT bit.
 * NOTE(review): the loop header and local declarations are not visible
 * in this truncated view. */
401 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
402 struct ide_dma_prd prd_entry;
405 V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
408 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
411 ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
413 if (ret != sizeof(struct ide_dma_prd)) {
414 PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
418 V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
420 (prd_entry.size == 0) ? 0x10000 : prd_entry.size,
421 prd_entry.end_of_table);
423 if (prd_entry.end_of_table) {
/*
 * dma_read - busmaster DMA transfer from the device INTO guest memory.
 * Walks the guest's PRD table entry by entry; within each PRD region,
 * reads one disk sector (or ATAPI block, or the pending ATAPI command
 * data) into the drive's staging buffer and copies it to the guest
 * physical address.  On end-of-table, sets the channel status/DMA
 * status to "done" and raises the IRQ.  Returns 0 on success, -1 on
 * error (error-return lines are truncated from this view).
 * NOTE(review): many intermediate lines (returns, closing braces, the
 * else-arms) are missing from this chunk; comments below describe only
 * what is visible.
 */
435 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
436 struct ide_drive * drive = get_selected_drive(channel);
437 // This is at top level scope to do the EOT test at the end
438 struct ide_dma_prd prd_entry = {};
439 uint_t bytes_left = drive->transfer_length;
441 // Read in the data buffer....
442 // Read a sector/block at a time until the prd entry is full.
444 #ifdef V3_CONFIG_DEBUG_IDE
445 print_prd_table(ide, channel);
448 PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
450 // Loop through the disk data
451 while (bytes_left > 0) {
/* Fetch the next PRD entry from guest physical memory. */
452 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
453 uint_t prd_bytes_left = 0;
454 uint_t prd_offset = 0;
457 PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
459 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
461 if (ret != sizeof(struct ide_dma_prd)) {
462 PrintError(core->vm_info, core, "Could not read PRD\n");
466 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
467 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
469 // loop through the PRD data....
471 if (prd_entry.size == 0) {
472 // a size of 0 means 64k
473 prd_bytes_left = 0x10000;
475 prd_bytes_left = prd_entry.size;
/* Fill this PRD region one sector/block at a time. */
479 while (prd_bytes_left > 0) {
480 uint_t bytes_to_write = 0;
482 if (drive->drive_type == BLOCK_DISK) {
483 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
486 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
487 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
490 } else if (drive->drive_type == BLOCK_CDROM) {
491 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
492 bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
494 if (atapi_read_chunk(ide, channel) == -1) {
495 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
/* Non-data ATAPI command: DMA the command response buffer directly.
 * NOTE(review): this arm appears to complete the transfer and raise
 * the IRQ inline (lines 526-535 below); the early-return that
 * presumably follows is not visible in this view. */
500 PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
505 //V3_Print(core->vm_info, core, "DMA of command packet\n");
507 bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
508 prd_bytes_left = bytes_to_write;
511 // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
512 cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset,
513 bytes_to_write, drive->data_buf);
515 if (cmd_ret!=bytes_to_write) {
516 PrintError(core->vm_info, core, "Failed to write data to memory\n");
524 drive->transfer_index += bytes_to_write;
/* Completion handling for the command-packet path: mark the channel
 * idle/ready and signal the guest. */
526 channel->status.busy = 0;
527 channel->status.ready = 1;
528 channel->status.data_req = 0;
529 channel->status.error = 0;
530 channel->status.seek_complete = 1;
532 channel->dma_status.active = 0;
533 channel->dma_status.err = 0;
535 ide_raise_irq(ide, channel);
/* Common data path: copy the staged sector/block to guest memory. */
541 PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n",
542 (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
544 drive->current_lba++;
546 ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
548 if (ret != bytes_to_write) {
549 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
553 PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
555 drive->transfer_index += ret;
556 prd_bytes_left -= ret;
/* This PRD region is exhausted; advance to the next table entry. */
561 channel->dma_tbl_index++;
/* Sanity check: transfers must end on a sector/block boundary, since
 * sectors spanning PRD descriptors are not handled. */
563 if (drive->drive_type == BLOCK_DISK) {
564 if (drive->transfer_index % HD_SECTOR_SIZE) {
565 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
568 } else if (drive->drive_type == BLOCK_CDROM) {
569 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
570 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
571 PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
572 PrintError(core->vm_info, core, "transfer_index=%llu, transfer_length=%llu\n",
573 drive->transfer_index, drive->transfer_length);
580 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
581 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
/* ATAPI completion interrupt reason: I/O (to host), command/data bit
 * set, no release. */
587 drive->irq_flags.io_dir = 1;
588 drive->irq_flags.c_d = 1;
589 drive->irq_flags.rel = 0;
593 // Update to the next PRD entry
/* Transfer complete: mark channel idle and interrupt the guest. */
597 if (prd_entry.end_of_table) {
598 channel->status.busy = 0;
599 channel->status.ready = 1;
600 channel->status.data_req = 0;
601 channel->status.error = 0;
602 channel->status.seek_complete = 1;
604 channel->dma_status.active = 0;
605 channel->dma_status.err = 0;
608 ide_raise_irq(ide, channel);
/*
 * dma_write - busmaster DMA transfer FROM guest memory to the disk.
 * Walks the guest's PRD table; for each region, copies up to one
 * sector at a time from guest physical memory into the staging buffer
 * and flushes it to the backend with ata_write().  On end-of-table,
 * marks the channel idle and raises the IRQ.  Returns 0 on success,
 * -1 on error (error-return lines are truncated from this view).
 * NOTE(review): intermediate lines (returns, closing braces) are
 * missing from this chunk.
 */
614 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
615 struct ide_drive * drive = get_selected_drive(channel);
616 // This is at top level scope to do the EOT test at the end
617 struct ide_dma_prd prd_entry = {};
618 uint_t bytes_left = drive->transfer_length;
621 PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
623 // Loop through disk data
624 while (bytes_left > 0) {
/* Fetch the next PRD entry from guest physical memory. */
625 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
626 uint_t prd_bytes_left = 0;
627 uint_t prd_offset = 0;
630 PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
632 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
634 if (ret != sizeof(struct ide_dma_prd)) {
635 PrintError(core->vm_info, core, "Could not read PRD\n");
639 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
640 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
643 if (prd_entry.size == 0) {
644 // a size of 0 means 64k
645 prd_bytes_left = 0x10000;
647 prd_bytes_left = prd_entry.size;
/* Drain this PRD region one sector at a time. */
650 while (prd_bytes_left > 0) {
651 uint_t bytes_to_write = 0;
654 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
657 ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
659 if (ret != bytes_to_write) {
/* NOTE(review): "Faild" typo in the message below (runtime string,
 * left untouched here). */
660 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
664 PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
667 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
668 PrintError(core->vm_info, core, "Failed to write data to disk\n");
672 drive->current_lba++;
674 drive->transfer_index += ret;
675 prd_bytes_left -= ret;
/* Region exhausted; advance to the next PRD entry. */
680 channel->dma_tbl_index++;
/* Sector-boundary sanity check: spanning PRD descriptors unsupported. */
682 if (drive->transfer_index % HD_SECTOR_SIZE) {
683 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
687 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
688 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
689 PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%llu)...\n",
690 bytes_left, drive->transfer_length);
691 PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
692 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
694 print_prd_table(ide, channel);
/* Transfer complete: mark channel idle and interrupt the guest. */
699 if (prd_entry.end_of_table) {
700 channel->status.busy = 0;
701 channel->status.ready = 1;
702 channel->status.data_req = 0;
703 channel->status.error = 0;
704 channel->status.seek_complete = 1;
706 channel->dma_status.active = 0;
707 channel->dma_status.err = 0;
710 ide_raise_irq(ide, channel);
/* Busmaster (DMA) register offsets within a channel's register block;
 * DMA_CHANNEL_FLAG (bit 3 of the port) selects primary vs secondary. */
717 #define DMA_CMD_PORT 0x00
718 #define DMA_STATUS_PORT 0x02
719 #define DMA_PRD_PORT0 0x04
720 #define DMA_PRD_PORT1 0x05
721 #define DMA_PRD_PORT2 0x06
722 #define DMA_PRD_PORT3 0x07
724 #define DMA_CHANNEL_FLAG 0x08
727 Note that DMA model is as follows:
729 1. Write the PRD pointer to the busmaster (DMA engine)
730 2. Start the transfer on the device
731 3. Tell the busmaster to start shoveling data (active DMA)
/*
 * write_dma_port - guest write to a busmaster (DMA) register.  The low
 * three bits of the port select the register, bit 3 selects the
 * channel.  Writing the command register with the start bit set kicks
 * off the whole DMA transfer synchronously via dma_read()/dma_write().
 * NOTE(review): several case labels, returns, and closing braces are
 * truncated from this view.
 */
734 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
735 struct ide_internal * ide = (struct ide_internal *)private_data;
736 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
737 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
738 struct ide_channel * channel = &(ide->channels[channel_flag]);
740 PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
741 port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
743 switch (port_offset) {
/* Command register: a 0->start transition resets the PRD walk index
 * and runs the entire transfer in place. */
745 channel->dma_cmd.val = *(uint8_t *)src;
747 PrintDebug(core->vm_info, core, "IDE: dma command write: 0x%x\n", channel->dma_cmd.val);
749 if (channel->dma_cmd.start == 0) {
750 channel->dma_tbl_index = 0;
752 // Launch DMA operation, interrupt at end
754 channel->dma_status.active = 1;
756 if (channel->dma_cmd.read == 1) {
757 // DMA Read the whole thing - dma_read will raise irq
758 if (dma_read(core, ide, channel) == -1) {
759 PrintError(core->vm_info, core, "Failed DMA Read\n");
763 // DMA write the whole thing - dma_write will raiase irw
764 if (dma_write(core, ide, channel) == -1) {
765 PrintError(core->vm_info, core, "Failed DMA Write\n");
771 // Note that guest cannot abort a DMA transfer
772 channel->dma_cmd.start = 0;
/* Status register: write-1-to-clear semantics for the error/interrupt
 * bits (0x06), preserving the active bit (0x01) and taking the
 * drive-capability bits (0x60) from the written value. */
777 case DMA_STATUS_PORT: {
778 // This is intended to clear status
780 uint8_t val = *(uint8_t *)src;
783 PrintError(core->vm_info, core, "Invalid write length for DMA status port\n");
787 // but preserve certain bits
788 channel->dma_status.val = ((val & 0x60) |
789 (channel->dma_status.val & 0x01) |
790 (channel->dma_status.val & ~val & 0x06));
/* PRD address registers: byte-wise update of the 32-bit PRD table
 * pointer; bounds-checked against the 4-byte register. */
797 case DMA_PRD_PORT3: {
798 uint_t addr_index = port_offset & 0x3;
799 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
802 if (addr_index + length > 4) {
803 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
807 for (i = 0; i < length; i++) {
808 addr_buf[addr_index + i] = *((uint8_t *)src + i);
811 PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
816 PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
/* read_dma_port - guest read of a busmaster (DMA) register.  Low three
 * bits select the register, bit 3 selects the channel; data is served
 * from the raw dma_ports byte array. */
824 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
825 struct ide_internal * ide = (struct ide_internal *)private_data;
826 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
827 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
828 struct ide_channel * channel = &(ide->channels[channel_flag]);
830 PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
/* NOTE(review): the bound here is 16 although dma_ports is declared as
 * 8 bytes - presumably the overlaying register structs extend the
 * readable region; confirm against the full struct definition. */
832 if (port_offset + length > 16) {
833 PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
837 memcpy(dst, channel->dma_ports + port_offset, length);
839 PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
/*
 * write_cmd_port - guest write to the ATA command register
 * (0x1f7/0x177).  Latches the command byte and dispatches on it:
 * identify, packet, PIO read/write, DMA read/write setup, power
 * management, set-features, set-multiple, reset, etc.  Unimplemented
 * commands are aborted via ide_abort_command().
 * NOTE(review): many break statements, returns, and closing braces are
 * truncated from this view; comments describe only visible code.
 */
846 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
847 struct ide_internal * ide = priv_data;
848 struct ide_channel * channel = get_selected_channel(ide, port);
849 struct ide_drive * drive = get_selected_drive(channel);
852 PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
856 PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
858 channel->cmd_reg = *(uint8_t *)src;
860 switch (channel->cmd_reg) {
/* Identify commands: fill the data buffer with the identify block and
 * signal data-ready (status 0x58 = ready | data_req | seek_complete). */
862 case ATA_PIDENTIFY: // ATAPI Identify Device Packet (CDROM)
863 if (drive->drive_type != BLOCK_CDROM) {
866 // JRL: Should we abort here?
867 ide_abort_command(ide, channel);
870 atapi_identify_device(drive);
872 channel->error_reg.val = 0;
873 channel->status.val = 0x58; // ready, data_req, seek_complete
875 ide_raise_irq(ide, channel);
879 case ATA_IDENTIFY: // Identify Device
880 if (drive->drive_type != BLOCK_DISK) {
883 // JRL: Should we abort here?
884 ide_abort_command(ide, channel);
886 ata_identify_device(drive);
888 channel->error_reg.val = 0;
889 channel->status.val = 0x58;
891 ide_raise_irq(ide, channel);
/* ATAPI PACKET: prepare to receive the 12-byte command packet via PIO. */
895 case ATA_PACKETCMD: // ATAPI Command Packet (CDROM)
896 if (drive->drive_type != BLOCK_CDROM) {
897 ide_abort_command(ide, channel);
900 drive->sector_count = 1;
902 channel->status.busy = 0;
903 channel->status.write_fault = 0;
904 channel->status.data_req = 1;
905 channel->status.error = 0;
907 // reset the data buffer...
908 drive->transfer_length = ATAPI_PACKET_SIZE;
909 drive->transfer_index = 0;
/* PIO sector reads: interrupt granularity is mult_sector_num for
 * READ MULTIPLE, one sector otherwise. */
913 case ATA_READ: // Read Sectors with Retry
914 case ATA_READ_ONCE: // Read Sectors without Retry
915 case ATA_MULTREAD: // Read multiple sectors per ire
916 case ATA_READ_EXT: // Read Sectors Extended (LBA48)
918 if (channel->cmd_reg==ATA_MULTREAD) {
919 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
921 drive->hd_state.cur_sector_num = 1;
924 if (ata_read_sectors(ide, channel) == -1) {
925 PrintError(core->vm_info, core, "Error reading sectors\n");
926 ide_abort_command(ide,channel);
/* PIO sector writes: same granularity logic as reads. */
930 case ATA_WRITE: // Write Sector with retry
931 case ATA_WRITE_ONCE: // Write Sector without retry
932 case ATA_MULTWRITE: // Write multiple sectors per irq
933 case ATA_WRITE_EXT: // Write Sectors Extended (LBA48)
935 if (channel->cmd_reg==ATA_MULTWRITE) {
936 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
938 drive->hd_state.cur_sector_num = 1;
941 if (ata_write_sectors(ide, channel) == -1) {
942 PrintError(core->vm_info, core, "Error writing sectors\n");
943 ide_abort_command(ide,channel);
/* DMA reads: latch LBA/length; the transfer itself runs when the
 * guest sets the busmaster start bit (see write_dma_port). */
947 case ATA_READDMA: // Read DMA with retry
948 case ATA_READDMA_ONCE: // Read DMA without retry
949 case ATA_READDMA_EXT: { // Read DMA (LBA48)
/* NOTE(review): "§_cnt" below appears to be mis-encoded text for
 * "&sect_cnt" (HTML-entity mangling); left byte-identical here. */
952 if (ata_get_lba_and_size(ide, channel, &(drive->current_lba), §_cnt) == -1) {
953 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
954 ide_abort_command(ide, channel);
958 drive->hd_state.cur_sector_num = 1; // Not used for DMA
960 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
961 drive->transfer_index = 0;
963 // Now we wait for the transfer to be intiated by flipping the
964 // bus-master start bit
/* DMA writes: same deferred-start setup as DMA reads. */
968 case ATA_WRITEDMA: // Write DMA with retry
969 case ATA_WRITEDMA_ONCE: // Write DMA without retry
970 case ATA_WRITEDMA_EXT: { // Write DMA (LBA48)
/* NOTE(review): same "§_cnt" mis-encoding as above. */
974 if (ata_get_lba_and_size(ide, channel, &(drive->current_lba),§_cnt) == -1) {
975 PrintError(core->vm_info,core,"Cannot get lba\n");
976 ide_abort_command(ide, channel);
980 drive->hd_state.cur_sector_num = 1; // Not used for DMA
982 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
983 drive->transfer_index = 0;
985 // Now we wait for the transfer to be intiated by flipping the
986 // bus-master start bit
/* Power-management commands: acknowledged with ready status + IRQ,
 * no actual state change. */
990 case ATA_STANDBYNOW1: // Standby Now 1
991 case ATA_IDLEIMMEDIATE: // Set Idle Immediate
992 case ATA_STANDBY: // Standby
993 case ATA_SETIDLE1: // Set Idle 1
994 case ATA_SLEEPNOW1: // Sleep Now 1
995 case ATA_STANDBYNOW2: // Standby Now 2
996 case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
997 case ATA_STANDBY2: // Standby 2
998 case ATA_SETIDLE2: // Set idle 2
999 case ATA_SLEEPNOW2: // Sleep Now 2
1000 channel->status.val = 0;
1001 channel->status.ready = 1;
1002 ide_raise_irq(ide, channel);
1005 case ATA_SETFEATURES: // Set Features
1006 // Prior to this the features register has been written to.
1007 // This command tells the drive to check if the new value is supported (the value is drive specific)
1008 // Common is that bit0=DMA enable
1009 // If valid the drive raises an interrupt, if not it aborts.
1011 // Do some checking here...
1013 channel->status.busy = 0;
1014 channel->status.write_fault = 0;
1015 channel->status.error = 0;
1016 channel->status.ready = 1;
1017 channel->status.seek_complete = 1;
1019 ide_raise_irq(ide, channel);
1022 case ATA_SPECIFY: // Initialize Drive Parameters
1023 case ATA_RECAL: // recalibrate?
1024 channel->status.error = 0;
1025 channel->status.ready = 1;
1026 channel->status.seek_complete = 1;
1027 ide_raise_irq(ide, channel);
/* SET MULTIPLE MODE: a count of 0 is rejected and aborts the command;
 * otherwise the multi-sector transfer granularity is latched. */
1030 case ATA_SETMULT: { // Set multiple mode (IDE Block mode)
1031 // This makes the drive transfer multiple sectors before generating an interrupt
1033 if (drive->sector_count == 0) {
1034 PrintError(core->vm_info,core,"Attempt to set multiple to zero\n");
1035 drive->hd_state.mult_sector_num= 1;
1036 ide_abort_command(ide,channel);
1039 drive->hd_state.mult_sector_num = drive->sector_count;
1042 channel->status.ready = 1;
1043 channel->status.error = 0;
1045 ide_raise_irq(ide, channel);
1050 case ATA_DEVICE_RESET: // Reset Device
1052 channel->error_reg.val = 0x01;
1053 channel->status.busy = 0;
1054 channel->status.ready = 1;
1055 channel->status.seek_complete = 1;
1056 channel->status.write_fault = 0;
1057 channel->status.error = 0;
1060 case ATA_CHECKPOWERMODE1: // Check power mode
1061 drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1062 channel->status.busy = 0;
1063 channel->status.ready = 1;
1064 channel->status.write_fault = 0;
1065 channel->status.data_req = 0;
1066 channel->status.error = 0;
1070 PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1071 ide_abort_command(ide, channel);
/*
 * read_hd_data - serve a PIO data-port read for a hard disk.  Copies
 * `length` bytes from the staging buffer at the current offset within
 * the sector, refilling the buffer from the backend when a new sector
 * boundary is crossed, and raises the completion/increment IRQ per the
 * configured multi-sector granularity.
 * NOTE(review): several returns and closing braces are truncated from
 * this view.
 */
1081 static int read_hd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1082 struct ide_drive * drive = get_selected_drive(channel);
1083 uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1086 PrintDebug(VM_NONE,VCORE_NONE, "Read HD data: transfer_index %llu transfer length %llu current sector numer %llu\n",
1087 drive->transfer_index, drive->transfer_length,
1088 drive->hd_state.cur_sector_num);
1090 if (drive->transfer_index >= drive->transfer_length && drive->transfer_index>=DATA_BUFFER_SIZE) {
1091 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1092 drive->transfer_length, drive->transfer_index,
1093 drive->transfer_index + length);
/* Reject reads that would straddle a sector boundary. */
1098 if (data_offset + length > HD_SECTOR_SIZE) {
1099 PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1102 // For index==0, the read has been done in ata_read_sectors
1103 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1104 // advance to next sector and read it
1106 drive->current_lba++;
1108 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1109 PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
1115 PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
1116 *(uint32_t *)(drive->data_buf + data_offset),
1117 length, data_offset);
1119 memcpy(dst, drive->data_buf + data_offset, length);
1121 drive->transfer_index += length;
1124 /* This is the trigger for interrupt injection.
1125 * For read single sector commands we interrupt after every sector
1126 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1127 * cur_sector_num is configured depending on the operation we are currently running
1128 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1130 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1131 (drive->transfer_index == drive->transfer_length)) {
1132 if (drive->transfer_index < drive->transfer_length) {
1133 // An increment is complete, but there is still more data to be transferred...
1134 PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1135 channel->status.data_req = 1;
1137 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1138 // This was the final read of the request
1139 channel->status.data_req = 0;
1142 channel->status.ready = 1;
1143 channel->status.busy = 0;
1145 ide_raise_irq(ide, channel);
/*
 * write_hd_data - serve a PIO data-port write for a hard disk.
 * Accumulates `length` bytes into the staging buffer at the current
 * offset within the sector, flushes a completed sector to the backend
 * with ata_write(), and raises the completion/increment IRQ per the
 * configured multi-sector granularity (mirror of read_hd_data).
 * NOTE(review): several returns and closing braces are truncated from
 * this view.
 */
1152 static int write_hd_data(uint8_t * src, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1153 struct ide_drive * drive = get_selected_drive(channel);
1154 uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1157 PrintDebug(VM_NONE,VCORE_NONE, "Write HD data: transfer_index %llu transfer length %llu current sector numer %llu\n",
1158 drive->transfer_index, drive->transfer_length,
1159 drive->hd_state.cur_sector_num);
1161 if (drive->transfer_index >= drive->transfer_length) {
1162 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1163 drive->transfer_length, drive->transfer_index,
1164 drive->transfer_index + length);
/* Reject writes that would straddle a sector boundary. */
1168 if (data_offset + length > HD_SECTOR_SIZE) {
1169 PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1172 // Copy data into our buffer - there will be room due to
1173 // (a) the ata_write test below is flushing sectors
1174 // (b) if we somehow get a sector-stradling write (an error), this will
1175 // be OK since the buffer itself is >1 sector in memory
1176 memcpy(drive->data_buf + data_offset, src, length);
1178 drive->transfer_index += length;
1180 if ((data_offset+length) >= HD_SECTOR_SIZE) {
1181 // Write out the sector we just finished
1182 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
1183 PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
1187 // go onto next sector
1188 drive->current_lba++;
1191 /* This is the trigger for interrupt injection.
1192 * For write single sector commands we interrupt after every sector
1193 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1194 * cur_sector_num is configured depending on the operation we are currently running
1195 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1197 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1198 (drive->transfer_index == drive->transfer_length)) {
1199 if (drive->transfer_index < drive->transfer_length) {
1200 // An increment is complete, but there is still more data to be transferred...
1201 PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n")
1202 channel->status.data_req = 1;
1204 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1205 // This was the final read of the request
1206 channel->status.data_req = 0;
1209 channel->status.ready = 1;
1210 channel->status.busy = 0;
1212 ide_raise_irq(ide, channel);
/*
 * Read CD (ATAPI) data from the drive's staging buffer into dst.
 * Copies `length` bytes starting at the current offset within the
 * 2048-byte ATAPI block, advances transfer_index, refills the staging
 * buffer when a block boundary is crossed, and updates channel status
 * and ATAPI interrupt-reason flags, raising an IRQ when an increment
 * or the whole request completes.
 */
1220 static int read_cd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1221 struct ide_drive * drive = get_selected_drive(channel);
// Byte offset of the next read within the current ATAPI block.
1222 uint64_t data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1223 // int req_offset = drive->transfer_index % drive->req_len;
// 0x28 is the ATAPI READ(10) opcode; suppress the verbose logging for
// bulk data reads and only log the smaller command responses.
1225 if (drive->cd_state.atapi_cmd != 0x28) {
1226 PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%llu) (req_len=%u)\n", length, drive->req_len);
1227 PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%llu, transfer idx=%llu\n", drive->transfer_length, drive->transfer_index);
// Overrun guard: reject reads past the requested transfer, but allow
// short transfers that still fit inside the staging buffer.
1232 if (drive->transfer_index >= drive->transfer_length && drive->transfer_index>=DATA_BUFFER_SIZE) {
1233 PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1234 drive->transfer_length, drive->transfer_index,
1235 drive->transfer_index + length);
// At the start of each block after the first, pull the next block of
// data from the backend into the staging buffer.
1240 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1241 if (atapi_update_data_buf(ide, channel) == -1) {
1242 PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
1247 memcpy(dst, drive->data_buf + data_offset, length);
1249 drive->transfer_index += length;
1252 // Should the req_offset be recalculated here?????
1253 if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1254 if (drive->transfer_index < drive->transfer_length) {
1255 // An increment is complete, but there is still more data to be transferred...
1257 channel->status.data_req = 1;
// c_d=0: still in the data-transfer phase (not command).
1259 drive->irq_flags.c_d = 0;
1261 // Update the request length in the cylinder regs
1262 if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1263 PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
1267 // This was the final read of the request
1270 channel->status.data_req = 0;
1271 channel->status.ready = 1;
// Interrupt reason for completion: command phase, no release,
// direction = device-to-host.
1273 drive->irq_flags.c_d = 1;
1274 drive->irq_flags.rel = 0;
1277 drive->irq_flags.io_dir = 1;
1278 channel->status.busy = 0;
1280 ide_raise_irq(ide, channel);
/*
 * Service a data-port read while an IDENTIFY / PIDENTIFY command is
 * active: copy the next `length` bytes of the prepared identify data
 * from the drive buffer, and drop DRQ once the whole block has been
 * consumed.
 */
1287 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1288 struct ide_drive * drive = get_selected_drive(channel);
// Identify data is available immediately: not busy, ready, seek done,
// and no error/fault conditions.
1290 channel->status.busy = 0;
1291 channel->status.ready = 1;
1292 channel->status.write_fault = 0;
1293 channel->status.seek_complete = 1;
1294 channel->status.corrected = 0;
1295 channel->status.error = 0;
// NOTE(review): no bounds check on transfer_index + length here —
// presumably the guest only reads up to transfer_length; confirm.
1298 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1299 drive->transfer_index += length;
// Entire identify block transferred: clear the data-request flag.
1301 if (drive->transfer_index >= drive->transfer_length) {
1302 channel->status.data_req = 0;
/*
 * I/O hook for guest reads of the IDE data port (0x1f0 / 0x170).
 * Dispatches on the active command and the selected drive's type:
 * IDENTIFY data, ATAPI (CD) data, or hard-disk sector data. Reads
 * that match none of these return zeroed bytes to the guest.
 */
1310 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1311 struct ide_internal * ide = priv_data;
1312 struct ide_channel * channel = get_selected_channel(ide, port);
1313 struct ide_drive * drive = get_selected_drive(channel);
1315 //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
// IDENTIFY responses take priority regardless of drive type.
1317 if ((channel->cmd_reg == ATA_IDENTIFY) ||
1318 (channel->cmd_reg == ATA_PIDENTIFY)) {
1319 return read_drive_id((uint8_t *)dst, length, ide, channel);
1322 if (drive->drive_type == BLOCK_CDROM) {
1323 if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1324 PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1327 } else if (drive->drive_type == BLOCK_DISK) {
1328 if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1329 PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
// No drive / unknown command: hand back zeros.
1333 memset((uint8_t *)dst, 0, length);
1339 // For the write side, we care both about
1340 // direct PIO writes to a drive as well as
1341 // writes that pass a packet through to an CD
/*
 * I/O hook for guest writes to the IDE data port.
 * For CD drives under ATA_PACKETCMD this accumulates ATAPI packet
 * bytes into the drive buffer and dispatches the packet once all
 * bytes have arrived; for disks it streams sector data through
 * write_hd_data(). CD data writes outside a packet are unsupported.
 */
1342 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1343 struct ide_internal * ide = priv_data;
1344 struct ide_channel * channel = get_selected_channel(ide, port);
1345 struct ide_drive * drive = get_selected_drive(channel);
1347 PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n",
1348 port, *(uint32_t *)src, length);
1350 if (drive->drive_type == BLOCK_CDROM) {
1351 if (channel->cmd_reg == ATA_PACKETCMD) {
1352 // short command packet - no check for space...
1353 memcpy(drive->data_buf + drive->transfer_index, src, length);
1354 drive->transfer_index += length;
// Full packet received: decode and execute it.
1355 if (drive->transfer_index >= drive->transfer_length) {
1356 if (atapi_handle_packet(core, ide, channel) == -1) {
1357 PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1362 PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1365 } else if (drive->drive_type == BLOCK_DISK) {
1366 if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1367 PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1371 // nothing ... do not support writable cd
1377 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1378 struct ide_internal * ide = priv_data;
1379 struct ide_channel * channel = get_selected_channel(ide, port);
1380 struct ide_drive * drive = get_selected_drive(channel);
1383 PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1387 PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1390 // reset and interrupt enable
1392 case SEC_CTRL_PORT: {
1393 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1395 // only reset channel on a 0->1 reset bit transition
1396 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1397 channel_reset(channel);
1398 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1399 channel_reset_complete(channel);
1402 channel->ctrl_reg.val = tmp_ctrl->val;
1405 case PRI_FEATURES_PORT:
1406 case SEC_FEATURES_PORT:
1407 channel->features.val = *(uint8_t *)src;
1410 case PRI_SECT_CNT_PORT:
1411 case SEC_SECT_CNT_PORT:
1412 // update CHS and LBA28 state
1413 channel->drives[0].sector_count = *(uint8_t *)src;
1414 channel->drives[1].sector_count = *(uint8_t *)src;
1416 // update LBA48 state
1417 if (is_lba48(channel)) {
1418 uint16_t val = *(uint8_t*)src; // top bits zero;
1419 if (!channel->drives[0].lba48.sector_count_state) {
1420 channel->drives[0].lba48.sector_count = val<<8;
1422 channel->drives[0].lba48.sector_count |= val;
1424 channel->drives[0].lba48.sector_count_state ^= 1;
1425 if (!channel->drives[1].lba48.sector_count_state) {
1426 channel->drives[1].lba48.sector_count = val<<8;
1428 channel->drives[1].lba48.sector_count |= val;
1430 channel->drives[0].lba48.sector_count_state ^= 1;
1435 case PRI_SECT_NUM_PORT:
1436 case SEC_SECT_NUM_PORT:
1437 // update CHS and LBA28 state
1438 channel->drives[0].sector_num = *(uint8_t *)src;
1439 channel->drives[1].sector_num = *(uint8_t *)src;
1441 // update LBA48 state
1442 if (is_lba48(channel)) {
1443 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1444 if (!channel->drives[0].lba48.lba41_state) {
1445 channel->drives[0].lba48.lba |= val<<24;
1447 channel->drives[0].lba48.lba |= val;
1449 channel->drives[0].lba48.lba41_state ^= 1;
1450 if (!channel->drives[1].lba48.lba41_state) {
1451 channel->drives[1].lba48.lba |= val<<24;
1453 channel->drives[1].lba48.lba |= val;
1455 channel->drives[1].lba48.lba41_state ^= 1;
1459 case PRI_CYL_LOW_PORT:
1460 case SEC_CYL_LOW_PORT:
1461 // update CHS and LBA28 state
1462 channel->drives[0].cylinder_low = *(uint8_t *)src;
1463 channel->drives[1].cylinder_low = *(uint8_t *)src;
1465 // update LBA48 state
1466 if (is_lba48(channel)) {
1467 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1468 if (!channel->drives[0].lba48.lba52_state) {
1469 channel->drives[0].lba48.lba |= val<<32;
1471 channel->drives[0].lba48.lba |= val<<8;
1473 channel->drives[0].lba48.lba52_state ^= 1;
1474 if (!channel->drives[1].lba48.lba52_state) {
1475 channel->drives[1].lba48.lba |= val<<32;
1477 channel->drives[1].lba48.lba |= val<<8;
1479 channel->drives[1].lba48.lba52_state ^= 1;
1484 case PRI_CYL_HIGH_PORT:
1485 case SEC_CYL_HIGH_PORT:
1486 // update CHS and LBA28 state
1487 channel->drives[0].cylinder_high = *(uint8_t *)src;
1488 channel->drives[1].cylinder_high = *(uint8_t *)src;
1490 // update LBA48 state
1491 if (is_lba48(channel)) {
1492 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1493 if (!channel->drives[0].lba48.lba63_state) {
1494 channel->drives[0].lba48.lba |= val<<40;
1496 channel->drives[0].lba48.lba |= val<<16;
1498 channel->drives[0].lba48.lba63_state ^= 1;
1499 if (!channel->drives[1].lba48.lba63_state) {
1500 channel->drives[1].lba48.lba |= val<<40;
1502 channel->drives[1].lba48.lba |= val<<16;
1504 channel->drives[1].lba48.lba63_state ^= 1;
1509 case PRI_DRV_SEL_PORT:
1510 case SEC_DRV_SEL_PORT: {
1511 struct ide_drive_head_reg nh, oh;
1513 oh.val = channel->drive_head.val;
1514 channel->drive_head.val = nh.val = *(uint8_t *)src;
1517 if ((oh.val & 0xe0) != (nh.val & 0xe0)) {
1518 // reset LBA48 state
1519 channel->drives[0].lba48.sector_count_state=0;
1520 channel->drives[0].lba48.lba41_state=0;
1521 channel->drives[0].lba48.lba52_state=0;
1522 channel->drives[0].lba48.lba63_state=0;
1523 channel->drives[1].lba48.sector_count_state=0;
1524 channel->drives[1].lba48.lba41_state=0;
1525 channel->drives[1].lba48.lba52_state=0;
1526 channel->drives[1].lba48.lba63_state=0;
1530 drive = get_selected_drive(channel);
1532 // Selecting a non-present device is a no-no
1533 if (drive->drive_type == BLOCK_NONE) {
1534 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1535 channel->error_reg.abort = 1;
1536 channel->status.error = 1;
1538 channel->status.busy = 0;
1539 channel->status.ready = 1;
1540 channel->status.data_req = 0;
1541 channel->status.error = 0;
1542 channel->status.seek_complete = 1;
1544 channel->dma_status.active = 0;
1545 channel->dma_status.err = 0;
1551 PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
/*
 * I/O hook for reads of the standard IDE registers. Returns the
 * per-drive / per-channel shadow register values. The two ADDR_REG
 * ports are unimplemented and read as 0xff; an absent drive reads as
 * 0 (0xa0 for the drive-select register, matching its fixed bits).
 */
1558 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1559 struct ide_internal * ide = priv_data;
1560 struct ide_channel * channel = get_selected_channel(ide, port);
1561 struct ide_drive * drive = get_selected_drive(channel);
// These registers are one byte wide.
1564 PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1568 PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1570 if ((port == PRI_ADDR_REG_PORT) ||
1571 (port == SEC_ADDR_REG_PORT)) {
1572 // unused, return 0xff
1573 *(uint8_t *)dst = 0xff;
1578 // if no drive is present just return 0 + reserved bits
1579 if (drive->drive_type == BLOCK_NONE) {
1580 if ((port == PRI_DRV_SEL_PORT) ||
1581 (port == SEC_DRV_SEL_PORT)) {
1582 *(uint8_t *)dst = 0xa0;
1584 *(uint8_t *)dst = 0;
1592 // This is really the error register.
1593 case PRI_FEATURES_PORT:
1594 case SEC_FEATURES_PORT:
1595 *(uint8_t *)dst = channel->error_reg.val;
1598 case PRI_SECT_CNT_PORT:
1599 case SEC_SECT_CNT_PORT:
1600 *(uint8_t *)dst = drive->sector_count;
1603 case PRI_SECT_NUM_PORT:
1604 case SEC_SECT_NUM_PORT:
1605 *(uint8_t *)dst = drive->sector_num;
1608 case PRI_CYL_LOW_PORT:
1609 case SEC_CYL_LOW_PORT:
1610 *(uint8_t *)dst = drive->cylinder_low;
1614 case PRI_CYL_HIGH_PORT:
1615 case SEC_CYL_HIGH_PORT:
1616 *(uint8_t *)dst = drive->cylinder_high;
1619 case PRI_DRV_SEL_PORT:
1620 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1621 *(uint8_t *)dst = channel->drive_head.val;
1628 // Something about lowering interrupts here....
1629 *(uint8_t *)dst = channel->status.val;
1633 PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1637 PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
/*
 * Reset a single drive's emulated state to power-on defaults:
 * no media type attached (BLOCK_NONE), cleared model string and data
 * buffer, zeroed transfer counters and zeroed CHS geometry.
 */
1644 static void init_drive(struct ide_drive * drive) {
// Default CHS registers per ATA reset values.
1646 drive->sector_count = 0x01;
1647 drive->sector_num = 0x01;
1648 drive->cylinder = 0x0000;
1650 drive->drive_type = BLOCK_NONE;
1652 memset(drive->model, 0, sizeof(drive->model));
1654 drive->transfer_index = 0;
1655 drive->transfer_length = 0;
1656 memset(drive->data_buf, 0, sizeof(drive->data_buf));
1658 drive->num_cylinders = 0;
1659 drive->num_heads = 0;
1660 drive->num_sectors = 0;
// Backend (block frontend) handle is attached later via connect_fn.
1663 drive->private_data = NULL;
/*
 * Reset one IDE channel to power-on defaults: register shadow state,
 * DMA engine state, and both attached drive slots.
 */
1667 static void init_channel(struct ide_channel * channel) {
// Error register resets to 0x01 (diagnostics passed) per ATA.
1670 channel->error_reg.val = 0x01;
1672 //** channel->features = 0x0;
1674 channel->drive_head.val = 0x00;
1675 channel->status.val = 0x00;
1676 channel->cmd_reg = 0x00;
1677 channel->ctrl_reg.val = 0x08;
1679 channel->dma_cmd.val = 0;
1680 channel->dma_status.val = 0;
1681 channel->dma_prd_addr = 0;
1682 channel->dma_tbl_index = 0;
// Reset both drive slots (master and slave).
1684 for (i = 0; i < 2; i++) {
1685 init_drive(&(channel->drives[i]));
1691 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1692 PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1694 struct ide_internal * ide = (struct ide_internal *)(private_data);
1696 PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
/*
 * Initialize both IDE channels to reset state and assign the legacy
 * ISA IRQ lines (14 primary / 15 secondary).
 */
1702 static int init_ide_state(struct ide_internal * ide) {
1705 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1708 init_channel(&(ide->channels[0]));
1709 ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1711 init_channel(&(ide->channels[1]));
1712 ide->channels[1].irq = SEC_DEFAULT_IRQ ;
/*
 * Device teardown callback (installed in dev_ops.free).
 * Releases the IDE controller's state.
 */
1721 static int ide_free(struct ide_internal * ide) {
1723 // deregister from PCI?
1730 #ifdef V3_CONFIG_CHECKPOINT
1732 #include <palacios/vmm_sprintf.h>
/*
 * Checkpoint save handler. Serializes the IDE controller state into
 * the checkpoint under a hierarchy of contexts: one (currently empty)
 * top-level context named `id`, one "<id>-<ch>" context per channel
 * for the channel registers, and one "<id>-<ch>-<drv>" context per
 * drive for the drive registers, transfer state, data buffer, media
 * type specific state and LBA48 FIFO state.
 *
 * Returns 0 on success, -1 on failure (any open context is closed on
 * the failure path).
 */
1734 static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1735 struct ide_internal * ide = (struct ide_internal *)private_data;
1736 struct v3_chkpt_ctx *ctx=0;
1742 ctx=v3_chkpt_open_ctx(chkpt,id);
1745 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
1749 // nothing saved yet
1751 v3_chkpt_close_ctx(ctx);ctx=0;
// Per-channel register state.
1754 for (ch_num = 0; ch_num < 2; ch_num++) {
1755 struct ide_channel * ch = &(ide->channels[ch_num]);
1757 snprintf(buf, 128, "%s-%d", id, ch_num);
1759 ctx = v3_chkpt_open_ctx(chkpt, buf);
1762 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
1766 V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
1767 V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
1768 V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
1769 V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
1770 V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
1771 V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
1772 V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
1773 V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
1774 V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
1775 V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);
1779 v3_chkpt_close_ctx(ctx); ctx=0;
// Per-drive state within this channel.
1781 for (drive_num = 0; drive_num < 2; drive_num++) {
1782 struct ide_drive * drive = &(ch->drives[drive_num]);
1784 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1786 ctx = v3_chkpt_open_ctx(chkpt, buf);
1789 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
1793 V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
1794 V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
1795 V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
1796 V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);
1798 V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
1799 V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
1800 V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);
1802 V3_CHKPT_SAVE(ctx, "DATA_BUF", drive->data_buf, savefailout);
1805 /* For now we'll just pack the type specific data at the end... */
1806 /* We should probably add a new context here in the future... */
1807 if (drive->drive_type == BLOCK_CDROM) {
1808 V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
1809 V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
1810 V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
1811 } else if (drive->drive_type == BLOCK_DISK) {
1812 V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
1813 V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
1814 V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
1815 } else if (drive->drive_type == BLOCK_NONE) {
1816 // no drive connected, so no data
1818 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
// LBA48 two-write FIFO state, saved for all drive types.
1822 V3_CHKPT_SAVE(ctx, "LBA48_LBA", drive->lba48.lba, savefailout);
1823 V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, savefailout);
1824 V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, savefailout);
1825 V3_CHKPT_SAVE(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, savefailout);
1826 V3_CHKPT_SAVE(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, savefailout);
1827 V3_CHKPT_SAVE(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, savefailout);
1829 v3_chkpt_close_ctx(ctx); ctx=0;
1837 PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
1838 if (ctx) {v3_chkpt_close_ctx(ctx); }
/*
 * Checkpoint restore handler. Mirror of ide_save_extended(): reads
 * back the channel and drive state from the same context hierarchy
 * ("<id>", "<id>-<ch>", "<id>-<ch>-<drv>") and in the same field
 * order, so the two functions must be kept in sync.
 *
 * Returns 0 on success, -1 on failure (any open context is closed on
 * the failure path).
 */
1844 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1845 struct ide_internal * ide = (struct ide_internal *)private_data;
1846 struct v3_chkpt_ctx *ctx=0;
1851 ctx=v3_chkpt_open_ctx(chkpt,id);
1854 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1858 // nothing saved yet
1860 v3_chkpt_close_ctx(ctx);ctx=0;
// Per-channel register state.
1863 for (ch_num = 0; ch_num < 2; ch_num++) {
1864 struct ide_channel * ch = &(ide->channels[ch_num]);
1866 snprintf(buf, 128, "%s-%d", id, ch_num);
1868 ctx = v3_chkpt_open_ctx(chkpt, buf);
1871 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1875 V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1876 V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1877 V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1878 V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1879 V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1880 V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1881 V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1882 V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1883 V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1884 V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1886 v3_chkpt_close_ctx(ctx); ctx=0;
// Per-drive state within this channel.
1888 for (drive_num = 0; drive_num < 2; drive_num++) {
1889 struct ide_drive * drive = &(ch->drives[drive_num]);
1891 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1893 ctx = v3_chkpt_open_ctx(chkpt, buf);
1896 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1900 V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1901 V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1902 V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1903 V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1905 V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1906 V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1907 V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1909 V3_CHKPT_LOAD(ctx, "DATA_BUF", drive->data_buf, loadfailout);
1912 /* For now we'll just pack the type specific data at the end... */
1913 /* We should probably add a new context here in the future... */
1914 if (drive->drive_type == BLOCK_CDROM) {
1915 V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1916 V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1917 V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1918 } else if (drive->drive_type == BLOCK_DISK) {
1919 V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1920 V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1921 V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1922 } else if (drive->drive_type == BLOCK_NONE) {
1923 // no drive connected, so no data
1925 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
// LBA48 two-write FIFO state, restored for all drive types.
1929 V3_CHKPT_LOAD(ctx, "LBA48_LBA", drive->lba48.lba, loadfailout);
1930 V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, loadfailout);
1931 V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, loadfailout);
1932 V3_CHKPT_LOAD(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, loadfailout);
1933 V3_CHKPT_LOAD(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, loadfailout);
1934 V3_CHKPT_LOAD(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, loadfailout);
1942 PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1943 if (ctx) {v3_chkpt_close_ctx(ctx); }
// Device operations table: teardown callback plus (when checkpointing
// is compiled in) the extended save/restore handlers above.
1953 static struct v3_device_ops dev_ops = {
1954 .free = (int (*)(void *))ide_free,
1955 #ifdef V3_CONFIG_CHECKPOINT
1956 .save_extended = ide_save_extended,
1957 .load_extended = ide_load_extended
/*
 * Block-frontend connect callback: attach a backing block device to a
 * configured <bus_num, drive_num> slot. Reads the slot, type ("cdrom"
 * or "hd") and optional model string from the config tree, fills in
 * the drive's geometry (for disks) and stores the backend handle.
 */
1964 static int connect_fn(struct v3_vm_info * vm,
1965 void * frontend_data,
1966 struct v3_dev_blk_ops * ops,
1967 v3_cfg_tree_t * cfg,
1968 void * private_data) {
1969 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1970 struct ide_channel * channel = NULL;
1971 struct ide_drive * drive = NULL;
1973 char * bus_str = v3_cfg_val(cfg, "bus_num");
1974 char * drive_str = v3_cfg_val(cfg, "drive_num");
1975 char * type_str = v3_cfg_val(cfg, "type");
1976 char * model_str = v3_cfg_val(cfg, "model");
1978 uint_t drive_num = 0;
1981 if ((!type_str) || (!drive_str) || (!bus_str)) {
1982 PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1986 bus_num = atoi(bus_str);
1987 drive_num = atoi(drive_str);
// NOTE(review): bus_num/drive_num come straight from the config with
// no visible range check here; channels[] and drives[] are 2 entries
// each, so out-of-range config values would index out of bounds —
// confirm whether validation happens in elided lines or add it.
1989 channel = &(ide->channels[bus_num]);
1990 drive = &(channel->drives[drive_num]);
1992 if (drive->drive_type != BLOCK_NONE) {
1993 PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1997 if (model_str != NULL) {
// strncpy may not NUL-terminate; the next line forces termination.
1998 strncpy(drive->model, model_str, sizeof(drive->model));
1999 drive->model[sizeof(drive->model)-1] = 0;
2002 if (strcasecmp(type_str, "cdrom") == 0) {
2003 drive->drive_type = BLOCK_CDROM;
// ATA identify model fields are space-padded to 40 characters.
2005 while (strlen((char *)(drive->model)) < 40) {
2006 strcat((char*)(drive->model), " ");
2009 } else if (strcasecmp(type_str, "hd") == 0) {
2010 drive->drive_type = BLOCK_DISK;
2012 drive->hd_state.accessed = 0;
2013 drive->hd_state.mult_sector_num = 1;
// Fixed 63 sectors/track x 16 heads; cylinders derived from capacity.
2015 drive->num_sectors = 63;
2016 drive->num_heads = 16;
2017 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
2019 PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
2026 // Hardcode this for now, but its not a good idea....
2027 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
2030 drive->private_data = private_data;
/*
 * Device initialization entry point (registered via device_register).
 * Allocates the controller state, resolves the PCI bus and PIIX3
 * southbridge from the config, initializes both channels, hooks all
 * legacy IDE I/O ports, registers the controller as a PCI device
 * (Intel PIIX3 IDE, vendor 0x8086 / device 0x7010) with a BAR-4 DMA
 * region, and registers the block frontend so backends can attach.
 */
2038 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
2039 struct ide_internal * ide = NULL;
2040 char * dev_id = v3_cfg_val(cfg, "ID");
2043 PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
2045 ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
2048 PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
2052 memset(ide, 0, sizeof(struct ide_internal));
// Optional PCI attachment: requires both a PCI bus and a southbridge.
2055 ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
2057 if (ide->pci_bus != NULL) {
2058 struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
2061 PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
2066 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
2068 PrintError(vm,VCORE_NONE,"Strange - you don't have a PCI bus\n");
2071 PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
2073 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
2076 PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
2081 if (init_ide_state(ide) == -1) {
2082 PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
2083 v3_remove_device(dev);
2087 PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
// Hook the primary channel's legacy port block (0x1f0-0x1f7).
2089 ret |= v3_dev_hook_io(dev, PRI_DATA_PORT,
2090 &read_data_port, &write_data_port);
2091 ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT,
2092 &read_port_std, &write_port_std);
2093 ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
2094 &read_port_std, &write_port_std);
2095 ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
2096 &read_port_std, &write_port_std);
2097 ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
2098 &read_port_std, &write_port_std);
2099 ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
2100 &read_port_std, &write_port_std);
2101 ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
2102 &read_port_std, &write_port_std);
2103 ret |= v3_dev_hook_io(dev, PRI_CMD_PORT,
2104 &read_port_std, &write_cmd_port);
// Hook the secondary channel's legacy port block (0x170-0x177).
2106 ret |= v3_dev_hook_io(dev, SEC_DATA_PORT,
2107 &read_data_port, &write_data_port);
2108 ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT,
2109 &read_port_std, &write_port_std);
2110 ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
2111 &read_port_std, &write_port_std);
2112 ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
2113 &read_port_std, &write_port_std);
2114 ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
2115 &read_port_std, &write_port_std);
2116 ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
2117 &read_port_std, &write_port_std);
2118 ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
2119 &read_port_std, &write_port_std);
2120 ret |= v3_dev_hook_io(dev, SEC_CMD_PORT,
2121 &read_port_std, &write_cmd_port);
// Control and (unused) address registers for both channels.
2124 ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT,
2125 &read_port_std, &write_port_std);
2127 ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT,
2128 &read_port_std, &write_port_std);
2131 ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
2132 &read_port_std, &write_port_std);
2134 ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
2135 &read_port_std, &write_port_std);
2139 PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
2140 v3_remove_device(dev);
2146 struct v3_pci_bar bars[6];
2147 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
2148 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
2149 struct pci_device * pci_dev = NULL;
2152 V3_Print(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
2154 for (i = 0; i < 6; i++) {
2155 bars[i].type = PCI_BAR_NONE;
// BAR 4 is the bus-master DMA register block (16 ports).
2158 bars[4].type = PCI_BAR_IO;
2159 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
2160 bars[4].default_base_port = -1;
2161 bars[4].num_ports = 16;
2163 bars[4].io_read = read_dma_port;
2164 bars[4].io_write = write_dma_port;
2165 bars[4].private_data = ide;
// Register at the southbridge's device number, function 1, as is
// standard for the PIIX3 IDE function.
2167 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
2169 pci_config_update, NULL, NULL, NULL, ide);
2171 if (pci_dev == NULL) {
2172 PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i);
2173 v3_remove_device(dev);
2177 /* This is for CMD646 devices
2178 pci_dev->config_header.vendor_id = 0x1095;
2179 pci_dev->config_header.device_id = 0x0646;
2180 pci_dev->config_header.revision = 0x8f07;
// Identify as an Intel PIIX3 IDE controller.
2183 pci_dev->config_header.vendor_id = 0x8086;
2184 pci_dev->config_header.device_id = 0x7010;
2185 pci_dev->config_header.revision = 0x00;
2187 pci_dev->config_header.prog_if = 0x80; // Master IDE device
2188 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
2189 pci_dev->config_header.class = PCI_CLASS_STORAGE;
2191 pci_dev->config_header.command = 0;
2192 pci_dev->config_header.status = 0x0280;
2194 ide->ide_pci = pci_dev;
// Allow block backends (disk/cdrom images) to attach to our slots.
2199 if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
2200 PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
2201 v3_remove_device(dev);
2206 PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
// Register this device implementation with the device manager under
// the config name "IDE", using ide_init as the constructor.
2212 device_register("IDE", ide_init)
2217 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num,
2218 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2220 struct ide_internal * ide = ide_data;
2221 struct ide_channel * channel = &(ide->channels[channel_num]);
2222 struct ide_drive * drive = &(channel->drives[drive_num]);
2224 if (drive->drive_type == BLOCK_NONE) {
2228 *cylinders = drive->num_cylinders;
2229 *heads = drive->num_heads;
2230 *sectors = drive->num_sectors;