2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
/* Compile out debug output unless the IDE debug build option is enabled.
 * NOTE(review): this listing is fragmentary — the matching #endif is not
 * visible in this excerpt. */
29 #ifndef V3_CONFIG_DEBUG_IDE
31 #define PrintDebug(fmt, args...)
/* Default legacy ISA IRQ lines for the primary/secondary IDE channels */
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
/* Primary channel legacy I/O ports (task-file at 0x1f0-0x1f7, control at 0x3f6/0x3f7) */
38 #define PRI_DATA_PORT 0x1f0
39 #define PRI_FEATURES_PORT 0x1f1
40 #define PRI_SECT_CNT_PORT 0x1f2
41 #define PRI_SECT_NUM_PORT 0x1f3
42 #define PRI_CYL_LOW_PORT 0x1f4
43 #define PRI_CYL_HIGH_PORT 0x1f5
44 #define PRI_DRV_SEL_PORT 0x1f6
45 #define PRI_CMD_PORT 0x1f7
46 #define PRI_CTRL_PORT 0x3f6
47 #define PRI_ADDR_REG_PORT 0x3f7
/* Secondary channel legacy I/O ports (task-file at 0x170-0x177, control at 0x376/0x377) */
49 #define SEC_DATA_PORT 0x170
50 #define SEC_FEATURES_PORT 0x171
51 #define SEC_SECT_CNT_PORT 0x172
52 #define SEC_SECT_NUM_PORT 0x173
53 #define SEC_CYL_LOW_PORT 0x174
54 #define SEC_CYL_HIGH_PORT 0x175
55 #define SEC_DRV_SEL_PORT 0x176
56 #define SEC_CMD_PORT 0x177
57 #define SEC_CTRL_PORT 0x376
58 #define SEC_ADDR_REG_PORT 0x377
/* Default PCI bus-master (DMA engine) I/O base ports; secondary is primary + 8 */
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
/* Per-drive staging buffer size; sized to hold one ATAPI block (2048 bytes),
 * which is also >= one 512-byte hard-disk sector */
64 #define DATA_BUFFER_SIZE 2048
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
/* Human-readable port names for debug output.
 * Indices 0-7 map to the task-file ports (data..cmd) in ascending port order;
 * indices 8-9 map to the control and address registers. */
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM",
71 "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72 "PRI_CTRL", "PRI_ADDR_REG"};
/* Same layout as ide_pri_port_strs, for the secondary channel */
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM",
76 "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77 "SEC_CTRL", "SEC_ADDR_REG"};
/* Bus-master register names indexed by (port & 0x7); offsets 1 and 3 are
 * unused in the busmaster register block, hence the NULL entries */
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80 "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
/* Type of backend block device attached to a drive slot:
 * none (empty slot), hard disk (ATA), or CD-ROM (ATAPI) */
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
/* Map a legacy IDE I/O port number to its debug name string.
 * Task-file ports index directly into the name tables; the control/addr-reg
 * ports (0x3f6/0x3f7, 0x376/0x377) map to entries 8 and 9.
 * NOTE(review): the fallback return for unrecognized ports and the closing
 * brace are not visible in this excerpt of the listing. */
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
/* Map a bus-master DMA port to its debug name using the low 3 bits of the
 * port number (the busmaster register block is 8 bytes per channel).
 * May return NULL for the unused offsets 1 and 3. */
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
/* ---- Emulated IDE state structures ----
 * NOTE(review): this region of the listing is heavily elided; several struct
 * and union boundaries (and many members) are missing between the visible
 * lines. Comments below describe only what is visible. */

/* ATAPI (CD-ROM) per-drive state: last SENSE data and error-recovery page */
105 struct ide_cd_state {
106 struct atapi_sense_data sense;
109 struct atapi_error_recovery err_recovery;

/* ATA (hard disk) per-drive state */
112 struct ide_hd_state {
115 /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116 uint32_t mult_sector_num;
118 /* This is the current op sector size:
119 * for multiple sector ops this equals mult_sector_num
120 * for standard ops this equals 1
122 uint64_t cur_sector_num;

/* Per-drive state (struct ide_drive — opening line not visible here) */
128 v3_block_type_t drive_type;
/* Callbacks into the attached backend block device */
130 struct v3_dev_blk_ops * ops;
133 struct ide_cd_state cd_state;
134 struct ide_hd_state hd_state;
139 // Where we are in the data transfer
140 uint64_t transfer_index;
142 // the length of a transfer
143 // calculated for easy access
144 uint64_t transfer_length;
/* Current logical block address of the in-progress operation */
146 uint64_t current_lba;
148 // We have a local data buffer that we use for IO port accesses
149 uint8_t data_buf[DATA_BUFFER_SIZE];
/* Disk geometry (presumably set from the backend capacity — confirm) */
152 uint32_t num_cylinders;
154 uint32_t num_sectors;
/* LBA48 staging state: 1f2-1f5 take two sequential byte writes (high first) */
160 uint16_t sector_count; // for LBA48
161 uint8_t sector_count_state; // two step write to 1f2/172 (high first)
162 uint8_t lba41_state; // two step write to 1f3
163 uint8_t lba52_state; // two step write to 1f4
164 uint8_t lba63_state; // two step write to 1f5
/* Task-file register overlays (packed, guest-visible layouts) */
170 uint8_t sector_count; // 0x1f2,0x172 (ATA)
171 struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
172 } __attribute__((packed));
176 uint8_t sector_num; // 0x1f3,0x173
178 } __attribute__((packed));
185 uint8_t cylinder_low; // 0x1f4,0x174
186 uint8_t cylinder_high; // 0x1f5,0x175
187 } __attribute__((packed));
192 } __attribute__((packed));
195 // The transfer length requested by the CPU
197 } __attribute__((packed));

/* Per-channel state (struct ide_channel — opening line not visible here):
 * two drive slots plus the shared channel registers */
204 struct ide_drive drives[2];
207 struct ide_error_reg error_reg; // [read] 0x1f1,0x171
209 struct ide_features_reg features;
211 struct ide_drive_head_reg drive_head; // 0x1f6,0x176
213 struct ide_status_reg status; // [read] 0x1f7,0x177
214 uint8_t cmd_reg; // [write] 0x1f7,0x177
216 int irq; // this is temporary until we add PCI support
219 struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
/* Bus-master (DMA) register block: raw byte view overlaid with the
 * command/status registers and the PRD table physical address */
222 uint8_t dma_ports[8];
224 struct ide_dma_cmd_reg dma_cmd;
226 struct ide_dma_status_reg dma_status;
228 uint32_t dma_prd_addr;
229 } __attribute__((packed));
230 } __attribute__((packed));
/* Index of the next PRD entry to process during a DMA transfer */
232 uint32_t dma_tbl_index;

/* Top-level device state: both channels plus links to the PCI/southbridge
 * infrastructure and the owning VM */
237 struct ide_internal {
238 struct ide_channel channels[2];
240 struct v3_southbridge * southbridge;
241 struct vm_device * pci_bus;
243 struct pci_device * ide_pci;
245 struct v3_vm_info * vm;
252 /* Utility functions */
/* Endianness helpers: reassemble a value from its bytes in reversed order.
 * Since the swap is symmetric, the le_to_be_* variants just call the
 * be_to_le_* ones. (Closing braces elided in this excerpt of the listing.) */
254 static inline uint16_t be_to_le_16(const uint16_t val) {
255 uint8_t * buf = (uint8_t *)&val;
256 return (buf[0] << 8) | (buf[1]) ;
259 static inline uint16_t le_to_be_16(const uint16_t val) {
260 return be_to_le_16(val);
264 static inline uint32_t be_to_le_32(const uint32_t val) {
265 uint8_t * buf = (uint8_t *)&val;
266 return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
269 static inline uint32_t le_to_be_32(const uint32_t val) {
270 return be_to_le_32(val);
/* Addressing-mode predicates based on the drive/head register.
 * LBA28 sets the lba_mode bit with both reserved bits set (the legacy
 * always-1 bits); LBA48 sets lba_mode with both reserved bits clear;
 * CHS is simply lba_mode == 0. */
274 static inline int is_lba28(struct ide_channel * channel) {
275 return channel->drive_head.lba_mode && channel->drive_head.rsvd1 && channel->drive_head.rsvd2;
278 static inline int is_lba48(struct ide_channel * channel) {
279 return channel->drive_head.lba_mode && !channel->drive_head.rsvd1 && !channel->drive_head.rsvd2;
282 static inline int is_chs(struct ide_channel * channel) {
283 return !channel->drive_head.lba_mode;
/* Decide which channel (0 = primary, 1 = secondary) an I/O port belongs to,
 * matching the task-file block, the control ports, and the default DMA block
 * for each channel.
 * NOTE(review): the return statements for each branch (presumably 0 and 1)
 * are not visible in this excerpt of the listing. */
286 static inline int get_channel_index(ushort_t port) {
287 if (((port & 0xfff8) == 0x1f0) ||
288 ((port & 0xfffe) == 0x3f6) ||
289 ((port & 0xfff8) == 0xc000)) {
291 } else if (((port & 0xfff8) == 0x170) ||
292 ((port & 0xfffe) == 0x376) ||
293 ((port & 0xfff8) == 0xc008)) {
/* Resolve the channel struct addressed by an I/O port */
300 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
301 int channel_idx = get_channel_index(port);
302 return &(ide->channels[channel_idx]);
/* Resolve the drive currently selected by the channel's drive/head register */
305 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
306 return &(channel->drives[channel->drive_head.drive_sel]);
/* Raise the channel's interrupt line, honoring the guest's nIEN
 * (interrupt-disable) bit in the device control register. Also latches the
 * busmaster status "interrupt generated" bit before injecting the IRQ. */
313 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
314 if (channel->ctrl_reg.irq_disable == 0) {
316 PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
318 channel->dma_status.int_gen = 1;
319 v3_raise_irq(ide->vm, channel->irq);
321 PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
/* Reset a single drive's task-file state to post-reset defaults.
 * The cylinder registers are set to the ATAPI signature (0xeb14) for CD-ROMs
 * so the guest can detect a packet device, and 0 for disks. */
326 static void drive_reset(struct ide_drive * drive) {
327 drive->sector_count = 0x01;
328 drive->sector_num = 0x01;
330 PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
332 if (drive->drive_type == BLOCK_CDROM) {
333 drive->cylinder = 0xeb14;
335 drive->cylinder = 0x0000;
336 //drive->hd_state.accessed = 0;
/* Clear the staging buffer and transfer position */
340 memset(drive->data_buf, 0, sizeof(drive->data_buf));
341 drive->transfer_index = 0;
343 // Send the reset signal to the connected device callbacks
344 // channel->drives[0].reset();
345 // channel->drives[1].reset();
/* Begin a channel reset: status shows busy + seek-complete (0x90), the error
 * register takes its diagnostic default (0x01), the command register is
 * cleared, and interrupts are re-enabled. Completion is handled separately
 * by channel_reset_complete(). */
348 static void channel_reset(struct ide_channel * channel) {
350 // set busy and seek complete flags
351 channel->status.val = 0x90;
354 channel->error_reg.val = 0x01;
357 channel->cmd_reg = 0; // NOP
359 channel->ctrl_reg.irq_disable = 0;
/* Finish a channel reset: clear busy, set ready, zero the head number, and
 * reset both drive slots to their power-on task-file defaults. */
362 static void channel_reset_complete(struct ide_channel * channel) {
363 channel->status.busy = 0;
364 channel->status.ready = 1;
366 channel->drive_head.head_num = 0;
368 drive_reset(&(channel->drives[0]));
369 drive_reset(&(channel->drives[1]));
/* Abort the current command: set status to error+ready (0x41), report
 * ABRT in the error register (0x04 = command aborted), and interrupt the
 * guest so it notices the failure. */
373 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
375 PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");
377 channel->status.val = 0x41; // Error + ready
378 channel->error_reg.val = 0x04; // ABRT (command aborted)
380 ide_raise_irq(ide, channel);
384 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
385 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
388 /* ATAPI functions */
/* Debug helper: walk the guest's PRD (Physical Region Descriptor) table at
 * channel->dma_prd_addr and dump each entry until the end-of-table bit.
 * Reads guest physical memory via core 0.
 * NOTE(review): the loop header and index declaration are elided in this
 * excerpt of the listing. */
396 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
397 struct ide_dma_prd prd_entry;
400 V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
403 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
406 ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
408 if (ret != sizeof(struct ide_dma_prd)) {
409 PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
/* A PRD size field of 0 encodes a 64KB region */
413 V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
415 (prd_entry.size == 0) ? 0x10000 : prd_entry.size,
416 prd_entry.end_of_table);
418 if (prd_entry.end_of_table) {
/* Bus-master DMA read: copy data from the backend device into guest memory,
 * walking the guest's PRD table one entry at a time. Handles three data
 * sources: ATA disk sectors, ATAPI data commands, and ATAPI non-data command
 * results. On completion (EOT) it clears busy/data_req, marks the transfer
 * done in the busmaster status, and raises the channel IRQ.
 * Returns 0 on success, -1 on error (per the visible error paths).
 * NOTE(review): many interior lines (declarations of ret/cmd_ret, several
 * closing braces, early returns) are elided in this excerpt of the listing. */
430 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
431 struct ide_drive * drive = get_selected_drive(channel);
432 // This is at top level scope to do the EOT test at the end
433 struct ide_dma_prd prd_entry = {};
434 uint_t bytes_left = drive->transfer_length;
436 // Read in the data buffer....
437 // Read a sector/block at a time until the prd entry is full.
439 #ifdef V3_CONFIG_DEBUG_IDE
440 print_prd_table(ide, channel);
443 PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
445 // Loop through the disk data
446 while (bytes_left > 0) {
// Fetch the next PRD entry from guest physical memory
447 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
448 uint_t prd_bytes_left = 0;
449 uint_t prd_offset = 0;
452 PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
454 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
456 if (ret != sizeof(struct ide_dma_prd)) {
457 PrintError(core->vm_info, core, "Could not read PRD\n");
461 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
462 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
464 // loop through the PRD data....
466 if (prd_entry.size == 0) {
467 // a size of 0 means 64k
468 prd_bytes_left = 0x10000;
470 prd_bytes_left = prd_entry.size;
// Fill this PRD region one device sector/block at a time
474 while (prd_bytes_left > 0) {
475 uint_t bytes_to_write = 0;
477 if (drive->drive_type == BLOCK_DISK) {
// ATA: pull the next 512-byte sector into the staging buffer
478 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
481 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
482 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
485 } else if (drive->drive_type == BLOCK_CDROM) {
486 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
// ATAPI data op: pull the next 2048-byte block
487 bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
489 if (atapi_read_chunk(ide, channel) == -1) {
490 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
495 PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
// ATAPI non-data command result: copy what is already in the staging
// buffer, complete the command, and interrupt the guest immediately
500 //V3_Print(core->vm_info, core, "DMA of command packet\n");
502 bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
503 prd_bytes_left = bytes_to_write;
506 // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
507 cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset,
508 bytes_to_write, drive->data_buf);
515 drive->transfer_index += bytes_to_write;
// Command complete: ready + seek-complete, DMA engine idle
517 channel->status.busy = 0;
518 channel->status.ready = 1;
519 channel->status.data_req = 0;
520 channel->status.error = 0;
521 channel->status.seek_complete = 1;
523 channel->dma_status.active = 0;
524 channel->dma_status.err = 0;
526 ide_raise_irq(ide, channel);
// Normal data path: copy the staged sector/block into the PRD region
532 PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n",
533 (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
535 drive->current_lba++;
537 ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
539 if (ret != bytes_to_write) {
540 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
544 PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
546 drive->transfer_index += ret;
547 prd_bytes_left -= ret;
// This PRD entry is exhausted; advance to the next one
552 channel->dma_tbl_index++;
// Sectors/blocks that straddle PRD boundaries are unsupported
554 if (drive->drive_type == BLOCK_DISK) {
555 if (drive->transfer_index % HD_SECTOR_SIZE) {
556 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
559 } else if (drive->drive_type == BLOCK_CDROM) {
560 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
561 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
562 PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
563 PrintError(core->vm_info, core, "transfer_index=%llu, transfer_length=%llu\n",
564 drive->transfer_index, drive->transfer_length);
// Guest gave us a PRD table too small for the requested transfer
571 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
572 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
// ATAPI completion interrupt reason: data-in, command complete
578 drive->irq_flags.io_dir = 1;
579 drive->irq_flags.c_d = 1;
580 drive->irq_flags.rel = 0;
584 // Update to the next PRD entry
// Transfer finished: mark device ready and busmaster idle, then interrupt
588 if (prd_entry.end_of_table) {
589 channel->status.busy = 0;
590 channel->status.ready = 1;
591 channel->status.data_req = 0;
592 channel->status.error = 0;
593 channel->status.seek_complete = 1;
595 channel->dma_status.active = 0;
596 channel->dma_status.err = 0;
599 ide_raise_irq(ide, channel);
/* Bus-master DMA write: copy data from guest memory (via the PRD table) into
 * the staging buffer one sector at a time and flush each sector to the
 * backend disk. Mirrors dma_read()'s PRD walk; on EOT it marks the device
 * ready, the busmaster idle, and raises the channel IRQ.
 * Returns 0 on success, -1 on error (per the visible error paths).
 * NOTE(review): interior lines (ret declaration, closing braces, returns)
 * are elided in this excerpt of the listing. */
605 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
606 struct ide_drive * drive = get_selected_drive(channel);
607 // This is at top level scope to do the EOT test at the end
608 struct ide_dma_prd prd_entry = {};
609 uint_t bytes_left = drive->transfer_length;
612 PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
614 // Loop through disk data
615 while (bytes_left > 0) {
// Fetch the next PRD entry from guest physical memory
616 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
617 uint_t prd_bytes_left = 0;
618 uint_t prd_offset = 0;
621 PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
623 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
625 if (ret != sizeof(struct ide_dma_prd)) {
626 PrintError(core->vm_info, core, "Could not read PRD\n");
630 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
631 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
634 if (prd_entry.size == 0) {
635 // a size of 0 means 64k
636 prd_bytes_left = 0x10000;
638 prd_bytes_left = prd_entry.size;
// Drain this PRD region one sector at a time: read from guest memory,
// then flush the sector to the backend
641 while (prd_bytes_left > 0) {
642 uint_t bytes_to_write = 0;
645 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
648 ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
650 if (ret != bytes_to_write) {
651 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
655 PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
658 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
659 PrintError(core->vm_info, core, "Failed to write data to disk\n");
663 drive->current_lba++;
665 drive->transfer_index += ret;
666 prd_bytes_left -= ret;
// This PRD entry is exhausted; advance to the next one
671 channel->dma_tbl_index++;
// Sectors that straddle PRD boundaries are unsupported
673 if (drive->transfer_index % HD_SECTOR_SIZE) {
674 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
// Guest gave us a PRD table too small for the requested transfer
678 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
679 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
680 PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%llu)...\n",
681 bytes_left, drive->transfer_length);
682 PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
683 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
685 print_prd_table(ide, channel);
// Transfer finished: mark device ready and busmaster idle, then interrupt
690 if (prd_entry.end_of_table) {
691 channel->status.busy = 0;
692 channel->status.ready = 1;
693 channel->status.data_req = 0;
694 channel->status.error = 0;
695 channel->status.seek_complete = 1;
697 channel->dma_status.active = 0;
698 channel->dma_status.err = 0;
701 ide_raise_irq(ide, channel);
/* Bus-master register offsets within a channel's 8-byte register block.
 * DMA_CHANNEL_FLAG (bit 3 of the port) selects primary vs secondary. */
708 #define DMA_CMD_PORT 0x00
709 #define DMA_STATUS_PORT 0x02
710 #define DMA_PRD_PORT0 0x04
711 #define DMA_PRD_PORT1 0x05
712 #define DMA_PRD_PORT2 0x06
713 #define DMA_PRD_PORT3 0x07
715 #define DMA_CHANNEL_FLAG 0x08
718 Note that DMA model is as follows:
720 1. Write the PRD pointer to the busmaster (DMA engine)
721 2. Start the transfer on the device
722 3. Tell the busmaster to start shoveling data (active DMA)
/* I/O write handler for the bus-master register block.
 * Decodes the channel from bit 3 of the port and dispatches on the register
 * offset: DMA command (start/stop + direction, kicking off dma_read/dma_write
 * synchronously), DMA status (write-to-clear semantics), and the four bytes
 * of the PRD table pointer.
 * NOTE(review): case labels, breaks, and returns are elided in this excerpt
 * of the listing. */
725 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
726 struct ide_internal * ide = (struct ide_internal *)private_data;
727 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
728 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
729 struct ide_channel * channel = &(ide->channels[channel_flag]);
731 PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
732 port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
734 switch (port_offset) {
736 channel->dma_cmd.val = *(uint8_t *)src;
738 PrintDebug(core->vm_info, core, "IDE: dma command write: 0x%x\n", channel->dma_cmd.val);
740 if (channel->dma_cmd.start == 0) {
// Engine stopped: rewind the PRD walk for the next transfer
741 channel->dma_tbl_index = 0;
743 // Launch DMA operation, interrupt at end
745 channel->dma_status.active = 1;
747 if (channel->dma_cmd.read == 1) {
748 // DMA Read the whole thing - dma_read will raise irq
749 if (dma_read(core, ide, channel) == -1) {
750 PrintError(core->vm_info, core, "Failed DMA Read\n");
754 // DMA write the whole thing - dma_write will raise irq
755 if (dma_write(core, ide, channel) == -1) {
756 PrintError(core->vm_info, core, "Failed DMA Write\n");
762 // Note that guest cannot abort a DMA transfer
// The transfer ran synchronously above, so clear the start bit now
763 channel->dma_cmd.start = 0;
768 case DMA_STATUS_PORT: {
769 // This is intended to clear status
771 uint8_t val = *(uint8_t *)src;
774 PrintError(core->vm_info, core, "Invalid write length for DMA status port\n");
778 // but preserve certain bits
// Writable bits (0x60) are taken from the guest; the active bit (0x01)
// is preserved; error/interrupt bits (0x06) are write-1-to-clear
779 channel->dma_status.val = ((val & 0x60) |
780 (channel->dma_status.val & 0x01) |
781 (channel->dma_status.val & ~val & 0x06));
// PRD pointer bytes: assemble dma_prd_addr byte-by-byte (little endian)
788 case DMA_PRD_PORT3: {
789 uint_t addr_index = port_offset & 0x3;
790 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
793 if (addr_index + length > 4) {
794 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
798 for (i = 0; i < length; i++) {
799 addr_buf[addr_index + i] = *((uint8_t *)src + i);
802 PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
807 PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
/* I/O read handler for the bus-master register block: bounds-check the
 * access, then return the raw bytes of the channel's dma_ports array (which
 * overlays the dma_cmd/dma_status/dma_prd_addr registers). */
815 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
816 struct ide_internal * ide = (struct ide_internal *)private_data;
817 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
818 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
819 struct ide_channel * channel = &(ide->channels[channel_flag]);
821 PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
823 if (port_offset + length > 16) {
824 PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
828 memcpy(dst, channel->dma_ports + port_offset, length);
830 PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
/* I/O write handler for the command register (0x1f7 / 0x177): latch the
 * command byte and dispatch the ATA/ATAPI command. PIO read/write commands
 * start the transfer immediately; DMA commands only record the LBA and
 * length and wait for the guest to flip the busmaster start bit.
 * Unrecognized commands are aborted with ABRT.
 * NOTE(review): breaks, returns, and several closing braces are elided in
 * this excerpt of the listing. */
837 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
838 struct ide_internal * ide = priv_data;
839 struct ide_channel * channel = get_selected_channel(ide, port);
840 struct ide_drive * drive = get_selected_drive(channel);
843 PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
847 PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
849 channel->cmd_reg = *(uint8_t *)src;
851 switch (channel->cmd_reg) {
853 case ATA_PIDENTIFY: // ATAPI Identify Device Packet (CDROM)
854 if (drive->drive_type != BLOCK_CDROM) {
857 // JRL: Should we abort here?
858 ide_abort_command(ide, channel);
// Fill the data buffer with the ATAPI identify page and signal data-ready
861 atapi_identify_device(drive);
863 channel->error_reg.val = 0;
864 channel->status.val = 0x58; // ready, data_req, seek_complete
866 ide_raise_irq(ide, channel);
870 case ATA_IDENTIFY: // Identify Device
871 if (drive->drive_type != BLOCK_DISK) {
874 // JRL: Should we abort here?
875 ide_abort_command(ide, channel);
877 ata_identify_device(drive);
879 channel->error_reg.val = 0;
880 channel->status.val = 0x58;
882 ide_raise_irq(ide, channel);
886 case ATA_PACKETCMD: // ATAPI Command Packet (CDROM)
887 if (drive->drive_type != BLOCK_CDROM) {
888 ide_abort_command(ide, channel);
891 drive->sector_count = 1;
// Await the 12-byte command packet via the data port
893 channel->status.busy = 0;
894 channel->status.write_fault = 0;
895 channel->status.data_req = 1;
896 channel->status.error = 0;
898 // reset the data buffer...
899 drive->transfer_length = ATAPI_PACKET_SIZE;
900 drive->transfer_index = 0;
904 case ATA_READ: // Read Sectors with Retry
905 case ATA_READ_ONCE: // Read Sectors without Retry
906 case ATA_MULTREAD: // Read multiple sectors per irq
907 case ATA_READ_EXT: // Read Sectors Extended (LBA48)
// Interrupt granularity: per block-mode cluster for MULTREAD, else per sector
909 if (channel->cmd_reg==ATA_MULTREAD) {
910 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
912 drive->hd_state.cur_sector_num = 1;
915 if (ata_read_sectors(ide, channel) == -1) {
916 PrintError(core->vm_info, core, "Error reading sectors\n");
917 ide_abort_command(ide,channel);
921 case ATA_WRITE: // Write Sector with retry
922 case ATA_WRITE_ONCE: // Write Sector without retry
923 case ATA_MULTWRITE: // Write multiple sectors per irq
924 case ATA_WRITE_EXT: // Write Sectors Extended (LBA48)
926 if (channel->cmd_reg==ATA_MULTWRITE) {
927 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
929 drive->hd_state.cur_sector_num = 1;
932 if (ata_write_sectors(ide, channel) == -1) {
933 PrintError(core->vm_info, core, "Error writing sectors\n");
934 ide_abort_command(ide,channel);
938 case ATA_READDMA: // Read DMA with retry
939 case ATA_READDMA_ONCE: // Read DMA without retry
940 case ATA_READDMA_EXT: { // Read DMA (LBA48)
/* NOTE(review): "§_cnt" below is a mangled "&sect_cnt" in this listing
 * (HTML-entity corruption); the sect_cnt declaration is also elided. */
943 if (ata_get_lba_and_size(ide, channel, &(drive->current_lba), §_cnt) == -1) {
944 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
945 ide_abort_command(ide, channel);
949 drive->hd_state.cur_sector_num = 1; // Not used for DMA
951 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
952 drive->transfer_index = 0;
954 // Now we wait for the transfer to be initiated by flipping the
955 // bus-master start bit
959 case ATA_WRITEDMA: // Write DMA with retry
960 case ATA_WRITEDMA_ONCE: // Write DMA without retry
961 case ATA_WRITEDMA_EXT: { // Write DMA (LBA48)
965 if (ata_get_lba_and_size(ide, channel, &(drive->current_lba),§_cnt) == -1) {
966 PrintError(core->vm_info,core,"Cannot get lba\n");
967 ide_abort_command(ide, channel);
971 drive->hd_state.cur_sector_num = 1; // Not used for DMA
973 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
974 drive->transfer_index = 0;
976 // Now we wait for the transfer to be initiated by flipping the
977 // bus-master start bit
// Power-management commands: no-op, just report ready and interrupt
981 case ATA_STANDBYNOW1: // Standby Now 1
982 case ATA_IDLEIMMEDIATE: // Set Idle Immediate
983 case ATA_STANDBY: // Standby
984 case ATA_SETIDLE1: // Set Idle 1
985 case ATA_SLEEPNOW1: // Sleep Now 1
986 case ATA_STANDBYNOW2: // Standby Now 2
987 case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
988 case ATA_STANDBY2: // Standby 2
989 case ATA_SETIDLE2: // Set idle 2
990 case ATA_SLEEPNOW2: // Sleep Now 2
991 channel->status.val = 0;
992 channel->status.ready = 1;
993 ide_raise_irq(ide, channel);
996 case ATA_SETFEATURES: // Set Features
997 // Prior to this the features register has been written to.
998 // This command tells the drive to check if the new value is supported (the value is drive specific)
999 // Common is that bit0=DMA enable
1000 // If valid the drive raises an interrupt, if not it aborts.
1002 // Do some checking here...
1004 channel->status.busy = 0;
1005 channel->status.write_fault = 0;
1006 channel->status.error = 0;
1007 channel->status.ready = 1;
1008 channel->status.seek_complete = 1;
1010 ide_raise_irq(ide, channel);
1013 case ATA_SPECIFY: // Initialize Drive Parameters
1014 case ATA_RECAL: // recalibrate?
1015 channel->status.error = 0;
1016 channel->status.ready = 1;
1017 channel->status.seek_complete = 1;
1018 ide_raise_irq(ide, channel);
1021 case ATA_SETMULT: { // Set multiple mode (IDE Block mode)
1022 // This makes the drive transfer multiple sectors before generating an interrupt
// A count of zero is invalid: fall back to 1 and abort the command
1024 if (drive->sector_count == 0) {
1025 PrintError(core->vm_info,core,"Attempt to set multiple to zero\n");
1026 drive->hd_state.mult_sector_num= 1;
1027 ide_abort_command(ide,channel);
1030 drive->hd_state.mult_sector_num = drive->sector_count;
1033 channel->status.ready = 1;
1034 channel->status.error = 0;
1036 ide_raise_irq(ide, channel);
1041 case ATA_DEVICE_RESET: // Reset Device
1043 channel->error_reg.val = 0x01;
1044 channel->status.busy = 0;
1045 channel->status.ready = 1;
1046 channel->status.seek_complete = 1;
1047 channel->status.write_fault = 0;
1048 channel->status.error = 0;
1051 case ATA_CHECKPOWERMODE1: // Check power mode
1052 drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1053 channel->status.busy = 0;
1054 channel->status.ready = 1;
1055 channel->status.write_fault = 0;
1056 channel->status.data_req = 0;
1057 channel->status.error = 0;
1061 PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1062 ide_abort_command(ide, channel);
/* PIO data-port read path for a hard disk: copy `length` bytes from the
 * staged sector to the guest, refilling the staging buffer from the backend
 * at each sector boundary. Raises the IRQ at each cur_sector_num-sized
 * increment and at the end of the whole transfer.
 * Returns 0/length on success, -1 on error (exact returns elided in this
 * excerpt of the listing). */
1072 static int read_hd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1073 struct ide_drive * drive = get_selected_drive(channel);
1074 uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1077 PrintDebug(VM_NONE,VCORE_NONE, "Read HD data: transfer_index %llu transfer length %llu current sector numer %llu\n",
1078 drive->transfer_index, drive->transfer_length,
1079 drive->hd_state.cur_sector_num);
// Guest asked for more data than the command transfers
1081 if (drive->transfer_index >= drive->transfer_length) {
1082 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1083 drive->transfer_length, drive->transfer_index,
1084 drive->transfer_index + length);
// Accesses that straddle a sector boundary are unsupported
1089 if (data_offset + length > HD_SECTOR_SIZE) {
1090 PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1093 // For index==0, the read has been done in ata_read_sectors
1094 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1095 // advance to next sector and read it
1097 drive->current_lba++;
1099 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1100 PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
1106 PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
1107 *(uint32_t *)(drive->data_buf + data_offset),
1108 length, data_offset);
1110 memcpy(dst, drive->data_buf + data_offset, length);
1112 drive->transfer_index += length;
1115 /* This is the trigger for interrupt injection.
1116 * For read single sector commands we interrupt after every sector
1117 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1118 * cur_sector_num is configured depending on the operation we are currently running
1119 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1121 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1122 (drive->transfer_index == drive->transfer_length)) {
1123 if (drive->transfer_index < drive->transfer_length) {
1124 // An increment is complete, but there is still more data to be transferred...
1125 PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1126 channel->status.data_req = 1;
1128 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1129 // This was the final read of the request
1130 channel->status.data_req = 0;
1133 channel->status.ready = 1;
1134 channel->status.busy = 0;
1136 ide_raise_irq(ide, channel);
/* PIO data-port write path for a hard disk: accumulate guest bytes into the
 * staging buffer and flush each completed 512-byte sector to the backend,
 * advancing the LBA. Interrupt cadence mirrors read_hd_data().
 * Returns 0/length on success, -1 on error (exact returns elided in this
 * excerpt of the listing). */
1143 static int write_hd_data(uint8_t * src, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1144 struct ide_drive * drive = get_selected_drive(channel);
1145 uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1148 PrintDebug(VM_NONE,VCORE_NONE, "Write HD data: transfer_index %llu transfer length %llu current sector numer %llu\n",
1149 drive->transfer_index, drive->transfer_length,
1150 drive->hd_state.cur_sector_num);
// Guest is writing past the end of the command's transfer
1152 if (drive->transfer_index >= drive->transfer_length) {
1153 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1154 drive->transfer_length, drive->transfer_index,
1155 drive->transfer_index + length);
// Accesses that straddle a sector boundary are unsupported
1159 if (data_offset + length > HD_SECTOR_SIZE) {
1160 PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1163 // Copy data into our buffer - there will be room due to
1164 // (a) the ata_write test below is flushing sectors
1165 // (b) if we somehow get a sector-stradling write (an error), this will
1166 // be OK since the buffer itself is >1 sector in memory
1167 memcpy(drive->data_buf + data_offset, src, length);
1169 drive->transfer_index += length;
1171 if ((data_offset+length) >= HD_SECTOR_SIZE) {
1172 // Write out the sector we just finished
1173 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
1174 PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
1178 // go onto next sector
1179 drive->current_lba++;
1182 /* This is the trigger for interrupt injection.
1183 * For write single sector commands we interrupt after every sector
1184 * For multi sector writes we interrupt only at end of the cluster size (mult_sector_num)
1185 * cur_sector_num is configured depending on the operation we are currently running
1186 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1188 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1189 (drive->transfer_index == drive->transfer_length)) {
1190 if (drive->transfer_index < drive->transfer_length) {
1191 // An increment is complete, but there is still more data to be transferred...
1192 PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1193 channel->status.data_req = 1;
1195 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1196 // This was the final write of the request
1197 channel->status.data_req = 0;
1200 channel->status.ready = 1;
1201 channel->status.busy = 0;
1203 ide_raise_irq(ide, channel);
// Service a PIO read of CD (ATAPI) data: copy 'length' bytes from the
// drive's staging buffer into dst, refilling the buffer from the backend at
// each ATAPI_BLOCK_SIZE boundary, then update transfer/IRQ state and raise
// an interrupt. (Trailing lines of this function are not visible here.)
1211 static int read_cd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1212 struct ide_drive * drive = get_selected_drive(channel);
// Offset within the current 2048-byte ATAPI block
1213 uint64_t data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1214 // int req_offset = drive->transfer_index % drive->req_len;
// 0x28 is the ATAPI READ(10) opcode; skip verbose logging for bulk reads
1216 if (drive->cd_state.atapi_cmd != 0x28) {
1217 PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%llu) (req_len=%u)\n", length, drive->req_len);
1218 PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%llu, transfer idx=%llu\n", drive->transfer_length, drive->transfer_index);
// Guard against reading past the end of the programmed transfer
1223 if (drive->transfer_index >= drive->transfer_length) {
1224 PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1225 drive->transfer_length, drive->transfer_index,
1226 drive->transfer_index + length);
// At a block boundary (but not before the first block) refill the buffer
1231 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1232 if (atapi_update_data_buf(ide, channel) == -1) {
1233 PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
// NOTE(review): no visible check that data_offset + length stays within the
// staging buffer -- presumably the guest reads at most req_len per block; verify
1238 memcpy(dst, drive->data_buf + data_offset, length);
1240 drive->transfer_index += length;
1243 // Should the req_offset be recalculated here?????
1244 if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1245 if (drive->transfer_index < drive->transfer_length) {
1246 // An increment is complete, but there is still more data to be transferred...
1248 channel->status.data_req = 1;
// c_d = 0: still in the data (not command) phase
1250 drive->irq_flags.c_d = 0;
1252 // Update the request length in the cylinder regs
1253 if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1254 PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
1258 // This was the final read of the request
1261 channel->status.data_req = 0;
1262 channel->status.ready = 1;
// Completion: command/data flag set, release flag cleared
1264 drive->irq_flags.c_d = 1;
1265 drive->irq_flags.rel = 0;
1268 drive->irq_flags.io_dir = 1;
1269 channel->status.busy = 0;
// Tell the guest this chunk of the transfer has completed
1271 ide_raise_irq(ide, channel);
// Service a PIO read of IDENTIFY / IDENTIFY PACKET DEVICE result data:
// copy the next 'length' bytes of the prepared identify block from
// data_buf into dst and advance the transfer index.
1278 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1279 struct ide_drive * drive = get_selected_drive(channel);
// Identify data is immediately available: not busy, ready, no faults
1281 channel->status.busy = 0;
1282 channel->status.ready = 1;
1283 channel->status.write_fault = 0;
1284 channel->status.seek_complete = 1;
1285 channel->status.corrected = 0;
1286 channel->status.error = 0;
// NOTE(review): no visible bounds check that transfer_index + length stays
// within data_buf -- presumably the guest reads at most transfer_length; verify
1289 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1290 drive->transfer_index += length;
// Drop DRQ once the entire identify block has been consumed
1292 if (drive->transfer_index >= drive->transfer_length) {
1293 channel->status.data_req = 0;
// I/O hook for guest reads of the IDE data port (0x1f0 / 0x170).
// Dispatches to the identify-data, CD (ATAPI), or hard-disk read path
// based on the last command register value and the selected drive's type.
1301 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1302 struct ide_internal * ide = priv_data;
1303 struct ide_channel * channel = get_selected_channel(ide, port);
1304 struct ide_drive * drive = get_selected_drive(channel);
1306 //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
// IDENTIFY / IDENTIFY PACKET results bypass the normal data paths
1308 if ((channel->cmd_reg == ATA_IDENTIFY) ||
1309 (channel->cmd_reg == ATA_PIDENTIFY)) {
1310 return read_drive_id((uint8_t *)dst, length, ide, channel);
1313 if (drive->drive_type == BLOCK_CDROM) {
1314 if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1315 PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1318 } else if (drive->drive_type == BLOCK_DISK) {
1319 if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1320 PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
// No drive present: reads return zeros
1324 memset((uint8_t *)dst, 0, length);
1330 // For the write side, we care both about
1331 // direct PIO writes to a drive as well as
1332 // writes that pass a packet through to a CD
// I/O hook for guest writes to the IDE data port. For a CD drive this
// accumulates the ATAPI command packet bytes (following an ATA_PACKETCMD)
// and dispatches the packet once transfer_length bytes have arrived; for a
// disk it forwards the bytes to the hard-disk write path.
1333 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1334 struct ide_internal * ide = priv_data;
1335 struct ide_channel * channel = get_selected_channel(ide, port);
1336 struct ide_drive * drive = get_selected_drive(channel);
1338 PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n",
1339 port, *(uint32_t *)src, length);
1341 if (drive->drive_type == BLOCK_CDROM) {
1342 if (channel->cmd_reg == ATA_PACKETCMD) {
1343 // short command packet - no check for space...
1344 memcpy(drive->data_buf + drive->transfer_index, src, length);
1345 drive->transfer_index += length;
// Full packet received -- hand it to the ATAPI layer
1346 if (drive->transfer_index >= drive->transfer_length) {
1347 if (atapi_handle_packet(core, ide, channel) == -1) {
1348 PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1353 PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1356 } else if (drive->drive_type == BLOCK_DISK) {
1357 if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1358 PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1362 // nothing ... do not support writable cd
// I/O hook for the "standard" IDE register ports (control, features,
// sector count, the three LBA/CHS byte registers, drive select). Each
// write updates the shadow register state of BOTH drives on the channel;
// when LBA48 addressing is active, each register also feeds a two-write
// high/low byte latch tracked by the per-drive *_state flip-flops.
1368 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1369 struct ide_internal * ide = priv_data;
1370 struct ide_channel * channel = get_selected_channel(ide, port);
1371 struct ide_drive * drive = get_selected_drive(channel);
1374 PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1378 PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1381 // reset and interrupt enable
1383 case SEC_CTRL_PORT: {
1384 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1386 // only reset channel on a 0->1 reset bit transition
1387 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1388 channel_reset(channel);
1389 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1390 channel_reset_complete(channel);
1393 channel->ctrl_reg.val = tmp_ctrl->val;
1396 case PRI_FEATURES_PORT:
1397 case SEC_FEATURES_PORT:
1398 channel->features.val = *(uint8_t *)src;
1401 case PRI_SECT_CNT_PORT:
1402 case SEC_SECT_CNT_PORT:
1403 // update CHS and LBA28 state
1404 channel->drives[0].sector_count = *(uint8_t *)src;
1405 channel->drives[1].sector_count = *(uint8_t *)src;
1407 // update LBA48 state
1408 if (is_lba48(channel)) {
1409 uint16_t val = *(uint8_t*)src; // top bits zero;
1410 if (!channel->drives[0].lba48.sector_count_state) {
1411 channel->drives[0].lba48.sector_count = val<<8;
1413 channel->drives[0].lba48.sector_count |= val;
1415 channel->drives[0].lba48.sector_count_state ^= 1;
1416 if (!channel->drives[1].lba48.sector_count_state) {
1417 channel->drives[1].lba48.sector_count = val<<8;
1419 channel->drives[1].lba48.sector_count |= val;
// FIX: was drives[0], which toggled drive 0's latch a second time and never
// toggled drive 1's -- the parallel SECT_NUM/CYL cases correctly use drives[1]
1421 channel->drives[1].lba48.sector_count_state ^= 1;
1426 case PRI_SECT_NUM_PORT:
1427 case SEC_SECT_NUM_PORT:
1428 // update CHS and LBA28 state
1429 channel->drives[0].sector_num = *(uint8_t *)src;
1430 channel->drives[1].sector_num = *(uint8_t *)src;
1432 // update LBA48 state
1433 if (is_lba48(channel)) {
1434 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1435 if (!channel->drives[0].lba48.lba41_state) {
1436 channel->drives[0].lba48.lba |= val<<24;
1438 channel->drives[0].lba48.lba |= val;
1440 channel->drives[0].lba48.lba41_state ^= 1;
1441 if (!channel->drives[1].lba48.lba41_state) {
1442 channel->drives[1].lba48.lba |= val<<24;
1444 channel->drives[1].lba48.lba |= val;
1446 channel->drives[1].lba48.lba41_state ^= 1;
1450 case PRI_CYL_LOW_PORT:
1451 case SEC_CYL_LOW_PORT:
1452 // update CHS and LBA28 state
1453 channel->drives[0].cylinder_low = *(uint8_t *)src;
1454 channel->drives[1].cylinder_low = *(uint8_t *)src;
1456 // update LBA48 state
1457 if (is_lba48(channel)) {
1458 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1459 if (!channel->drives[0].lba48.lba52_state) {
1460 channel->drives[0].lba48.lba |= val<<32;
1462 channel->drives[0].lba48.lba |= val<<8;
1464 channel->drives[0].lba48.lba52_state ^= 1;
1465 if (!channel->drives[1].lba48.lba52_state) {
1466 channel->drives[1].lba48.lba |= val<<32;
1468 channel->drives[1].lba48.lba |= val<<8;
1470 channel->drives[1].lba48.lba52_state ^= 1;
1475 case PRI_CYL_HIGH_PORT:
1476 case SEC_CYL_HIGH_PORT:
1477 // update CHS and LBA28 state
1478 channel->drives[0].cylinder_high = *(uint8_t *)src;
1479 channel->drives[1].cylinder_high = *(uint8_t *)src;
1481 // update LBA48 state
1482 if (is_lba48(channel)) {
1483 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1484 if (!channel->drives[0].lba48.lba63_state) {
1485 channel->drives[0].lba48.lba |= val<<40;
1487 channel->drives[0].lba48.lba |= val<<16;
1489 channel->drives[0].lba48.lba63_state ^= 1;
1490 if (!channel->drives[1].lba48.lba63_state) {
1491 channel->drives[1].lba48.lba |= val<<40;
1493 channel->drives[1].lba48.lba |= val<<16;
1495 channel->drives[1].lba48.lba63_state ^= 1;
1500 case PRI_DRV_SEL_PORT:
1501 case SEC_DRV_SEL_PORT: {
1502 struct ide_drive_head_reg nh, oh;
1504 oh.val = channel->drive_head.val;
1505 channel->drive_head.val = nh.val = *(uint8_t *)src;
// A change of drive selection / addressing mode bits invalidates any
// half-programmed LBA48 latches on both drives
1508 if ((oh.val & 0xe0) != (nh.val & 0xe0)) {
1509 // reset LBA48 state
1510 channel->drives[0].lba48.sector_count_state=0;
1511 channel->drives[0].lba48.lba41_state=0;
1512 channel->drives[0].lba48.lba52_state=0;
1513 channel->drives[0].lba48.lba63_state=0;
1514 channel->drives[1].lba48.sector_count_state=0;
1515 channel->drives[1].lba48.lba41_state=0;
1516 channel->drives[1].lba48.lba52_state=0;
1517 channel->drives[1].lba48.lba63_state=0;
1521 drive = get_selected_drive(channel);
1523 // Selecting a non-present device is a no-no
1524 if (drive->drive_type == BLOCK_NONE) {
1525 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1526 channel->error_reg.abort = 1;
1527 channel->status.error = 1;
1529 channel->status.busy = 0;
1530 channel->status.ready = 1;
1531 channel->status.data_req = 0;
1532 channel->status.error = 0;
1533 channel->status.seek_complete = 1;
1535 channel->dma_status.active = 0;
1536 channel->dma_status.err = 0;
1542 PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
// I/O hook for guest reads of the standard IDE register ports. Returns the
// shadowed register values; the unused address registers read as 0xff, and
// a missing drive reads back 0 (or 0xa0 for the drive-select register).
1549 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1550 struct ide_internal * ide = priv_data;
1551 struct ide_channel * channel = get_selected_channel(ide, port);
1552 struct ide_drive * drive = get_selected_drive(channel);
1555 PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1559 PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1561 if ((port == PRI_ADDR_REG_PORT) ||
1562 (port == SEC_ADDR_REG_PORT)) {
1563 // unused, return 0xff
1564 *(uint8_t *)dst = 0xff;
1569 // if no drive is present just return 0 + reserved bits
1570 if (drive->drive_type == BLOCK_NONE) {
1571 if ((port == PRI_DRV_SEL_PORT) ||
1572 (port == SEC_DRV_SEL_PORT)) {
// 0xa0 keeps the always-set bits 7 and 5 of the drive/head register
1573 *(uint8_t *)dst = 0xa0;
1575 *(uint8_t *)dst = 0;
1583 // This is really the error register.
1584 case PRI_FEATURES_PORT:
1585 case SEC_FEATURES_PORT:
1586 *(uint8_t *)dst = channel->error_reg.val;
1589 case PRI_SECT_CNT_PORT:
1590 case SEC_SECT_CNT_PORT:
1591 *(uint8_t *)dst = drive->sector_count;
1594 case PRI_SECT_NUM_PORT:
1595 case SEC_SECT_NUM_PORT:
1596 *(uint8_t *)dst = drive->sector_num;
1599 case PRI_CYL_LOW_PORT:
1600 case SEC_CYL_LOW_PORT:
1601 *(uint8_t *)dst = drive->cylinder_low;
1605 case PRI_CYL_HIGH_PORT:
1606 case SEC_CYL_HIGH_PORT:
1607 *(uint8_t *)dst = drive->cylinder_high;
1610 case PRI_DRV_SEL_PORT:
1611 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1612 *(uint8_t *)dst = channel->drive_head.val;
1619 // Something about lowering interrupts here....
1620 *(uint8_t *)dst = channel->status.val;
1624 PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1628 PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
// Reset one drive's emulated register and transfer state to power-on
// defaults; the backing-store connection is established later by connect_fn.
1635 static void init_drive(struct ide_drive * drive) {
// Power-on/reset values for the sector count/number task-file registers
1637 drive->sector_count = 0x01;
1638 drive->sector_num = 0x01;
1639 drive->cylinder = 0x0000;
// No backend attached yet
1641 drive->drive_type = BLOCK_NONE;
1643 memset(drive->model, 0, sizeof(drive->model));
1645 drive->transfer_index = 0;
1646 drive->transfer_length = 0;
1647 memset(drive->data_buf, 0, sizeof(drive->data_buf));
// Geometry is filled in when a disk backend is connected
1649 drive->num_cylinders = 0;
1650 drive->num_heads = 0;
1651 drive->num_sectors = 0;
1654 drive->private_data = NULL;
// Reset a channel's task-file registers, bus-master DMA state, and both
// attached drive slots.
1658 static void init_channel(struct ide_channel * channel) {
// Error register reads 0x01 (diagnostics passed) after reset
1661 channel->error_reg.val = 0x01;
1663 //** channel->features = 0x0;
1665 channel->drive_head.val = 0x00;
1666 channel->status.val = 0x00;
1667 channel->cmd_reg = 0x00;
// 0x08: reserved bit of the device-control register held set
1668 channel->ctrl_reg.val = 0x08;
1670 channel->dma_cmd.val = 0;
1671 channel->dma_status.val = 0;
1672 channel->dma_prd_addr = 0;
1673 channel->dma_tbl_index = 0;
1675 for (i = 0; i < 2; i++) {
1676 init_drive(&(channel->drives[i]));
// PCI config-space write callback for the IDE controller's PCI function.
// Currently only logs the update; the interesting case is the guest
// programming the interrupt line register.
1682 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1683 PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1685 struct ide_internal * ide = (struct ide_internal *)(private_data);
// fixed: removed stray 'info' argument -- PrintDebug takes (vm, vcore, fmt,
// ...) as used everywhere else in this file, and no 'info' is in scope
1687 PrintDebug(VM_NONE, VCORE_NONE, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
// Initialize both IDE channels with the legacy ISA IRQ assignments
// (14 for primary, 15 for secondary).
1693 static int init_ide_state(struct ide_internal * ide) {
1696 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1699 init_channel(&(ide->channels[0]));
1700 ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1702 init_channel(&(ide->channels[1]));
1703 ide->channels[1].irq = SEC_DEFAULT_IRQ ;
// Device-manager teardown hook: release the IDE controller state.
// (Body largely not visible in this view.)
1712 static int ide_free(struct ide_internal * ide) {
1714 // deregister from PCI?
1721 #ifdef V3_CONFIG_CHECKPOINT
1723 #include <palacios/vmm_sprintf.h>
// Checkpoint-save hook: serialize the controller, then each channel's
// register file, then each drive's transfer state and type-specific state
// into named checkpoint contexts ("<id>", "<id>-<ch>", "<id>-<ch>-<drv>").
// Returns 0 on success, -1 via the savefailout path on any failure.
1725 static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1726 struct ide_internal * ide = (struct ide_internal *)private_data;
1727 struct v3_chkpt_ctx *ctx=0;
1733 ctx=v3_chkpt_open_ctx(chkpt,id);
1736 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
1740 // nothing saved yet
1742 v3_chkpt_close_ctx(ctx);ctx=0;
// Per-channel register state
1745 for (ch_num = 0; ch_num < 2; ch_num++) {
1746 struct ide_channel * ch = &(ide->channels[ch_num]);
1748 snprintf(buf, 128, "%s-%d", id, ch_num);
1750 ctx = v3_chkpt_open_ctx(chkpt, buf);
1753 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
1757 V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
1758 V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
1759 V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
1760 V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
1761 V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
1762 V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
1763 V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
1764 V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
1765 V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
1766 V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);
1770 v3_chkpt_close_ctx(ctx); ctx=0;
// Per-drive transfer and type-specific state
1772 for (drive_num = 0; drive_num < 2; drive_num++) {
1773 struct ide_drive * drive = &(ch->drives[drive_num]);
1775 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1777 ctx = v3_chkpt_open_ctx(chkpt, buf);
1780 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
1784 V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
1785 V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
1786 V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
1787 V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);
1789 V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
1790 V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
1791 V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);
1793 V3_CHKPT_SAVE(ctx, "DATA_BUF", drive->data_buf, savefailout);
1796 /* For now we'll just pack the type specific data at the end... */
1797 /* We should probably add a new context here in the future... */
1798 if (drive->drive_type == BLOCK_CDROM) {
1799 V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
1800 V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
1801 V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
1802 } else if (drive->drive_type == BLOCK_DISK) {
1803 V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
1804 V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
1805 V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
1806 } else if (drive->drive_type == BLOCK_NONE) {
1807 // no drive connected, so no data
1809 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
// LBA48 latch state must survive checkpoint/restore mid-programming
1813 V3_CHKPT_SAVE(ctx, "LBA48_LBA", drive->lba48.lba, savefailout);
1814 V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, savefailout);
1815 V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, savefailout);
1816 V3_CHKPT_SAVE(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, savefailout);
1817 V3_CHKPT_SAVE(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, savefailout);
1818 V3_CHKPT_SAVE(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, savefailout);
1820 v3_chkpt_close_ctx(ctx); ctx=0;
// savefailout path: report and close any context still open
1828 PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
1829 if (ctx) {v3_chkpt_close_ctx(ctx); }
// Checkpoint-load hook: mirror image of ide_save_extended. Restores the
// controller, each channel's registers, and each drive's transfer and
// type-specific state from the same named contexts the save hook wrote.
// Returns 0 on success, -1 via the loadfailout path on any failure.
1835 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1836 struct ide_internal * ide = (struct ide_internal *)private_data;
1837 struct v3_chkpt_ctx *ctx=0;
1842 ctx=v3_chkpt_open_ctx(chkpt,id);
1845 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1849 // nothing saved yet
1851 v3_chkpt_close_ctx(ctx);ctx=0;
// Per-channel register state
1854 for (ch_num = 0; ch_num < 2; ch_num++) {
1855 struct ide_channel * ch = &(ide->channels[ch_num]);
1857 snprintf(buf, 128, "%s-%d", id, ch_num);
1859 ctx = v3_chkpt_open_ctx(chkpt, buf);
1862 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1866 V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1867 V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1868 V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1869 V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1870 V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1871 V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1872 V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1873 V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1874 V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1875 V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1877 v3_chkpt_close_ctx(ctx); ctx=0;
// Per-drive transfer and type-specific state
1879 for (drive_num = 0; drive_num < 2; drive_num++) {
1880 struct ide_drive * drive = &(ch->drives[drive_num]);
1882 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1884 ctx = v3_chkpt_open_ctx(chkpt, buf);
1887 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1891 V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1892 V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1893 V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1894 V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1896 V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1897 V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1898 V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1900 V3_CHKPT_LOAD(ctx, "DATA_BUF", drive->data_buf, loadfailout);
1903 /* For now we'll just pack the type specific data at the end... */
1904 /* We should probably add a new context here in the future... */
1905 if (drive->drive_type == BLOCK_CDROM) {
1906 V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1907 V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1908 V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1909 } else if (drive->drive_type == BLOCK_DISK) {
1910 V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1911 V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1912 V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1913 } else if (drive->drive_type == BLOCK_NONE) {
1914 // no drive connected, so no data
1916 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
// LBA48 latch state restored so a mid-programming checkpoint resumes cleanly
1920 V3_CHKPT_LOAD(ctx, "LBA48_LBA", drive->lba48.lba, loadfailout);
1921 V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, loadfailout);
1922 V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, loadfailout);
1923 V3_CHKPT_LOAD(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, loadfailout);
1924 V3_CHKPT_LOAD(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, loadfailout);
1925 V3_CHKPT_LOAD(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, loadfailout);
// loadfailout path: report and close any context still open
1933 PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1934 if (ctx) {v3_chkpt_close_ctx(ctx); }
// Device-manager operations table; checkpoint hooks are compiled in only
// when V3_CONFIG_CHECKPOINT is enabled.
1944 static struct v3_device_ops dev_ops = {
1945 .free = (int (*)(void *))ide_free,
1946 #ifdef V3_CONFIG_CHECKPOINT
1947 .save_extended = ide_save_extended,
1948 .load_extended = ide_load_extended
// Block-frontend connect callback: attach a backend (cdrom or hd) described
// by the config tree to a <bus_num, drive_num> slot of this controller.
1955 static int connect_fn(struct v3_vm_info * vm,
1956 void * frontend_data,
1957 struct v3_dev_blk_ops * ops,
1958 v3_cfg_tree_t * cfg,
1959 void * private_data) {
1960 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1961 struct ide_channel * channel = NULL;
1962 struct ide_drive * drive = NULL;
1964 char * bus_str = v3_cfg_val(cfg, "bus_num");
1965 char * drive_str = v3_cfg_val(cfg, "drive_num");
1966 char * type_str = v3_cfg_val(cfg, "type");
1967 char * model_str = v3_cfg_val(cfg, "model");
1969 uint_t drive_num = 0;
1972 if ((!type_str) || (!drive_str) || (!bus_str)) {
1973 PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
// NOTE(review): no visible range check on bus_num/drive_num before indexing
// the 2-entry channels[]/drives[] arrays -- a bad config value would index
// out of bounds; verify a check exists in the non-visible lines
1977 bus_num = atoi(bus_str);
1978 drive_num = atoi(drive_str);
1980 channel = &(ide->channels[bus_num]);
1981 drive = &(channel->drives[drive_num]);
1983 if (drive->drive_type != BLOCK_NONE) {
1984 PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1988 if (model_str != NULL) {
1989 strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1992 if (strcasecmp(type_str, "cdrom") == 0) {
1993 drive->drive_type = BLOCK_CDROM;
// ATA identify model strings are space-padded to 40 characters
1995 while (strlen((char *)(drive->model)) < 40) {
1996 strcat((char*)(drive->model), " ");
1999 } else if (strcasecmp(type_str, "hd") == 0) {
2000 drive->drive_type = BLOCK_DISK;
2002 drive->hd_state.accessed = 0;
2003 drive->hd_state.mult_sector_num = 1;
// Fixed translated geometry: 63 sectors/track, 16 heads; cylinders derived
// from the backend capacity
2005 drive->num_sectors = 63;
2006 drive->num_heads = 16;
2007 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
2009 PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
2016 // Hardcode this for now, but its not a good idea....
2017 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
2020 drive->private_data = private_data;
// Device-manager init entry point: allocate controller state, find the PCI
// bus and southbridge, register the device, hook all legacy IDE I/O ports,
// register the PCI function (Intel PIIX-style IDE), and register this
// device as a block frontend so backends can connect via connect_fn.
2028 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
2029 struct ide_internal * ide = NULL;
2030 char * dev_id = v3_cfg_val(cfg, "ID");
2033 PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
2035 ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
2038 PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
2042 memset(ide, 0, sizeof(struct ide_internal));
// PCI attachment is optional; when present we also need the southbridge
2045 ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
2047 if (ide->pci_bus != NULL) {
2048 struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
2051 PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
2056 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
2058 PrintError(vm,VCORE_NONE,"Strange - you don't have a PCI bus\n");
2061 PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
2063 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
2066 PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
2071 if (init_ide_state(ide) == -1) {
2072 PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
2073 v3_remove_device(dev);
2077 PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
// Primary channel legacy port hooks (0x1f0-0x1f7)
2079 ret |= v3_dev_hook_io(dev, PRI_DATA_PORT,
2080 &read_data_port, &write_data_port);
2081 ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT,
2082 &read_port_std, &write_port_std);
2083 ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
2084 &read_port_std, &write_port_std);
2085 ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
2086 &read_port_std, &write_port_std);
2087 ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
2088 &read_port_std, &write_port_std);
2089 ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
2090 &read_port_std, &write_port_std);
2091 ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
2092 &read_port_std, &write_port_std);
2093 ret |= v3_dev_hook_io(dev, PRI_CMD_PORT,
2094 &read_port_std, &write_cmd_port);
// Secondary channel legacy port hooks (0x170-0x177)
2096 ret |= v3_dev_hook_io(dev, SEC_DATA_PORT,
2097 &read_data_port, &write_data_port);
2098 ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT,
2099 &read_port_std, &write_port_std);
2100 ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
2101 &read_port_std, &write_port_std);
2102 ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
2103 &read_port_std, &write_port_std);
2104 ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
2105 &read_port_std, &write_port_std);
2106 ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
2107 &read_port_std, &write_port_std);
2108 ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
2109 &read_port_std, &write_port_std);
2110 ret |= v3_dev_hook_io(dev, SEC_CMD_PORT,
2111 &read_port_std, &write_cmd_port);
// Control and (unused) address registers
2114 ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT,
2115 &read_port_std, &write_port_std);
2117 ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT,
2118 &read_port_std, &write_port_std);
2121 ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
2122 &read_port_std, &write_port_std);
2124 ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
2125 &read_port_std, &write_port_std);
2129 PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
2130 v3_remove_device(dev);
2136 struct v3_pci_bar bars[6];
2137 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
2138 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
2139 struct pci_device * pci_dev = NULL;
2142 V3_Print(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
2144 for (i = 0; i < 6; i++) {
2145 bars[i].type = PCI_BAR_NONE;
// BAR 4 is the bus-master IDE (DMA) I/O region; base assigned dynamically
2148 bars[4].type = PCI_BAR_IO;
2149 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
2150 bars[4].default_base_port = -1;
2151 bars[4].num_ports = 16;
2153 bars[4].io_read = read_dma_port;
2154 bars[4].io_write = write_dma_port;
2155 bars[4].private_data = ide;
// Register as function 1 of the southbridge's PCI device slot
2157 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
2159 pci_config_update, NULL, NULL, NULL, ide);
2161 if (pci_dev == NULL) {
2162 PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i);
2163 v3_remove_device(dev);
2167 /* This is for CMD646 devices
2168 pci_dev->config_header.vendor_id = 0x1095;
2169 pci_dev->config_header.device_id = 0x0646;
2170 pci_dev->config_header.revision = 0x8f07;
// Intel vendor/device IDs (0x8086:0x7010 -- PIIX3 IDE function)
2173 pci_dev->config_header.vendor_id = 0x8086;
2174 pci_dev->config_header.device_id = 0x7010;
2175 pci_dev->config_header.revision = 0x00;
2177 pci_dev->config_header.prog_if = 0x80; // Master IDE device
2178 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
2179 pci_dev->config_header.class = PCI_CLASS_STORAGE;
2181 pci_dev->config_header.command = 0;
2182 pci_dev->config_header.status = 0x0280;
2184 ide->ide_pci = pci_dev;
// Finally expose the controller as a block frontend target
2189 if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
2190 PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
2191 v3_remove_device(dev);
2196 PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
2202 device_register("IDE", ide_init)
2207 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num,
2208 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2210 struct ide_internal * ide = ide_data;
2211 struct ide_channel * channel = &(ide->channels[channel_num]);
2212 struct ide_drive * drive = &(channel->drives[drive_num]);
2214 if (drive->drive_type == BLOCK_NONE) {
2218 *cylinders = drive->num_cylinders;
2219 *heads = drive->num_heads;
2220 *sectors = drive->num_sectors;