2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
/* Debug output is compiled out entirely unless V3_CONFIG_DEBUG_IDE is set.
 * (The #endif was missing in the extracted copy -- restored here.) */
#ifndef V3_CONFIG_DEBUG_IDE
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

/* Legacy ISA interrupt lines for the two IDE channels */
#define PRI_DEFAULT_IRQ 14
#define SEC_DEFAULT_IRQ 15

/* Primary channel: legacy command-block ports (0x1f0-0x1f7)
 * plus the control-block ports (0x3f6-0x3f7) */
#define PRI_DATA_PORT         0x1f0
#define PRI_FEATURES_PORT     0x1f1
#define PRI_SECT_CNT_PORT     0x1f2
#define PRI_SECT_NUM_PORT     0x1f3
#define PRI_CYL_LOW_PORT      0x1f4
#define PRI_CYL_HIGH_PORT     0x1f5
#define PRI_DRV_SEL_PORT      0x1f6
#define PRI_CMD_PORT          0x1f7
#define PRI_CTRL_PORT         0x3f6
#define PRI_ADDR_REG_PORT     0x3f7

/* Secondary channel: same layout at the secondary decode */
#define SEC_DATA_PORT         0x170
#define SEC_FEATURES_PORT     0x171
#define SEC_SECT_CNT_PORT     0x172
#define SEC_SECT_NUM_PORT     0x173
#define SEC_CYL_LOW_PORT      0x174
#define SEC_CYL_HIGH_PORT     0x175
#define SEC_DRV_SEL_PORT      0x176
#define SEC_CMD_PORT          0x177
#define SEC_CTRL_PORT         0x376
#define SEC_ADDR_REG_PORT     0x377

/* Default PCI bus-master DMA register blocks (8 bytes per channel) */
#define PRI_DEFAULT_DMA_PORT  0xc000
#define SEC_DEFAULT_DMA_PORT  0xc008

/* Local staging buffer for PIO/DMA transfers; sized for one ATAPI block
 * (which is also a multiple of the 512-byte hard-disk sector size) */
#define DATA_BUFFER_SIZE      2048

#define ATAPI_BLOCK_SIZE      2048
#define HD_SECTOR_SIZE        512
/* Debug-name tables for the I/O ports.  The first eight entries of the
 * channel tables are indexed by (port - *_DATA_PORT); entries 8 and 9
 * name the control/address ports, which live in a separate port range
 * (see io_port_to_str below). */
static const char * ide_pri_port_strs[] = {
    "PRI_DATA",    "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM",
    "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL",  "PRI_CMD",
    "PRI_CTRL",    "PRI_ADDR_REG"
};

static const char * ide_sec_port_strs[] = {
    "SEC_DATA",    "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM",
    "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL",  "SEC_CMD",
    "SEC_CTRL",    "SEC_ADDR_REG"
};

/* Bus-master DMA register names, indexed by register offset.
 * Offsets 1 and 3 are reserved and have no name (NULL). */
static const char * ide_dma_port_strs[] = {
    "DMA_CMD",  NULL,       "DMA_STATUS", NULL,
    "DMA_PRD0", "DMA_PRD1", "DMA_PRD2",   "DMA_PRD3"
};
/* Kind of backing device attached to a drive slot. */
typedef enum {
    BLOCK_NONE,    /* no media present        */
    BLOCK_DISK,    /* hard disk (ATA)         */
    BLOCK_CDROM    /* CD-ROM drive (ATAPI)    */
} v3_block_type_t;
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
// ATAPI (CD-ROM) per-drive state.
// NOTE(review): interior lines are missing from this extract -- the
// current packet-command byte (used elsewhere as cd_state.atapi_cmd)
// is not visible here; confirm against the full source.
105 struct ide_cd_state {
    // Sense data reported to the guest via REQUEST SENSE
106 struct atapi_sense_data sense;
109 struct atapi_error_recovery err_recovery;
// ATA hard-disk per-drive state.
// NOTE(review): some fields at the elided line numbers are not visible
// in this extract.
112 struct ide_hd_state {
115 /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116 uint32_t mult_sector_num;
118 /* This is the current op sector size:
119 * for multiple sector ops this equals mult_sector_num
120 * for standard ops this equals 1
122 uint64_t cur_sector_num;
/*
 * Interior of struct ide_drive.  NOTE(review): the struct's opening and
 * closing lines, several fields, and the opening lines of the anonymous
 * register-overlay structs/unions are missing from this extract -- the
 * "} __attribute__((packed)));" lines below close wrappers whose
 * declarations are not visible here.  Confirm against the full source.
 */
128 v3_block_type_t drive_type;
    // Backend block-device callbacks used to reach the host image
130 struct v3_dev_blk_ops * ops;
    // Media-specific state; which one is meaningful depends on drive_type
133 struct ide_cd_state cd_state;
134 struct ide_hd_state hd_state;
139 // Where we are in the data transfer
140 uint64_t transfer_index;
142 // the length of a transfer
143 // calculated for easy access
144 uint64_t transfer_length;
    // Next logical block the drive will access
146 uint64_t current_lba;
148 // We have a local data buffer that we use for IO port accesses
149 uint8_t data_buf[DATA_BUFFER_SIZE];
    // Drive geometry (heads field not visible in this extract)
152 uint32_t num_cylinders;
154 uint32_t num_sectors;
    // LBA48 shadow values: the taskfile registers are written twice
    // (high byte first), tracked by the *_state flags below
160 uint16_t sector_count; // for LBA48
161 uint8_t sector_count_state; // two step write to 1f2/172 (high first)
162 uint8_t lba41_state; // two step write to 1f3
163 uint8_t lba52_state; // two step write to 1f4
164 uint8_t lba63_state; // two step write to 15
    // Taskfile register overlays: ATA and ATAPI meanings share storage
170 uint8_t sector_count; // 0x1f2,0x172 (ATA)
171 struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
172 } __attribute__((packed));
176 uint8_t sector_num; // 0x1f3,0x173
178 } __attribute__((packed));
185 uint8_t cylinder_low; // 0x1f4,0x174
186 uint8_t cylinder_high; // 0x1f5,0x175
187 } __attribute__((packed));
192 } __attribute__((packed));
195 // The transfer length requested by the CPU
197 } __attribute__((packed));
/*
 * Interior of struct ide_channel (opening line not visible in this
 * extract).  Holds the two drive slots plus the per-channel taskfile,
 * control, and bus-master DMA registers.
 */
204 struct ide_drive drives[2];
    // Shared taskfile registers (one set per channel, not per drive)
207 struct ide_error_reg error_reg; // [read] 0x1f1,0x171
209 struct ide_features_reg features;
211 struct ide_drive_head_reg drive_head; // 0x1f6,0x176
213 struct ide_status_reg status; // [read] 0x1f7,0x177
214 uint8_t cmd_reg; // [write] 0x1f7,0x177
216 int irq; // this is temporary until we add PCI support
219 struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
    // Bus-master DMA register block.  NOTE(review): the union/struct
    // wrappers that overlay dma_ports[] with the named registers are
    // partly missing here (see the unmatched closing braces below).
222 uint8_t dma_ports[8];
224 struct ide_dma_cmd_reg dma_cmd;
226 struct ide_dma_status_reg dma_status;
228 uint32_t dma_prd_addr;
229 } __attribute__((packed));
230 } __attribute__((packed));
    // Index of the next PRD (scatter/gather) entry to consume during DMA
232 uint32_t dma_tbl_index;
/*
 * Top-level device state: both channels plus the PCI/southbridge glue
 * and a back-pointer to the owning VM.  (Closing brace not visible in
 * this extract.)
 */
237 struct ide_internal {
238 struct ide_channel channels[2];
    // PCI attachment state -- NOTE(review): whether these may be NULL in
    // a legacy (non-PCI) configuration is not visible in this extract
240 struct v3_southbridge * southbridge;
241 struct vm_device * pci_bus;
243 struct pci_device * ide_pci;
245 struct v3_vm_info * vm;
252 /* Utility functions */
/*
 * Byte-order helpers.  Each swap reinterprets the value's in-memory
 * bytes and reassembles them most-significant-first, so the result
 * depends on host byte order exactly as the originals did (on the
 * little-endian hosts Palacios targets these are plain byte swaps).
 * The le_to_be variants are identical because a byte swap is its own
 * inverse.
 */
static inline uint16_t be_to_le_16(const uint16_t val) {
    const uint8_t * bytes = (const uint8_t *)&val;
    uint16_t out;

    out = (uint16_t)(bytes[0] << 8);
    out |= bytes[1];

    return out;
}

static inline uint16_t le_to_be_16(const uint16_t val) {
    return be_to_le_16(val);
}

static inline uint32_t be_to_le_32(const uint32_t val) {
    const uint8_t * bytes = (const uint8_t *)&val;
    uint32_t out = 0;
    int i;

    for (i = 0; i < 4; i++) {
	out = (out << 8) | bytes[i];
    }

    return out;
}

static inline uint32_t le_to_be_32(const uint32_t val) {
    return be_to_le_32(val);
}
274 static inline int is_lba28(struct ide_channel * channel) {
275 return channel->drive_head.lba_mode && channel->drive_head.rsvd1 && channel->drive_head.rsvd2;
278 static inline int is_lba48(struct ide_channel * channel) {
279 return channel->drive_head.lba_mode && !channel->drive_head.rsvd1 && !channel->drive_head.rsvd2;
282 static inline int is_chs(struct ide_channel * channel) {
283 return !channel->drive_head.lba_mode;
/* Decode which channel a port belongs to: 0 = primary, 1 = secondary.
 * Matches the legacy command block (0x1f0-7 / 0x170-7), the control
 * block (0x3f6-7 / 0x376-7), and the default bus-master DMA block
 * (0xc000-7 / 0xc008-f).  Returns -1 for unrecognized ports.
 * The return statements were missing in the extracted copy -- restored.
 * (Parameter type uses uint16_t for consistency with read_dma_port;
 * identical representation to the ushort_t used elsewhere.) */
static inline int get_channel_index(uint16_t port) {
    if (((port & 0xfff8) == 0x1f0) ||
	((port & 0xfffe) == 0x3f6) ||
	((port & 0xfff8) == 0xc000)) {
	return 0;
    } else if (((port & 0xfff8) == 0x170) ||
	       ((port & 0xfffe) == 0x376) ||
	       ((port & 0xfff8) == 0xc008)) {
	return 1;
    }

    return -1;
}
300 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
301 int channel_idx = get_channel_index(port);
302 return &(ide->channels[channel_idx]);
305 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
306 return &(channel->drives[channel->drive_head.drive_sel]);
313 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
314 if (channel->ctrl_reg.irq_disable == 0) {
316 PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
318 channel->dma_status.int_gen = 1;
319 v3_raise_irq(ide->vm, channel->irq);
321 PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
326 static void drive_reset(struct ide_drive * drive) {
327 drive->sector_count = 0x01;
328 drive->sector_num = 0x01;
330 PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
332 if (drive->drive_type == BLOCK_CDROM) {
333 drive->cylinder = 0xeb14;
335 drive->cylinder = 0x0000;
336 //drive->hd_state.accessed = 0;
340 memset(drive->data_buf, 0, sizeof(drive->data_buf));
341 drive->transfer_index = 0;
343 // Send the reset signal to the connected device callbacks
344 // channel->drives[0].reset();
345 // channel->drives[1].reset();
348 static void channel_reset(struct ide_channel * channel) {
350 // set busy and seek complete flags
351 channel->status.val = 0x90;
354 channel->error_reg.val = 0x01;
357 channel->cmd_reg = 0; // NOP
359 channel->ctrl_reg.irq_disable = 0;
362 static void channel_reset_complete(struct ide_channel * channel) {
363 channel->status.busy = 0;
364 channel->status.ready = 1;
366 channel->drive_head.head_num = 0;
368 drive_reset(&(channel->drives[0]));
369 drive_reset(&(channel->drives[1]));
373 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
375 PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");
377 channel->status.val = 0x41; // Error + ready
378 channel->error_reg.val = 0x04; // No idea...
380 ide_raise_irq(ide, channel);
// Forward declarations for the bus-master DMA engines defined below;
// the command handlers in write_cmd_port() call into them.
384 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
385 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
388 /* ATAPI functions */
396 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
397 struct ide_dma_prd prd_entry;
400 V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
403 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
406 ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
408 if (ret != sizeof(struct ide_dma_prd)) {
409 PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
413 V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
415 (prd_entry.size == 0) ? 0x10000 : prd_entry.size,
416 prd_entry.end_of_table);
418 if (prd_entry.end_of_table) {
/*
 * dma_read -- bus-master DMA transfer from the emulated device into
 * guest memory.  Walks the guest's PRD table starting at
 * channel->dma_prd_addr, refilling drive->data_buf one hard-disk sector
 * or one ATAPI block at a time and copying it to the guest-physical
 * region each PRD entry describes.  On the final (end-of-table) entry
 * the status registers are updated and an IRQ is raised.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): numerous lines (returns, braces, else-arms, local
 * declarations such as "int ret") are missing from this extract --
 * compare against the full source before relying on exact control flow.
 */
430 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
431 struct ide_drive * drive = get_selected_drive(channel);
432 // This is at top level scope to do the EOT test at the end
433 struct ide_dma_prd prd_entry = {};
434 uint_t bytes_left = drive->transfer_length;
436 // Read in the data buffer....
437 // Read a sector/block at a time until the prd entry is full.
439 #ifdef V3_CONFIG_DEBUG_IDE
440 print_prd_table(ide, channel);
443 PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
445 // Loop through the disk data
446 while (bytes_left > 0) {
447 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
448 uint_t prd_bytes_left = 0;
449 uint_t prd_offset = 0;
452 PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
454 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
456 if (ret != sizeof(struct ide_dma_prd)) {
457 PrintError(core->vm_info, core, "Could not read PRD\n");
461 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
462 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
464 // loop through the PRD data....
466 if (prd_entry.size == 0) {
467 // a size of 0 means 64k
468 prd_bytes_left = 0x10000;
470 prd_bytes_left = prd_entry.size;
    // Fill the current PRD region one buffer-load at a time
474 while (prd_bytes_left > 0) {
475 uint_t bytes_to_write = 0;
    // Hard disk: stage one 512-byte sector into data_buf
477 if (drive->drive_type == BLOCK_DISK) {
478 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
481 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
482 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
485 } else if (drive->drive_type == BLOCK_CDROM) {
    // ATAPI data command: stage one 2048-byte block
486 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
487 bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
489 if (atapi_read_chunk(ide, channel) == -1) {
490 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
    // ATAPI non-data op: DMA the staged response bytes directly and
    // complete the command immediately (status/IRQ updated inline below)
495 PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
500 //V3_Print(core->vm_info, core, "DMA of command packet\n");
502 bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
503 prd_bytes_left = bytes_to_write;
506 // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
507 cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset,
508 bytes_to_write, drive->data_buf);
515 drive->transfer_index += bytes_to_write;
    // Command complete: clear BSY/DRQ, set DRDY, stop the DMA engine
517 channel->status.busy = 0;
518 channel->status.ready = 1;
519 channel->status.data_req = 0;
520 channel->status.error = 0;
521 channel->status.seek_complete = 1;
523 channel->dma_status.active = 0;
524 channel->dma_status.err = 0;
526 ide_raise_irq(ide, channel);
532 PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n",
533 (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
535 drive->current_lba++;
    // Copy the staged buffer into the guest region for this PRD entry
537 ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
539 if (ret != bytes_to_write) {
540 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
544 PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
546 drive->transfer_index += ret;
547 prd_bytes_left -= ret;
    // Current PRD entry exhausted; advance to the next descriptor
552 channel->dma_tbl_index++;
    // Transfers that leave a partial sector/block at a PRD boundary are
    // unsupported -- flag them loudly
554 if (drive->drive_type == BLOCK_DISK) {
555 if (drive->transfer_index % HD_SECTOR_SIZE) {
556 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
559 } else if (drive->drive_type == BLOCK_CDROM) {
560 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
561 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
562 PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
563 PrintError(core->vm_info, core, "transfer_index=%llu, transfer_length=%llu\n",
564 drive->transfer_index, drive->transfer_length);
    // EOT reached with data still pending: the guest's table is too small
571 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
572 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
    // ATAPI completion interrupt flags: data from device, command done
578 drive->irq_flags.io_dir = 1;
579 drive->irq_flags.c_d = 1;
580 drive->irq_flags.rel = 0;
584 // Update to the next PRD entry
    // Final descriptor finished: mark the command complete and interrupt
588 if (prd_entry.end_of_table) {
589 channel->status.busy = 0;
590 channel->status.ready = 1;
591 channel->status.data_req = 0;
592 channel->status.error = 0;
593 channel->status.seek_complete = 1;
595 channel->dma_status.active = 0;
596 channel->dma_status.err = 0;
599 ide_raise_irq(ide, channel);
/*
 * dma_write -- bus-master DMA transfer from guest memory to the
 * emulated disk.  Walks the guest's PRD table, copying up to one
 * 512-byte sector per iteration from the guest region into
 * drive->data_buf and flushing it to the backend via ata_write().
 * Returns 0 on success, -1 on failure.
 * NOTE(review): returns, braces, and some local declarations are
 * missing from this extract -- compare against the full source.
 */
605 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
606 struct ide_drive * drive = get_selected_drive(channel);
607 // This is at top level scope to do the EOT test at the end
608 struct ide_dma_prd prd_entry = {};
609 uint_t bytes_left = drive->transfer_length;
612 PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
614 // Loop through disk data
615 while (bytes_left > 0) {
616 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
617 uint_t prd_bytes_left = 0;
618 uint_t prd_offset = 0;
621 PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
623 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
625 if (ret != sizeof(struct ide_dma_prd)) {
626 PrintError(core->vm_info, core, "Could not read PRD\n");
630 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
631 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
634 if (prd_entry.size == 0) {
635 // a size of 0 means 64k
636 prd_bytes_left = 0x10000;
638 prd_bytes_left = prd_entry.size;
    // Drain this PRD region one sector at a time
641 while (prd_bytes_left > 0) {
642 uint_t bytes_to_write = 0;
645 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
    // Pull the next chunk of guest memory into the staging buffer
648 ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
650 if (ret != bytes_to_write) {
    // (the "Faild" typo below is in the original log string)
651 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
655 PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
    // Flush the staged sector to the backend and advance the LBA
658 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
659 PrintError(core->vm_info, core, "Failed to write data to disk\n");
663 drive->current_lba++;
665 drive->transfer_index += ret;
666 prd_bytes_left -= ret;
    // Current PRD entry exhausted; advance to the next descriptor
671 channel->dma_tbl_index++;
    // Partial sectors at PRD boundaries are unsupported
673 if (drive->transfer_index % HD_SECTOR_SIZE) {
674 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
    // EOT reached with data still pending: guest's table is too small
678 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
679 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
680 PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%llu)...\n",
681 bytes_left, drive->transfer_length);
682 PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
683 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
685 print_prd_table(ide, channel);
    // Final descriptor finished: mark command complete and interrupt
690 if (prd_entry.end_of_table) {
691 channel->status.busy = 0;
692 channel->status.ready = 1;
693 channel->status.data_req = 0;
694 channel->status.error = 0;
695 channel->status.seek_complete = 1;
697 channel->dma_status.active = 0;
698 channel->dma_status.err = 0;
701 ide_raise_irq(ide, channel);
// Bus-master DMA register offsets within each channel's register block
708 #define DMA_CMD_PORT 0x00
709 #define DMA_STATUS_PORT 0x02
// PRD table base address, written one byte per port (little-endian dword)
710 #define DMA_PRD_PORT0 0x04
711 #define DMA_PRD_PORT1 0x05
712 #define DMA_PRD_PORT2 0x06
713 #define DMA_PRD_PORT3 0x07
// Bit 3 of the port number selects the channel: 0 = primary, 1 = secondary
715 #define DMA_CHANNEL_FLAG 0x08
/*
 * I/O hook: guest write to a bus-master DMA register.  The register is
 * decoded from the low 3 bits of the port, the channel from bit 3.
 * Writing the command register with the start bit set kicks off the
 * DMA engine immediately (dma_read/dma_write run to completion here).
 * NOTE(review): the "case DMA_CMD_PORT:" label and several break/return
 * lines are missing from this extract -- confirm against the full source.
 */
717 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
718 struct ide_internal * ide = (struct ide_internal *)private_data;
719 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
720 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
721 struct ide_channel * channel = &(ide->channels[channel_flag]);
723 PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
724 port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
726 switch (port_offset) {
    // DMA command register (label for this case is missing in extract)
728 channel->dma_cmd.val = *(uint8_t *)src;
    // Clearing the start bit resets the PRD walk position
730 if (channel->dma_cmd.start == 0) {
731 channel->dma_tbl_index = 0;
733 channel->dma_status.active = 1;
735 if (channel->dma_cmd.read == 1) {
    // "read" from the guest's viewpoint: device -> guest memory
737 if (dma_read(core, ide, channel) == -1) {
738 PrintError(core->vm_info, core, "Failed DMA Read\n");
743 if (dma_write(core, ide, channel) == -1) {
744 PrintError(core->vm_info, core, "Failed DMA Write\n");
    // only the start and direction bits (mask 0x09) are retained
749 channel->dma_cmd.val &= 0x09;
754 case DMA_STATUS_PORT: {
755 uint8_t val = *(uint8_t *)src;
758 PrintError(core->vm_info, core, "Invalid read length for DMA status port\n");
    // Status write semantics: bits 5-6 are guest-writable, bit 0 is
    // preserved (read-only), and bits 1-2 are write-1-to-clear
763 channel->dma_status.val = ((val & 0x60) |
764 (channel->dma_status.val & 0x01) |
765 (channel->dma_status.val & ~val & 0x06));
    // PRD base-address ports: bytes land in the address little-endian;
    // the preceding DMA_PRD_PORT0..2 case labels are missing in extract
772 case DMA_PRD_PORT3: {
773 uint_t addr_index = port_offset & 0x3;
774 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
777 if (addr_index + length > 4) {
778 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
782 for (i = 0; i < length; i++) {
783 addr_buf[addr_index + i] = *((uint8_t *)src + i);
786 PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
791 PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
/*
 * I/O hook: guest read of a bus-master DMA register, satisfied directly
 * from the raw dma_ports[] backing bytes of the selected channel.
 */
799 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
800 struct ide_internal * ide = (struct ide_internal *)private_data;
801 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
802 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
803 struct ide_channel * channel = &(ide->channels[channel_flag]);
805 PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
    // NOTE(review): the bound is 16 although dma_ports[] is declared with
    // 8 bytes above; port_offset is masked to 0..7, so a multi-byte read
    // near the top of the range could run past the array unless the
    // surrounding (elided) union makes the region larger -- confirm
    // against the full struct layout.
807 if (port_offset + length > 16) {
808 PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
812 memcpy(dst, channel->dma_ports + port_offset, length);
814 PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
/*
 * I/O hook: guest write to the command register (0x1f7 / 0x177).
 * Latches the command byte and dispatches on it: identify, packet,
 * PIO/DMA reads and writes, power management, and drive configuration.
 * Most handlers either complete immediately (setting status and raising
 * an IRQ) or arm a transfer that later port accesses will drive.
 * NOTE(review): many break/return statements, closing braces, and some
 * local declarations (e.g. "uint64_t sect_cnt") are missing from this
 * extract -- compare against the full source for exact control flow.
 */
821 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
822 struct ide_internal * ide = priv_data;
823 struct ide_channel * channel = get_selected_channel(ide, port);
824 struct ide_drive * drive = get_selected_drive(channel);
    // (the length-check condition guarding this error is elided here)
827 PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
831 PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
833 channel->cmd_reg = *(uint8_t *)src;
835 switch (channel->cmd_reg) {
837 case ATA_PIDENTIFY: // ATAPI Identify Device Packet
838 if (drive->drive_type != BLOCK_CDROM) {
841 // JRL: Should we abort here?
842 ide_abort_command(ide, channel);
    // Fill data_buf with the ATAPI identify data for PIO readout
845 atapi_identify_device(drive);
847 channel->error_reg.val = 0;
848 channel->status.val = 0x58; // ready, data_req, seek_complete
850 ide_raise_irq(ide, channel);
853 case ATA_IDENTIFY: // Identify Device
854 if (drive->drive_type != BLOCK_DISK) {
857 // JRL: Should we abort here?
858 ide_abort_command(ide, channel);
860 ata_identify_device(drive);
862 channel->error_reg.val = 0;
863 channel->status.val = 0x58;
865 ide_raise_irq(ide, channel);
869 case ATA_PACKETCMD: // ATAPI Command Packet
870 if (drive->drive_type != BLOCK_CDROM) {
871 ide_abort_command(ide, channel);
874 drive->sector_count = 1;
    // Expect the 12-byte command packet next via the data port (DRQ set)
876 channel->status.busy = 0;
877 channel->status.write_fault = 0;
878 channel->status.data_req = 1;
879 channel->status.error = 0;
881 // reset the data buffer...
882 drive->transfer_length = ATAPI_PACKET_SIZE;
883 drive->transfer_index = 0;
887 case ATA_READ: // Read Sectors with Retry
888 case ATA_READ_ONCE: // Read Sectors without Retry
889 case ATA_MULTREAD: // Read multiple sectors per ire
890 case ATA_READ_EXT: // Read Sectors Extended (LBA48)
    // Interrupt granularity: mult_sector_num sectors for READ MULTIPLE,
    // one sector otherwise (consumed by read_hd_data)
892 if (channel->cmd_reg==ATA_MULTREAD) {
893 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
895 drive->hd_state.cur_sector_num = 1;
898 if (ata_read_sectors(ide, channel) == -1) {
899 PrintError(core->vm_info, core, "Error reading sectors\n");
900 ide_abort_command(ide,channel);
904 case ATA_WRITE: // Write Sector with retry
905 case ATA_WRITE_ONCE: // Write Sector without retry
906 case ATA_MULTWRITE: // Write multiple sectors per irq
907 case ATA_WRITE_EXT: // Write Sectors Extended (LBA48)
909 if (channel->cmd_reg==ATA_MULTWRITE) {
910 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
912 drive->hd_state.cur_sector_num = 1;
915 if (ata_write_sectors(ide, channel) == -1) {
916 PrintError(core->vm_info, core, "Error writing sectors\n");
917 ide_abort_command(ide,channel);
921 case ATA_READDMA: // Read DMA with retry
922 case ATA_READDMA_ONCE: { // Read DMA
    // NOTE(review): "§_cnt" below is mojibake for "&sect_cnt" (the HTML
    // entity &sect; was substituted during extraction) -- restore when
    // fixing the file's encoding
925 if (ata_get_lba_and_size(ide, channel, &(drive->current_lba), §_cnt) == -1) {
926 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
927 ide_abort_command(ide, channel);
931 drive->hd_state.cur_sector_num = 1;
933 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
934 drive->transfer_index = 0;
    // The DMA engine must already have been started via the bus-master
    // command register; otherwise the command is aborted
936 if (channel->dma_status.active == 1) {
938 if (dma_read(core, ide, channel) == -1) {
939 PrintError(core->vm_info, core, "Failed DMA Read\n");
940 ide_abort_command(ide, channel);
943 PrintError(core->vm_info,core,"Attempt to initiate DMA read on channel that is not active\n");
944 ide_abort_command(ide, channel);
949 case ATA_WRITEDMA: { // Write DMA
    // NOTE(review): same "§_cnt" mojibake as in the READDMA case above
952 if (ata_get_lba_and_size(ide, channel, &(drive->current_lba),§_cnt) == -1) {
953 PrintError(core->vm_info,core,"Cannot get lba\n");
954 ide_abort_command(ide, channel);
958 drive->hd_state.cur_sector_num = 1;
960 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
961 drive->transfer_index = 0;
963 if (channel->dma_status.active == 1) {
965 if (dma_write(core, ide, channel) == -1) {
966 PrintError(core->vm_info, core, "Failed DMA Write\n");
967 ide_abort_command(ide, channel);
970 PrintError(core->vm_info,core,"Attempt to initiate DMA write with DMA inactive\n");
971 ide_abort_command(ide, channel);
    // Power-management commands are acknowledged as no-ops
975 case ATA_STANDBYNOW1: // Standby Now 1
976 case ATA_IDLEIMMEDIATE: // Set Idle Immediate
977 case ATA_STANDBY: // Standby
978 case ATA_SETIDLE1: // Set Idle 1
979 case ATA_SLEEPNOW1: // Sleep Now 1
980 case ATA_STANDBYNOW2: // Standby Now 2
981 case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
982 case ATA_STANDBY2: // Standby 2
983 case ATA_SETIDLE2: // Set idle 2
984 case ATA_SLEEPNOW2: // Sleep Now 2
985 channel->status.val = 0;
986 channel->status.ready = 1;
987 ide_raise_irq(ide, channel);
990 case ATA_SETFEATURES: // Set Features
991 // Prior to this the features register has been written to.
992 // This command tells the drive to check if the new value is supported (the value is drive specific)
993 // Common is that bit0=DMA enable
994 // If valid the drive raises an interrupt, if not it aborts.
996 // Do some checking here...
998 channel->status.busy = 0;
999 channel->status.write_fault = 0;
1000 channel->status.error = 0;
1001 channel->status.ready = 1;
1002 channel->status.seek_complete = 1;
1004 ide_raise_irq(ide, channel);
1007 case ATA_SPECIFY: // Initialize Drive Parameters
1008 case ATA_RECAL: // recalibrate?
1009 channel->status.error = 0;
1010 channel->status.ready = 1;
1011 channel->status.seek_complete = 1;
1012 ide_raise_irq(ide, channel);
1015 case ATA_SETMULT: { // Set multiple mode (IDE Block mode)
1016 // This makes the drive transfer multiple sectors before generating an interrupt
    // A block size of zero is invalid: fall back to 1 and abort
1018 if (drive->sector_count == 0) {
1019 PrintError(core->vm_info,core,"Attempt to set multiple to zero\n");
1020 drive->hd_state.mult_sector_num= 1;
1021 ide_abort_command(ide,channel);
1024 drive->hd_state.mult_sector_num = drive->sector_count;
1027 channel->status.ready = 1;
1028 channel->status.error = 0;
1030 ide_raise_irq(ide, channel);
1035 case ATA_DEVICE_RESET: // Reset Device
1037 channel->error_reg.val = 0x01;
1038 channel->status.busy = 0;
1039 channel->status.ready = 1;
1040 channel->status.seek_complete = 1;
1041 channel->status.write_fault = 0;
1042 channel->status.error = 0;
1045 case ATA_CHECKPOWERMODE1: // Check power mode
1046 drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1047 channel->status.busy = 0;
1048 channel->status.ready = 1;
1049 channel->status.write_fault = 0;
1050 channel->status.data_req = 0;
1051 channel->status.error = 0;
1055 PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1056 ide_abort_command(ide, channel);
/*
 * PIO read path for hard disks: copy `length` bytes from the staged
 * sector in drive->data_buf to the guest, refilling the buffer from the
 * backend at each sector boundary.  Raises the completion/next-block
 * interrupt according to the multi-sector policy documented inline.
 * Returns 0 on success (error-path returns are elided in this extract).
 */
1066 static int read_hd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1067 struct ide_drive * drive = get_selected_drive(channel);
1068 uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1071 PrintDebug(VM_NONE,VCORE_NONE, "Read HD data: transfer_index %llu transfer length %llu current sector numer %llu\n",
1072 drive->transfer_index, drive->transfer_length,
1073 drive->hd_state.cur_sector_num);
1075 if (drive->transfer_index >= drive->transfer_length) {
1076 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1077 drive->transfer_length, drive->transfer_index,
1078 drive->transfer_index + length);
    // Accesses that straddle a sector boundary are rejected
1083 if (data_offset + length > HD_SECTOR_SIZE) {
1084 PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1087 // For index==0, the read has been done in ata_read_sectors
1088 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1089 // advance to next sector and read it
1091 drive->current_lba++;
1093 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1094 PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
    // NOTE(review): %d is used below for 64-bit length/data_offset
    // arguments -- format/argument mismatch worth fixing upstream
1100 PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
1101 *(uint32_t *)(drive->data_buf + data_offset),
1102 length, data_offset);
1104 memcpy(dst, drive->data_buf + data_offset, length);
1106 drive->transfer_index += length;
1109 /* This is the trigger for interrupt injection.
1110 * For read single sector commands we interrupt after every sector
1111 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1112 * cur_sector_num is configured depending on the operation we are currently running
1113 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1115 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1116 (drive->transfer_index == drive->transfer_length)) {
1117 if (drive->transfer_index < drive->transfer_length) {
1118 // An increment is complete, but there is still more data to be transferred...
1119 PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1120 channel->status.data_req = 1;
1122 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1123 // This was the final read of the request
1124 channel->status.data_req = 0;
1127 channel->status.ready = 1;
1128 channel->status.busy = 0;
1130 ide_raise_irq(ide, channel);
/*
 * PIO write path for hard disks: accumulate `length` bytes from the
 * guest into drive->data_buf and flush a full sector to the backend via
 * ata_write() when the buffer fills.  Interrupt policy mirrors
 * read_hd_data (see the inline comment below).
 * Returns 0 on success (error-path returns are elided in this extract).
 */
1137 static int write_hd_data(uint8_t * src, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1138 struct ide_drive * drive = get_selected_drive(channel);
1139 uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1142 PrintDebug(VM_NONE,VCORE_NONE, "Write HD data: transfer_index %llu transfer length %llu current sector numer %llu\n",
1143 drive->transfer_index, drive->transfer_length,
1144 drive->hd_state.cur_sector_num);
1146 if (drive->transfer_index >= drive->transfer_length) {
1147 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1148 drive->transfer_length, drive->transfer_index,
1149 drive->transfer_index + length);
1153 if (data_offset + length > HD_SECTOR_SIZE) {
1154 PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1157 // Copy data into our buffer - there will be room due to
1158 // (a) the ata_write test below is flushing sectors
1159 // (b) if we somehow get a sector-stradling write (an error), this will
1160 // be OK since the buffer itself is >1 sector in memory
1161 memcpy(drive->data_buf + data_offset, src, length);
1163 drive->transfer_index += length;
    // Sector complete: flush it to the backend and advance the LBA
1165 if ((data_offset+length) >= HD_SECTOR_SIZE) {
1166 // Write out the sector we just finished
1167 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
1168 PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
1172 // go onto next sector
1173 drive->current_lba++;
1176 /* This is the trigger for interrupt injection.
1177 * For write single sector commands we interrupt after every sector
1178 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1179 * cur_sector_num is configured depending on the operation we are currently running
1180 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1182 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1183 (drive->transfer_index == drive->transfer_length)) {
1184 if (drive->transfer_index < drive->transfer_length) {
1185 // An increment is complete, but there is still more data to be transferred...
1186 PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1187 channel->status.data_req = 1;
1189 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1190 // This was the final read of the request
1191 channel->status.data_req = 0;
1194 channel->status.ready = 1;
1195 channel->status.busy = 0;
1197 ide_raise_irq(ide, channel);
/*
 * PIO read path for ATAPI (CD-ROM) transfers: copy `length` bytes from
 * the staged block in drive->data_buf to the guest, refilling via
 * atapi_update_data_buf() at each 2048-byte block boundary, then update
 * the ATAPI interrupt-reason flags and status and raise the IRQ.
 * Returns 0 on success (error-path returns are elided in this extract).
 */
1205 static int read_cd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1206 struct ide_drive * drive = get_selected_drive(channel);
1207 uint64_t data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1208 // int req_offset = drive->transfer_index % drive->req_len;
    // Only log for commands other than READ(10) (0x28) to cut noise
1210 if (drive->cd_state.atapi_cmd != 0x28) {
1211 PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%llu) (req_len=%u)\n", length, drive->req_len);
1212 PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%llu, transfer idx=%llu\n", drive->transfer_length, drive->transfer_index);
1217 if (drive->transfer_index >= drive->transfer_length) {
1218 PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1219 drive->transfer_length, drive->transfer_index,
1220 drive->transfer_index + length);
    // Block boundary (and not the very first byte): refill the buffer
1225 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1226 if (atapi_update_data_buf(ide, channel) == -1) {
1227 PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
1232 memcpy(dst, drive->data_buf + data_offset, length);
1234 drive->transfer_index += length;
1237 // Should the req_offset be recalculated here?????
1238 if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1239 if (drive->transfer_index < drive->transfer_length) {
1240 // An increment is complete, but there is still more data to be transferred...
1242 channel->status.data_req = 1;
    // interrupt reason: more data follows (c_d=0 means data phase)
1244 drive->irq_flags.c_d = 0;
1246 // Update the request length in the cylinder regs
1247 if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1248 PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
1252 // This was the final read of the request
1255 channel->status.data_req = 0;
1256 channel->status.ready = 1;
    // interrupt reason: command complete (c_d=1, rel=0)
1258 drive->irq_flags.c_d = 1;
1259 drive->irq_flags.rel = 0;
1262 drive->irq_flags.io_dir = 1;
1263 channel->status.busy = 0;
1265 ide_raise_irq(ide, channel);
// Service a data-port read while an IDENTIFY / PIDENTIFY command is active:
// copy the next 'length' bytes of the prepared identify block from
// drive->data_buf into 'dst' and clear DRQ once the whole block has been read.
1272 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1273 struct ide_drive * drive = get_selected_drive(channel);
// Identify data is always available immediately: not busy, ready, no errors.
1275 channel->status.busy = 0;
1276 channel->status.ready = 1;
1277 channel->status.write_fault = 0;
1278 channel->status.seek_complete = 1;
1279 channel->status.corrected = 0;
1280 channel->status.error = 0;
// NOTE(review): no bound check that transfer_index + length stays within
// data_buf / transfer_length — presumably guaranteed by the caller; verify.
1283 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1284 drive->transfer_index += length;
// Entire identify block consumed: deassert DRQ.
1286 if (drive->transfer_index >= drive->transfer_length) {
1287 channel->status.data_req = 0;
// I/O hook for guest reads of the IDE data port (0x1f0/0x170).
// Dispatches on the pending command / drive type: identify data, ATAPI CD
// data, or hard-disk sector data; reads from an absent drive return zeros.
1295 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1296 struct ide_internal * ide = priv_data;
1297 struct ide_channel * channel = get_selected_channel(ide, port);
1298 struct ide_drive * drive = get_selected_drive(channel);
1300 //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
// IDENTIFY / PACKET-IDENTIFY responses take precedence over drive type.
1302 if ((channel->cmd_reg == ATA_IDENTIFY) ||
1303 (channel->cmd_reg == ATA_PIDENTIFY)) {
1304 return read_drive_id((uint8_t *)dst, length, ide, channel);
1307 if (drive->drive_type == BLOCK_CDROM) {
1308 if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1309 PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1312 } else if (drive->drive_type == BLOCK_DISK) {
1313 if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1314 PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
// No drive present (or unknown type): return zeros to the guest.
1318 memset((uint8_t *)dst, 0, length);
1324 // For the write side, we care both about
1325 // direct PIO writes to a drive as well as
1326 // writes that pass a packet through to a CD
// I/O hook for guest writes to the IDE data port.  For a CD drive with an
// ATA PACKET command pending, the bytes accumulate into the 12-byte ATAPI
// command packet and are dispatched once complete; for a disk they are
// sector data routed to write_hd_data(); writable CDs are not supported.
1327 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1328 struct ide_internal * ide = priv_data;
1329 struct ide_channel * channel = get_selected_channel(ide, port);
1330 struct ide_drive * drive = get_selected_drive(channel);
1332 PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n",
1333 port, *(uint32_t *)src, length);
1335 if (drive->drive_type == BLOCK_CDROM) {
1336 if (channel->cmd_reg == ATA_PACKETCMD) {
1337 // short command packet - no check for space...
1338 memcpy(drive->data_buf + drive->transfer_index, src, length);
1339 drive->transfer_index += length;
// Full packet received: hand it to the ATAPI layer.
1340 if (drive->transfer_index >= drive->transfer_length) {
1341 if (atapi_handle_packet(core, ide, channel) == -1) {
1342 PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1347 PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1350 } else if (drive->drive_type == BLOCK_DISK) {
1351 if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1352 PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1356 // nothing ... do not support writable cd
// I/O hook for guest writes to the non-data IDE registers (features,
// sector count/number, cylinder low/high, drive select, control).
// CHS/LBA28 state is mirrored into both drives of the channel; when the
// guest has selected LBA48 addressing (is_lba48()), each register write
// additionally feeds the two-write LBA48 latch for the corresponding
// byte position, tracked per drive by the *_state toggle bits.
1362 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1363 struct ide_internal * ide = priv_data;
1364 struct ide_channel * channel = get_selected_channel(ide, port);
1365 struct ide_drive * drive = get_selected_drive(channel);
1368 PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1372 PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1375 // reset and interrupt enable
1377 case SEC_CTRL_PORT: {
1378 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1380 // only reset channel on a 0->1 reset bit transition
1381 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1382 channel_reset(channel);
1383 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1384 channel_reset_complete(channel);
1387 channel->ctrl_reg.val = tmp_ctrl->val;
1390 case PRI_FEATURES_PORT:
1391 case SEC_FEATURES_PORT:
1392 channel->features.val = *(uint8_t *)src;
1395 case PRI_SECT_CNT_PORT:
1396 case SEC_SECT_CNT_PORT:
1397 // update CHS and LBA28 state
1398 channel->drives[0].sector_count = *(uint8_t *)src;
1399 channel->drives[1].sector_count = *(uint8_t *)src;
1401 // update LBA48 state
1402 if (is_lba48(channel)) {
1403 uint16_t val = *(uint8_t*)src; // top bits zero;
// First write of the pair latches the high byte, second the low byte.
1404 if (!channel->drives[0].lba48.sector_count_state) {
1405 channel->drives[0].lba48.sector_count = val<<8;
1407 channel->drives[0].lba48.sector_count |= val;
1409 channel->drives[0].lba48.sector_count_state ^= 1;
1410 if (!channel->drives[1].lba48.sector_count_state) {
1411 channel->drives[1].lba48.sector_count = val<<8;
1413 channel->drives[1].lba48.sector_count |= val;
// BUG FIX: was toggling drives[0] a second time, leaving drive 1's
// high/low latch stuck and double-toggling drive 0's.  Every sibling
// branch below (lba41/lba52/lba63) toggles drives[1] here.
1415 channel->drives[1].lba48.sector_count_state ^= 1;
1420 case PRI_SECT_NUM_PORT:
1421 case SEC_SECT_NUM_PORT:
1422 // update CHS and LBA28 state
1423 channel->drives[0].sector_num = *(uint8_t *)src;
1424 channel->drives[1].sector_num = *(uint8_t *)src;
1426 // update LBA48 state
1427 if (is_lba48(channel)) {
1428 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
// Sector-number port carries LBA bytes 4 (first write) and 1 (second).
1429 if (!channel->drives[0].lba48.lba41_state) {
1430 channel->drives[0].lba48.lba |= val<<24;
1432 channel->drives[0].lba48.lba |= val;
1434 channel->drives[0].lba48.lba41_state ^= 1;
1435 if (!channel->drives[1].lba48.lba41_state) {
1436 channel->drives[1].lba48.lba |= val<<24;
1438 channel->drives[1].lba48.lba |= val;
1440 channel->drives[1].lba48.lba41_state ^= 1;
1444 case PRI_CYL_LOW_PORT:
1445 case SEC_CYL_LOW_PORT:
1446 // update CHS and LBA28 state
1447 channel->drives[0].cylinder_low = *(uint8_t *)src;
1448 channel->drives[1].cylinder_low = *(uint8_t *)src;
1450 // update LBA48 state
1451 if (is_lba48(channel)) {
1452 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
// Cylinder-low port carries LBA bytes 5 (first write) and 2 (second).
1453 if (!channel->drives[0].lba48.lba52_state) {
1454 channel->drives[0].lba48.lba |= val<<32;
1456 channel->drives[0].lba48.lba |= val<<8;
1458 channel->drives[0].lba48.lba52_state ^= 1;
1459 if (!channel->drives[1].lba48.lba52_state) {
1460 channel->drives[1].lba48.lba |= val<<32;
1462 channel->drives[1].lba48.lba |= val<<8;
1464 channel->drives[1].lba48.lba52_state ^= 1;
1469 case PRI_CYL_HIGH_PORT:
1470 case SEC_CYL_HIGH_PORT:
1471 // update CHS and LBA28 state
1472 channel->drives[0].cylinder_high = *(uint8_t *)src;
1473 channel->drives[1].cylinder_high = *(uint8_t *)src;
1475 // update LBA48 state
1476 if (is_lba48(channel)) {
1477 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
// Cylinder-high port carries LBA bytes 6 (first write) and 3 (second).
1478 if (!channel->drives[0].lba48.lba63_state) {
1479 channel->drives[0].lba48.lba |= val<<40;
1481 channel->drives[0].lba48.lba |= val<<16;
1483 channel->drives[0].lba48.lba63_state ^= 1;
1484 if (!channel->drives[1].lba48.lba63_state) {
1485 channel->drives[1].lba48.lba |= val<<40;
1487 channel->drives[1].lba48.lba |= val<<16;
1489 channel->drives[1].lba48.lba63_state ^= 1;
1494 case PRI_DRV_SEL_PORT:
1495 case SEC_DRV_SEL_PORT: {
1496 struct ide_drive_head_reg nh, oh;
1498 oh.val = channel->drive_head.val;
1499 channel->drive_head.val = nh.val = *(uint8_t *)src;
// A change in drive selection / addressing mode (top 3 bits) restarts
// the LBA48 two-write latching sequence on both drives.
1502 if ((oh.val & 0xe0) != (nh.val & 0xe0)) {
1503 // reset LBA48 state
1504 channel->drives[0].lba48.sector_count_state=0;
1505 channel->drives[0].lba48.lba41_state=0;
1506 channel->drives[0].lba48.lba52_state=0;
1507 channel->drives[0].lba48.lba63_state=0;
1508 channel->drives[1].lba48.sector_count_state=0;
1509 channel->drives[1].lba48.lba41_state=0;
1510 channel->drives[1].lba48.lba52_state=0;
1511 channel->drives[1].lba48.lba63_state=0;
// Re-resolve the selected drive now that drive_head has changed.
1515 drive = get_selected_drive(channel);
1517 // Selecting a non-present device is a no-no
1518 if (drive->drive_type == BLOCK_NONE) {
1519 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1520 channel->error_reg.abort = 1;
1521 channel->status.error = 1;
// Present drive selected: report idle/ready status and clear DMA state.
1523 channel->status.busy = 0;
1524 channel->status.ready = 1;
1525 channel->status.data_req = 0;
1526 channel->status.error = 0;
1527 channel->status.seek_complete = 1;
1529 channel->dma_status.active = 0;
1530 channel->dma_status.err = 0;
1536 PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
// I/O hook for guest reads of the non-data IDE registers.  Returns the
// register value selected by 'port'; the address register reads 0xff
// (unused), and an absent drive reads 0 (0xa0 for the drive-select port,
// preserving the always-one reserved bits).
1543 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1544 struct ide_internal * ide = priv_data;
1545 struct ide_channel * channel = get_selected_channel(ide, port);
1546 struct ide_drive * drive = get_selected_drive(channel);
1549 PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1553 PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1555 if ((port == PRI_ADDR_REG_PORT) ||
1556 (port == SEC_ADDR_REG_PORT)) {
1557 // unused, return 0xff
1558 *(uint8_t *)dst = 0xff;
1563 // if no drive is present just return 0 + reserved bits
1564 if (drive->drive_type == BLOCK_NONE) {
1565 if ((port == PRI_DRV_SEL_PORT) ||
1566 (port == SEC_DRV_SEL_PORT)) {
1567 *(uint8_t *)dst = 0xa0;
1569 *(uint8_t *)dst = 0;
1577 // This is really the error register.
1578 case PRI_FEATURES_PORT:
1579 case SEC_FEATURES_PORT:
1580 *(uint8_t *)dst = channel->error_reg.val;
1583 case PRI_SECT_CNT_PORT:
1584 case SEC_SECT_CNT_PORT:
1585 *(uint8_t *)dst = drive->sector_count;
1588 case PRI_SECT_NUM_PORT:
1589 case SEC_SECT_NUM_PORT:
1590 *(uint8_t *)dst = drive->sector_num;
1593 case PRI_CYL_LOW_PORT:
1594 case SEC_CYL_LOW_PORT:
1595 *(uint8_t *)dst = drive->cylinder_low;
1599 case PRI_CYL_HIGH_PORT:
1600 case SEC_CYL_HIGH_PORT:
1601 *(uint8_t *)dst = drive->cylinder_high;
1604 case PRI_DRV_SEL_PORT:
1605 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1606 *(uint8_t *)dst = channel->drive_head.val;
// NOTE(review): per ATA, reading the status register (as opposed to
// alt-status) should also clear a pending interrupt — not done here;
// confirm whether the elided lines handle it.
1614 *(uint8_t *)dst = channel->status.val;
1618 PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1622 PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
// Reset one drive slot to its power-on defaults: no backend attached
// (BLOCK_NONE), cleared model string / data buffer / transfer state, and
// zeroed geometry.  Registers take the ATA post-reset signature values.
1629 static void init_drive(struct ide_drive * drive) {
1631 drive->sector_count = 0x01;
1632 drive->sector_num = 0x01;
1633 drive->cylinder = 0x0000;
1635 drive->drive_type = BLOCK_NONE;
1637 memset(drive->model, 0, sizeof(drive->model));
1639 drive->transfer_index = 0;
1640 drive->transfer_length = 0;
1641 memset(drive->data_buf, 0, sizeof(drive->data_buf));
1643 drive->num_cylinders = 0;
1644 drive->num_heads = 0;
1645 drive->num_sectors = 0;
// Backend (block frontend) connection is established later by connect_fn.
1648 drive->private_data = NULL;
// Reset one IDE channel: default register values (error reg 0x01 and
// ctrl reg 0x08 per ATA reset behavior), cleared DMA engine state, and
// both drive slots re-initialized.
1652 static void init_channel(struct ide_channel * channel) {
1655 channel->error_reg.val = 0x01;
1657 //** channel->features = 0x0;
1659 channel->drive_head.val = 0x00;
1660 channel->status.val = 0x00;
1661 channel->cmd_reg = 0x00;
1662 channel->ctrl_reg.val = 0x08;
1664 channel->dma_cmd.val = 0;
1665 channel->dma_status.val = 0;
1666 channel->dma_prd_addr = 0;
1667 channel->dma_tbl_index = 0;
// Reset both master (0) and slave (1) drive slots.
1669 for (i = 0; i < 2; i++) {
1670 init_drive(&(channel->drives[i]));
// PCI config-space write callback registered with v3_pci_register_device;
// currently only logs the update.
1676 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1677 PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1679 struct ide_internal * ide = (struct ide_internal *)(private_data);
// NOTE(review): the stray 'info' argument below does not match the
// PrintDebug(vm, vcore, fmt, ...) signature used elsewhere in this file —
// presumably this line sits in an elided /* */ block or is leftover from an
// older logging API; confirm and remove the extra argument if live code.
1681 PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
// Initialize both IDE channels and assign the legacy ISA IRQ lines
// (14 primary, 15 secondary).
1687 static int init_ide_state(struct ide_internal * ide) {
1690 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1693 init_channel(&(ide->channels[0]));
1694 ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1696 init_channel(&(ide->channels[1]));
1697 ide->channels[1].irq = SEC_DEFAULT_IRQ ;
// Device teardown callback (see dev_ops.free); no PCI deregistration is
// performed here yet.
1706 static int ide_free(struct ide_internal * ide) {
1708 // deregister from PCI?
1715 #ifdef V3_CONFIG_CHECKPOINT
1717 #include <palacios/vmm_sprintf.h>
// Checkpoint-save callback: serializes the full IDE device state.  One
// context is opened per device ("id"), per channel ("id-ch"), and per
// drive ("id-ch-drive"); V3_CHKPT_SAVE jumps to savefailout on error so a
// partially opened context can be closed.
1719 static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1720 struct ide_internal * ide = (struct ide_internal *)private_data;
1721 struct v3_chkpt_ctx *ctx=0;
1727 ctx=v3_chkpt_open_ctx(chkpt,id);
1730 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
1734 // nothing saved yet
1736 v3_chkpt_close_ctx(ctx);ctx=0;
1739 for (ch_num = 0; ch_num < 2; ch_num++) {
1740 struct ide_channel * ch = &(ide->channels[ch_num]);
// Per-channel context named "<id>-<channel>".
1742 snprintf(buf, 128, "%s-%d", id, ch_num);
1744 ctx = v3_chkpt_open_ctx(chkpt, buf);
1747 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
1751 V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
1752 V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
1753 V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
1754 V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
1755 V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
1756 V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
1757 V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
1758 V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
1759 V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
1760 V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);
1764 v3_chkpt_close_ctx(ctx); ctx=0;
1766 for (drive_num = 0; drive_num < 2; drive_num++) {
1767 struct ide_drive * drive = &(ch->drives[drive_num]);
// Per-drive context named "<id>-<channel>-<drive>".
1769 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1771 ctx = v3_chkpt_open_ctx(chkpt, buf);
1774 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
1778 V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
1779 V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
1780 V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
1781 V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);
1783 V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
1784 V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
1785 V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);
1787 V3_CHKPT_SAVE(ctx, "DATA_BUF", drive->data_buf, savefailout);
1790 /* For now we'll just pack the type specific data at the end... */
1791 /* We should probably add a new context here in the future... */
1792 if (drive->drive_type == BLOCK_CDROM) {
1793 V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
1794 V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
1795 V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
1796 } else if (drive->drive_type == BLOCK_DISK) {
1797 V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
1798 V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
1799 V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
1800 } else if (drive->drive_type == BLOCK_NONE) {
1801 // no drive connected, so no data
1803 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
// LBA48 latch state is saved regardless of drive type (keys must mirror
// ide_load_extended exactly).
1807 V3_CHKPT_SAVE(ctx, "LBA48_LBA", drive->lba48.lba, savefailout);
1808 V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, savefailout);
1809 V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, savefailout);
1810 V3_CHKPT_SAVE(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, savefailout);
1811 V3_CHKPT_SAVE(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, savefailout);
1812 V3_CHKPT_SAVE(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, savefailout);
1814 v3_chkpt_close_ctx(ctx); ctx=0;
// Shared error path: log, close any open context, and fail.
1822 PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
1823 if (ctx) {v3_chkpt_close_ctx(ctx); }
// Checkpoint-load callback: mirror image of ide_save_extended — restores
// the same per-device / per-channel / per-drive contexts with identical
// keys; V3_CHKPT_LOAD jumps to loadfailout on any missing/invalid entry.
1829 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1830 struct ide_internal * ide = (struct ide_internal *)private_data;
1831 struct v3_chkpt_ctx *ctx=0;
1836 ctx=v3_chkpt_open_ctx(chkpt,id);
1839 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1843 // nothing saved yet
1845 v3_chkpt_close_ctx(ctx);ctx=0;
1848 for (ch_num = 0; ch_num < 2; ch_num++) {
1849 struct ide_channel * ch = &(ide->channels[ch_num]);
// Per-channel context named "<id>-<channel>".
1851 snprintf(buf, 128, "%s-%d", id, ch_num);
1853 ctx = v3_chkpt_open_ctx(chkpt, buf);
1856 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1860 V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1861 V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1862 V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1863 V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1864 V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1865 V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1866 V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1867 V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1868 V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1869 V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1871 v3_chkpt_close_ctx(ctx); ctx=0;
1873 for (drive_num = 0; drive_num < 2; drive_num++) {
1874 struct ide_drive * drive = &(ch->drives[drive_num]);
// Per-drive context named "<id>-<channel>-<drive>".
1876 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1878 ctx = v3_chkpt_open_ctx(chkpt, buf);
1881 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1885 V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1886 V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1887 V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1888 V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1890 V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1891 V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1892 V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1894 V3_CHKPT_LOAD(ctx, "DATA_BUF", drive->data_buf, loadfailout);
1897 /* For now we'll just pack the type specific data at the end... */
1898 /* We should probably add a new context here in the future... */
1899 if (drive->drive_type == BLOCK_CDROM) {
1900 V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1901 V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1902 V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1903 } else if (drive->drive_type == BLOCK_DISK) {
1904 V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1905 V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1906 V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1907 } else if (drive->drive_type == BLOCK_NONE) {
1908 // no drive connected, so no data
1910 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
// LBA48 latch state restored unconditionally, matching the save side.
1914 V3_CHKPT_LOAD(ctx, "LBA48_LBA", drive->lba48.lba, loadfailout);
1915 V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, loadfailout);
1916 V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, loadfailout);
1917 V3_CHKPT_LOAD(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, loadfailout);
1918 V3_CHKPT_LOAD(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, loadfailout);
1919 V3_CHKPT_LOAD(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, loadfailout);
// Shared error path: log, close any open context, and fail.
1927 PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1928 if (ctx) {v3_chkpt_close_ctx(ctx); }
// Device-manager operations table for the IDE device: teardown plus
// (when checkpointing is compiled in) extended save/load hooks.
1938 static struct v3_device_ops dev_ops = {
1939 .free = (int (*)(void *))ide_free,
1940 #ifdef V3_CONFIG_CHECKPOINT
1941 .save_extended = ide_save_extended,
1942 .load_extended = ide_load_extended
// Block-frontend connection callback: attaches a backing block device
// (described by the XML cfg attributes bus_num / drive_num / type / model)
// to the corresponding IDE drive slot.  "cdrom" and "hd" types are
// supported; hard disks get a fixed 63-sector/16-head translated geometry
// derived from the backend capacity.
1949 static int connect_fn(struct v3_vm_info * vm,
1950 void * frontend_data,
1951 struct v3_dev_blk_ops * ops,
1952 v3_cfg_tree_t * cfg,
1953 void * private_data) {
1954 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1955 struct ide_channel * channel = NULL;
1956 struct ide_drive * drive = NULL;
1958 char * bus_str = v3_cfg_val(cfg, "bus_num");
1959 char * drive_str = v3_cfg_val(cfg, "drive_num");
1960 char * type_str = v3_cfg_val(cfg, "type");
1961 char * model_str = v3_cfg_val(cfg, "model");
1963 uint_t drive_num = 0;
1966 if ((!type_str) || (!drive_str) || (!bus_str)) {
1967 PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
// NOTE(review): bus_num/drive_num from atoi() are not range-checked on the
// visible lines before indexing channels[2]/drives[2] — confirm the elided
// lines validate them, otherwise a bad config indexes out of bounds.
1971 bus_num = atoi(bus_str);
1972 drive_num = atoi(drive_str);
1974 channel = &(ide->channels[bus_num]);
1975 drive = &(channel->drives[drive_num]);
1977 if (drive->drive_type != BLOCK_NONE) {
1978 PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1982 if (model_str != NULL) {
1983 strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1986 if (strcasecmp(type_str, "cdrom") == 0) {
1987 drive->drive_type = BLOCK_CDROM;
// ATA IDENTIFY model strings are space-padded to 40 characters.
1989 while (strlen((char *)(drive->model)) < 40) {
1990 strcat((char*)(drive->model), " ");
1993 } else if (strcasecmp(type_str, "hd") == 0) {
1994 drive->drive_type = BLOCK_DISK;
1996 drive->hd_state.accessed = 0;
1997 drive->hd_state.mult_sector_num = 1;
// Synthetic CHS geometry: cylinders computed from backend capacity.
1999 drive->num_sectors = 63;
2000 drive->num_heads = 16;
2001 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
2003 PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
2010 // Hardcode this for now, but its not a good idea....
// Enable the channel in the PIIX IDE timing registers (0x40/0x41 primary,
// 0x42/0x43 secondary; bit 15 = enable).
2011 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
2014 drive->private_data = private_data;
// Device-init entry point (registered via device_register below).
// Allocates the IDE state, resolves the optional PCI bus + southbridge,
// registers the device, hooks all legacy I/O ports for both channels,
// registers the PIIX3-compatible PCI function (BAR4 = bus-master DMA),
// and exposes the device as a block frontend target.
2022 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
2023 struct ide_internal * ide = NULL;
2024 char * dev_id = v3_cfg_val(cfg, "ID");
2027 PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
2029 ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
2032 PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
2036 memset(ide, 0, sizeof(struct ide_internal));
// PCI attachment is optional; when present a southbridge is required too.
2039 ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
2041 if (ide->pci_bus != NULL) {
2042 struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
2045 PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
2050 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
2053 PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
2055 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
2058 PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
2063 if (init_ide_state(ide) == -1) {
2064 PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
2065 v3_remove_device(dev);
2069 PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
// Hook results are OR'd so any single failure is caught once at the end.
2071 ret |= v3_dev_hook_io(dev, PRI_DATA_PORT,
2072 &read_data_port, &write_data_port);
2073 ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT,
2074 &read_port_std, &write_port_std);
2075 ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
2076 &read_port_std, &write_port_std);
2077 ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
2078 &read_port_std, &write_port_std);
2079 ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
2080 &read_port_std, &write_port_std);
2081 ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
2082 &read_port_std, &write_port_std);
2083 ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
2084 &read_port_std, &write_port_std);
2085 ret |= v3_dev_hook_io(dev, PRI_CMD_PORT,
2086 &read_port_std, &write_cmd_port);
2088 ret |= v3_dev_hook_io(dev, SEC_DATA_PORT,
2089 &read_data_port, &write_data_port);
2090 ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT,
2091 &read_port_std, &write_port_std);
2092 ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
2093 &read_port_std, &write_port_std);
2094 ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
2095 &read_port_std, &write_port_std);
2096 ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
2097 &read_port_std, &write_port_std);
2098 ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
2099 &read_port_std, &write_port_std);
2100 ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
2101 &read_port_std, &write_port_std);
2102 ret |= v3_dev_hook_io(dev, SEC_CMD_PORT,
2103 &read_port_std, &write_cmd_port);
2106 ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT,
2107 &read_port_std, &write_port_std);
2109 ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT,
2110 &read_port_std, &write_port_std);
2113 ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
2114 &read_port_std, &write_port_std);
2116 ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
2117 &read_port_std, &write_port_std);
2121 PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
2122 v3_remove_device(dev);
// PCI registration path (only reached when pci_bus was found above).
2128 struct v3_pci_bar bars[6];
2129 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
2130 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
2131 struct pci_device * pci_dev = NULL;
2134 PrintDebug(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
2136 for (i = 0; i < 6; i++) {
2137 bars[i].type = PCI_BAR_NONE;
// BAR4: 16-port bus-master DMA register block; base assigned by the guest.
2140 bars[4].type = PCI_BAR_IO;
2141 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
2142 bars[4].default_base_port = -1;
2143 bars[4].num_ports = 16;
2145 bars[4].io_read = read_dma_port;
2146 bars[4].io_write = write_dma_port;
2147 bars[4].private_data = ide;
// Registered as function 1 of the southbridge's PCI device number.
2149 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
2151 pci_config_update, NULL, NULL, NULL, ide);
2153 if (pci_dev == NULL) {
2154 PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i);
2155 v3_remove_device(dev);
2159 /* This is for CMD646 devices
2160 pci_dev->config_header.vendor_id = 0x1095;
2161 pci_dev->config_header.device_id = 0x0646;
2162 pci_dev->config_header.revision = 0x8f07;
// Identify as an Intel PIIX3 IDE controller (8086:7010).
2165 pci_dev->config_header.vendor_id = 0x8086;
2166 pci_dev->config_header.device_id = 0x7010;
2167 pci_dev->config_header.revision = 0x00;
2169 pci_dev->config_header.prog_if = 0x80; // Master IDE device
2170 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
2171 pci_dev->config_header.class = PCI_CLASS_STORAGE;
2173 pci_dev->config_header.command = 0;
2174 pci_dev->config_header.status = 0x0280;
2176 ide->ide_pci = pci_dev;
// Expose the device so <frontend> config entries can attach block backends.
2181 if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
2182 PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
2183 v3_remove_device(dev);
2188 PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
2194 device_register("IDE", ide_init)
// Public accessor (used e.g. by BIOS/CMOS setup code): report the CHS
// geometry of the drive at (channel_num, drive_num).  Output parameters
// are only written when a drive is present; an absent slot takes the
// (elided) early-out path.
2199 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num,
2200 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2202 struct ide_internal * ide = ide_data;
2203 struct ide_channel * channel = &(ide->channels[channel_num]);
2204 struct ide_drive * drive = &(channel->drives[drive_num]);
// NOTE(review): channel_num/drive_num are not bounds-checked on the
// visible lines — callers must pass 0 or 1.
2206 if (drive->drive_type == BLOCK_NONE) {
2210 *cylinders = drive->num_cylinders;
2211 *heads = drive->num_heads;
2212 *sectors = drive->num_sectors;