2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
// Compile out debug logging unless IDE debugging was configured at build time.
// NOTE(review): the matching #endif for this guard is not visible in this chunk.
29 #ifndef V3_CONFIG_DEBUG_IDE
31 #define PrintDebug(fmt, args...)
// Legacy ISA IRQ lines for the primary and secondary ATA channels.
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
// Primary channel command-block ports (0x1f0-0x1f7) and control ports (0x3f6-0x3f7).
38 #define PRI_DATA_PORT 0x1f0
39 #define PRI_FEATURES_PORT 0x1f1
40 #define PRI_SECT_CNT_PORT 0x1f2
41 #define PRI_SECT_NUM_PORT 0x1f3
42 #define PRI_CYL_LOW_PORT 0x1f4
43 #define PRI_CYL_HIGH_PORT 0x1f5
44 #define PRI_DRV_SEL_PORT 0x1f6
45 #define PRI_CMD_PORT 0x1f7
46 #define PRI_CTRL_PORT 0x3f6
47 #define PRI_ADDR_REG_PORT 0x3f7
// Secondary channel command-block ports (0x170-0x177) and control ports (0x376-0x377).
49 #define SEC_DATA_PORT 0x170
50 #define SEC_FEATURES_PORT 0x171
51 #define SEC_SECT_CNT_PORT 0x172
52 #define SEC_SECT_NUM_PORT 0x173
53 #define SEC_CYL_LOW_PORT 0x174
54 #define SEC_CYL_HIGH_PORT 0x175
55 #define SEC_DRV_SEL_PORT 0x176
56 #define SEC_CMD_PORT 0x177
57 #define SEC_CTRL_PORT 0x376
58 #define SEC_ADDR_REG_PORT 0x377
// Default bus-master (DMA) register bases; each channel owns an 8-byte window.
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
// Per-drive staging buffer size; equals one ATAPI block, holds one HD sector with room to spare.
64 #define DATA_BUFFER_SIZE 2048
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
// Human-readable names for debug output, indexed by (port - base).
// Slots 0-7 are the command-block registers; slots 8-9 the control/addr registers.
70 static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM",
71 "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
72 "PRI_CTRL", "PRI_ADDR_REG"};
75 static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM",
76 "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
77 "SEC_CTRL", "SEC_ADDR_REG"};
// Bus-master register names; offsets 1 and 3 are reserved (NULL entries).
79 static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
80 "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
// What kind of backend is attached to a drive slot (none, hard disk, or CD-ROM).
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
// Per-drive state used only when the unit is an ATAPI CD-ROM.
// NOTE(review): the closing brace of this struct is not visible in this chunk.
105 struct ide_cd_state {
// ATAPI sense data reported to the guest (types from atapi-types.h)
106 struct atapi_sense_data sense;
// ATAPI error-recovery mode-page state
109 struct atapi_error_recovery err_recovery;
// Per-drive state used only when the unit is a hard disk.
112 struct ide_hd_state {
115 /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116 uint32_t mult_sector_num;
118 /* This is the current op sector size:
119 * for multiple sector ops this equals mult_sector_num
120 * for standard ops this equals 1
 */
122 uint64_t cur_sector_num;
// --- fields of struct ide_drive ---
// NOTE(review): the "struct ide_drive {" header line is not visible in this chunk.
128 v3_block_type_t drive_type;
// Backend block-device callbacks (read/write/capacity) supplied at attach time.
130 struct v3_dev_blk_ops * ops;
// Exactly one of these is meaningful, selected by drive_type.
133 struct ide_cd_state cd_state;
134 struct ide_hd_state hd_state;
139 // Where we are in the data transfer
140 uint64_t transfer_index;
142 // the length of a transfer
143 // calculated for easy access
144 uint64_t transfer_length;
146 uint64_t current_lba;
148 // We have a local data buffer that we use for IO port accesses
149 uint8_t data_buf[DATA_BUFFER_SIZE];
// CHS geometry advertised to the guest.
152 uint32_t num_cylinders;
154 uint32_t num_sectors;
// LBA48 shadow registers: each taskfile register is written twice (high byte first).
160 uint16_t sector_count; // for LBA48
161 uint8_t sector_count_state; // two step write to 1f2/172 (high first)
162 uint8_t lba41_state; // two step write to 1f3
163 uint8_t lba52_state; // two step write to 1f4
164 uint8_t lba63_state; // two step write to 15
// Taskfile register overlays (ATA vs ATAPI views of the same byte).
// NOTE(review): the union/struct headers for these overlays are not visible here.
170 uint8_t sector_count; // 0x1f2,0x172 (ATA)
171 struct atapi_irq_flags irq_flags; // (ATAPI ONLY)
172 } __attribute__((packed));
176 uint8_t sector_num; // 0x1f3,0x173
178 } __attribute__((packed));
185 uint8_t cylinder_low; // 0x1f4,0x174
186 uint8_t cylinder_high; // 0x1f5,0x175
187 } __attribute__((packed));
192 } __attribute__((packed));
195 // The transfer length requested by the CPU
197 } __attribute__((packed));
// --- fields of struct ide_channel ---
// NOTE(review): the "struct ide_channel {" header line is not visible in this chunk.
// Master (0) and slave (1) drives; selected via drive_head.drive_sel.
204 struct ide_drive drives[2];
207 struct ide_error_reg error_reg; // [read] 0x1f1,0x171
209 struct ide_features_reg features;
211 struct ide_drive_head_reg drive_head; // 0x1f6,0x176
213 struct ide_status_reg status; // [read] 0x1f7,0x177
214 uint8_t cmd_reg; // [write] 0x1f7,0x177
216 int irq; // this is temporary until we add PCI support
219 struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
// Bus-master register file: raw byte view overlaid with typed registers.
222 uint8_t dma_ports[8];
224 struct ide_dma_cmd_reg dma_cmd;
226 struct ide_dma_status_reg dma_status;
// Guest-physical address of the PRD (physical region descriptor) table.
228 uint32_t dma_prd_addr;
229 } __attribute__((packed));
230 } __attribute__((packed));
// Index of the next PRD entry to be consumed by dma_read/dma_write.
232 uint32_t dma_tbl_index;
// Top-level device state: two channels plus links to the PCI/southbridge
// infrastructure and the owning VM.
// NOTE(review): the closing brace of this struct is not visible in this chunk.
237 struct ide_internal {
238 struct ide_channel channels[2];
240 struct v3_southbridge * southbridge;
241 struct vm_device * pci_bus;
243 struct pci_device * ide_pci;
245 struct v3_vm_info * vm;
252 /* Utility functions */
/*
 * Swap the two bytes of a 16-bit value, converting between big-endian and
 * little-endian representations (the operation is its own inverse).
 *
 * Fix: the original reinterpreted the storage of the const by-value
 * parameter through a uint8_t pointer, which made the result depend on the
 * host's byte order. The shift form below always swaps, which is the
 * intended behavior on the little-endian x86 hosts Palacios targets, and
 * avoids the pointer games entirely.
 */
static inline uint16_t be_to_le_16(const uint16_t val) {
    return (uint16_t)((val << 8) | (val >> 8));
}
/*
 * Swap the two bytes of a 16-bit value (little-endian <-> big-endian).
 * Identical to be_to_le_16 since a byte swap is its own inverse.
 *
 * Fix: implemented directly with shifts instead of a host-endianness
 * dependent pointer reinterpretation, so the result is the same byte swap
 * on any host.
 */
static inline uint16_t le_to_be_16(const uint16_t val) {
    return (uint16_t)((val << 8) | (val >> 8));
}
/*
 * Reverse the four bytes of a 32-bit value, converting between big-endian
 * and little-endian representations (its own inverse).
 *
 * Fix: the original reinterpreted the const by-value parameter through a
 * uint8_t pointer, making the result depend on host byte order; the
 * mask-and-shift form always performs the byte reversal.
 */
static inline uint32_t be_to_le_32(const uint32_t val) {
    return ((val & 0x000000ffU) << 24) |
           ((val & 0x0000ff00U) <<  8) |
           ((val & 0x00ff0000U) >>  8) |
           ((val & 0xff000000U) >> 24);
}
/*
 * Reverse the four bytes of a 32-bit value (little-endian <-> big-endian).
 * Identical to be_to_le_32 since a byte reversal is its own inverse.
 *
 * Fix: implemented directly with masks and shifts instead of delegating to
 * a host-endianness dependent pointer reinterpretation.
 */
static inline uint32_t le_to_be_32(const uint32_t val) {
    return ((val & 0x000000ffU) << 24) |
           ((val & 0x0000ff00U) <<  8) |
           ((val & 0x00ff0000U) >>  8) |
           ((val & 0xff000000U) >> 24);
}
274 static inline int is_lba28(struct ide_channel * channel) {
275 return channel->drive_head.lba_mode && channel->drive_head.rsvd1 && channel->drive_head.rsvd2;
278 static inline int is_lba48(struct ide_channel * channel) {
279 return channel->drive_head.lba_mode && !channel->drive_head.rsvd1 && !channel->drive_head.rsvd2;
282 static inline int is_chs(struct ide_channel * channel) {
283 return !channel->drive_head.lba_mode;
286 static inline int get_channel_index(ushort_t port) {
287 if (((port & 0xfff8) == 0x1f0) ||
288 ((port & 0xfffe) == 0x3f6) ||
289 ((port & 0xfff8) == 0xc000)) {
291 } else if (((port & 0xfff8) == 0x170) ||
292 ((port & 0xfffe) == 0x376) ||
293 ((port & 0xfff8) == 0xc008)) {
300 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
301 int channel_idx = get_channel_index(port);
302 return &(ide->channels[channel_idx]);
305 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
306 return &(channel->drives[channel->drive_head.drive_sel]);
313 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
314 if (channel->ctrl_reg.irq_disable == 0) {
316 PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
318 channel->dma_status.int_gen = 1;
319 v3_raise_irq(ide->vm, channel->irq);
321 PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
326 static void drive_reset(struct ide_drive * drive) {
327 drive->sector_count = 0x01;
328 drive->sector_num = 0x01;
330 PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
332 if (drive->drive_type == BLOCK_CDROM) {
333 drive->cylinder = 0xeb14;
335 drive->cylinder = 0x0000;
336 //drive->hd_state.accessed = 0;
340 memset(drive->data_buf, 0, sizeof(drive->data_buf));
341 drive->transfer_index = 0;
343 // Send the reset signal to the connected device callbacks
344 // channel->drives[0].reset();
345 // channel->drives[1].reset();
348 static void channel_reset(struct ide_channel * channel) {
350 // set busy and seek complete flags
351 channel->status.val = 0x90;
354 channel->error_reg.val = 0x01;
357 channel->cmd_reg = 0; // NOP
359 channel->ctrl_reg.irq_disable = 0;
362 static void channel_reset_complete(struct ide_channel * channel) {
363 channel->status.busy = 0;
364 channel->status.ready = 1;
366 channel->drive_head.head_num = 0;
368 drive_reset(&(channel->drives[0]));
369 drive_reset(&(channel->drives[1]));
373 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
375 PrintDebug(VM_NONE,VCORE_NONE,"Aborting IDE Command\n");
377 channel->status.val = 0x41; // Error + ready
378 channel->error_reg.val = 0x04; // No idea...
380 ide_raise_irq(ide, channel);
// Forward declarations for the bus-master transfer engines defined below;
// they are invoked from write_dma_port() when the guest starts a transfer.
384 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
385 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
388 /* ATAPI functions */
// Debug helper: walk the guest's PRD (physical region descriptor) table at
// channel->dma_prd_addr and print each entry until the end-of-table flag.
// NOTE(review): the loop header and the declarations of `ret` and `index`
// are not visible in this chunk.
396 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
397 struct ide_dma_prd prd_entry;
400 V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
403 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
// Fetch one descriptor from guest-physical memory (always via core 0 here).
406 ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
408 if (ret != sizeof(struct ide_dma_prd)) {
409 PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
// A descriptor size of 0 encodes a 64KB region.
413 V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
415 (prd_entry.size == 0) ? 0x10000 : prd_entry.size,
416 prd_entry.end_of_table);
418 if (prd_entry.end_of_table) {
// Bus-master "read" transfer: move data FROM the drive INTO guest memory,
// walking the guest's PRD table one descriptor at a time. Handles three
// sources: ATA disk sectors, ATAPI data operations, and ATAPI non-data
// command results. Raises the channel IRQ when the transfer completes.
// Returns 0 on success, -1 on error (error paths' returns are among the
// lines not visible in this chunk).
430 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
431 struct ide_drive * drive = get_selected_drive(channel);
432 // This is at top level scope to do the EOT test at the end
433 struct ide_dma_prd prd_entry = {};
434 uint_t bytes_left = drive->transfer_length;
436 // Read in the data buffer....
437 // Read a sector/block at a time until the prd entry is full.
439 #ifdef V3_CONFIG_DEBUG_IDE
440 print_prd_table(ide, channel);
443 PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
445 // Loop through the disk data
446 while (bytes_left > 0) {
// Fetch the next PRD descriptor from guest memory.
447 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
448 uint_t prd_bytes_left = 0;
449 uint_t prd_offset = 0;
452 PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
454 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
456 if (ret != sizeof(struct ide_dma_prd)) {
457 PrintError(core->vm_info, core, "Could not read PRD\n");
461 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
462 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
464 // loop through the PRD data....
466 if (prd_entry.size == 0) {
467 // a size of 0 means 64k
468 prd_bytes_left = 0x10000;
470 prd_bytes_left = prd_entry.size;
// Fill this descriptor one sector/block at a time.
474 while (prd_bytes_left > 0) {
475 uint_t bytes_to_write = 0;
477 if (drive->drive_type == BLOCK_DISK) {
478 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
481 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
482 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
485 } else if (drive->drive_type == BLOCK_CDROM) {
486 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
487 bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
489 if (atapi_read_chunk(ide, channel) == -1) {
490 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
495 PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
// Non-data ATAPI command: DMA the (small) result buffer directly and
// complete the command in-line below.
500 //V3_Print(core->vm_info, core, "DMA of command packet\n");
502 bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
503 prd_bytes_left = bytes_to_write;
506 // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
507 cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset,
508 bytes_to_write, drive->data_buf);
510 if (cmd_ret!=bytes_to_write) {
511 PrintError(core->vm_info, core, "Failed to write data to memory\n");
519 drive->transfer_index += bytes_to_write;
// Command complete: settle the taskfile/busmaster status and interrupt.
521 channel->status.busy = 0;
522 channel->status.ready = 1;
523 channel->status.data_req = 0;
524 channel->status.error = 0;
525 channel->status.seek_complete = 1;
527 channel->dma_status.active = 0;
528 channel->dma_status.err = 0;
530 ide_raise_irq(ide, channel);
// Normal data path: copy the staged sector/block into guest memory.
536 PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n",
537 (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
539 drive->current_lba++;
541 ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
543 if (ret != bytes_to_write) {
544 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
548 PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
550 drive->transfer_index += ret;
551 prd_bytes_left -= ret;
// Descriptor consumed; advance to the next PRD entry.
556 channel->dma_tbl_index++;
// Sectors/blocks may not straddle PRD descriptors in this implementation.
558 if (drive->drive_type == BLOCK_DISK) {
559 if (drive->transfer_index % HD_SECTOR_SIZE) {
560 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
563 } else if (drive->drive_type == BLOCK_CDROM) {
564 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
565 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
566 PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
567 PrintError(core->vm_info, core, "transfer_index=%llu, transfer_length=%llu\n",
568 drive->transfer_index, drive->transfer_length);
575 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
576 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
// ATAPI completion flags: data moved to host, command/data = command.
582 drive->irq_flags.io_dir = 1;
583 drive->irq_flags.c_d = 1;
584 drive->irq_flags.rel = 0;
588 // Update to the next PRD entry
// End of table reached: settle status and signal the guest.
592 if (prd_entry.end_of_table) {
593 channel->status.busy = 0;
594 channel->status.ready = 1;
595 channel->status.data_req = 0;
596 channel->status.error = 0;
597 channel->status.seek_complete = 1;
599 channel->dma_status.active = 0;
600 channel->dma_status.err = 0;
603 ide_raise_irq(ide, channel);
// Bus-master "write" transfer: move data FROM guest memory TO the disk,
// walking the PRD table and flushing one hard-disk sector per iteration.
// Raises the channel IRQ at end-of-table. Returns 0 on success, -1 on
// error (error-path returns are among the lines not visible here).
609 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
610 struct ide_drive * drive = get_selected_drive(channel);
611 // This is at top level scope to do the EOT test at the end
612 struct ide_dma_prd prd_entry = {};
613 uint_t bytes_left = drive->transfer_length;
616 PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
618 // Loop through disk data
619 while (bytes_left > 0) {
// Fetch the next PRD descriptor from guest memory.
620 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
621 uint_t prd_bytes_left = 0;
622 uint_t prd_offset = 0;
625 PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
627 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
629 if (ret != sizeof(struct ide_dma_prd)) {
630 PrintError(core->vm_info, core, "Could not read PRD\n");
634 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
635 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
638 if (prd_entry.size == 0) {
639 // a size of 0 means 64k
640 prd_bytes_left = 0x10000;
642 prd_bytes_left = prd_entry.size;
// Drain this descriptor one sector at a time: guest memory -> staging
// buffer -> backend disk.
645 while (prd_bytes_left > 0) {
646 uint_t bytes_to_write = 0;
649 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
652 ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
654 if (ret != bytes_to_write) {
655 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
659 PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
662 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
663 PrintError(core->vm_info, core, "Failed to write data to disk\n");
667 drive->current_lba++;
669 drive->transfer_index += ret;
670 prd_bytes_left -= ret;
// Descriptor consumed; advance to the next PRD entry.
675 channel->dma_tbl_index++;
// Sectors may not straddle PRD descriptors in this implementation.
677 if (drive->transfer_index % HD_SECTOR_SIZE) {
678 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
682 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
683 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
684 PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%llu)...\n",
685 bytes_left, drive->transfer_length);
686 PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
687 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
689 print_prd_table(ide, channel);
// End of table reached: settle status and signal the guest.
694 if (prd_entry.end_of_table) {
695 channel->status.busy = 0;
696 channel->status.ready = 1;
697 channel->status.data_req = 0;
698 channel->status.error = 0;
699 channel->status.seek_complete = 1;
701 channel->dma_status.active = 0;
702 channel->dma_status.err = 0;
705 ide_raise_irq(ide, channel);
// Offsets of the bus-master registers within each channel's 8-byte window.
712 #define DMA_CMD_PORT 0x00
713 #define DMA_STATUS_PORT 0x02
// The 32-bit PRD table pointer is written byte-wise at offsets 4-7.
714 #define DMA_PRD_PORT0 0x04
715 #define DMA_PRD_PORT1 0x05
716 #define DMA_PRD_PORT2 0x06
717 #define DMA_PRD_PORT3 0x07
// Bit 3 of the port selects the channel (0 = primary, 1 = secondary).
719 #define DMA_CHANNEL_FLAG 0x08
// Note that DMA model is as follows:
//   1. Write the PRD pointer to the busmaster (DMA engine)
//   2. Start the transfer on the device
//   3. Tell the busmaster to start shoveling data (active DMA)
// I/O hook for guest writes to the bus-master register window. Decodes the
// channel from bit 3 of the port, then dispatches on the register offset:
// command (start/stop + direction), status (write-1-to-clear), or the four
// PRD pointer bytes. Starting a transfer runs the whole DMA synchronously
// via dma_read()/dma_write().
// NOTE(review): several `case` labels, `break`s, and the final return are
// not visible in this chunk.
729 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
730 struct ide_internal * ide = (struct ide_internal *)private_data;
731 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
732 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
733 struct ide_channel * channel = &(ide->channels[channel_flag]);
735 PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
736 port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
738 switch (port_offset) {
// --- command register ---
740 channel->dma_cmd.val = *(uint8_t *)src;
742 PrintDebug(core->vm_info, core, "IDE: dma command write: 0x%x\n", channel->dma_cmd.val);
// Clearing the start bit rewinds the PRD walk for the next transfer.
744 if (channel->dma_cmd.start == 0) {
745 channel->dma_tbl_index = 0;
747 // Launch DMA operation, interrupt at end
749 channel->dma_status.active = 1;
751 if (channel->dma_cmd.read == 1) {
752 // DMA Read the whole thing - dma_read will raise irq
753 if (dma_read(core, ide, channel) == -1) {
754 PrintError(core->vm_info, core, "Failed DMA Read\n");
758 // DMA write the whole thing - dma_write will raiase irw
759 if (dma_write(core, ide, channel) == -1) {
760 PrintError(core->vm_info, core, "Failed DMA Write\n");
// Transfer ran to completion synchronously, so start drops back to 0.
766 // Note that guest cannot abort a DMA transfer
767 channel->dma_cmd.start = 0;
// --- status register (write-1-to-clear semantics for bits 1-2) ---
772 case DMA_STATUS_PORT: {
773 // This is intended to clear status
775 uint8_t val = *(uint8_t *)src;
778 PrintError(core->vm_info, core, "Invalid write length for DMA status port\n");
782 // but preserve certain bits
783 channel->dma_status.val = ((val & 0x60) |
784 (channel->dma_status.val & 0x01) |
785 (channel->dma_status.val & ~val & 0x06));
// --- PRD pointer bytes (offsets 4-7 patch the 32-bit address in place) ---
792 case DMA_PRD_PORT3: {
793 uint_t addr_index = port_offset & 0x3;
794 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
797 if (addr_index + length > 4) {
798 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
802 for (i = 0; i < length; i++) {
803 addr_buf[addr_index + i] = *((uint8_t *)src + i);
806 PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
811 PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
// I/O hook for guest reads from the bus-master register window: bounds-check
// the access, then serve it straight out of the raw dma_ports byte array
// (which overlays the typed cmd/status/PRD registers).
// NOTE(review): the final return statement is not visible in this chunk.
819 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
820 struct ide_internal * ide = (struct ide_internal *)private_data;
821 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
822 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
823 struct ide_channel * channel = &(ide->channels[channel_flag]);
825 PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
827 if (port_offset + length > 16) {
828 PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
832 memcpy(dst, channel->dma_ports + port_offset, length);
834 PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
// I/O hook for guest writes to the command register (0x1f7/0x177): latch the
// command byte and dispatch on it. PIO read/write commands kick off the
// sector pump immediately; DMA commands only record the LBA/length and wait
// for the bus-master start bit; identify/power/feature commands complete
// in-line. Unknown commands are aborted.
// NOTE(review): many `break`s, `case` labels, and closing braces are not
// visible in this chunk.
841 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
842 struct ide_internal * ide = priv_data;
843 struct ide_channel * channel = get_selected_channel(ide, port);
844 struct ide_drive * drive = get_selected_drive(channel);
847 PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
851 PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
853 channel->cmd_reg = *(uint8_t *)src;
855 switch (channel->cmd_reg) {
857 case ATA_PIDENTIFY: // ATAPI Identify Device Packet (CDROM)
858 if (drive->drive_type != BLOCK_CDROM) {
861 // JRL: Should we abort here?
862 ide_abort_command(ide, channel);
// Fill the data buffer with the ATAPI identify block, then present it.
865 atapi_identify_device(drive);
867 channel->error_reg.val = 0;
868 channel->status.val = 0x58; // ready, data_req, seek_complete
870 ide_raise_irq(ide, channel);
874 case ATA_IDENTIFY: // Identify Device
875 if (drive->drive_type != BLOCK_DISK) {
878 // JRL: Should we abort here?
879 ide_abort_command(ide, channel);
881 ata_identify_device(drive);
883 channel->error_reg.val = 0;
884 channel->status.val = 0x58;
886 ide_raise_irq(ide, channel);
890 case ATA_PACKETCMD: // ATAPI Command Packet (CDROM)
891 if (drive->drive_type != BLOCK_CDROM) {
892 ide_abort_command(ide, channel);
895 drive->sector_count = 1;
// DRQ set: the guest will now send the 12-byte packet via the data port.
897 channel->status.busy = 0;
898 channel->status.write_fault = 0;
899 channel->status.data_req = 1;
900 channel->status.error = 0;
902 // reset the data buffer...
903 drive->transfer_length = ATAPI_PACKET_SIZE;
904 drive->transfer_index = 0;
// --- PIO reads: interrupt granularity depends on multiple-sector mode ---
908 case ATA_READ: // Read Sectors with Retry
909 case ATA_READ_ONCE: // Read Sectors without Retry
910 case ATA_MULTREAD: // Read multiple sectors per ire
911 case ATA_READ_EXT: // Read Sectors Extended (LBA48)
913 if (channel->cmd_reg==ATA_MULTREAD) {
914 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
916 drive->hd_state.cur_sector_num = 1;
919 if (ata_read_sectors(ide, channel) == -1) {
920 PrintError(core->vm_info, core, "Error reading sectors\n");
921 ide_abort_command(ide,channel);
925 case ATA_WRITE: // Write Sector with retry
926 case ATA_WRITE_ONCE: // Write Sector without retry
927 case ATA_MULTWRITE: // Write multiple sectors per irq
928 case ATA_WRITE_EXT: // Write Sectors Extended (LBA48)
930 if (channel->cmd_reg==ATA_MULTWRITE) {
931 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
933 drive->hd_state.cur_sector_num = 1;
936 if (ata_write_sectors(ide, channel) == -1) {
937 PrintError(core->vm_info, core, "Error writing sectors\n");
938 ide_abort_command(ide,channel);
// --- DMA transfers: record LBA/length; data moves when the guest sets
// the bus-master start bit (see write_dma_port) ---
942 case ATA_READDMA: // Read DMA with retry
943 case ATA_READDMA_ONCE: // Read DMA without retry
944 case ATA_READDMA_EXT: { // Read DMA (LBA48)
// NOTE(review): "§_cnt" below is an HTML-entity mangling of "&sect_cnt"
// ("&sect" was decoded to "§"); the argument should be &sect_cnt.
947 if (ata_get_lba_and_size(ide, channel, &(drive->current_lba), §_cnt) == -1) {
948 PrintError(core->vm_info, core, "Error getting LBA for DMA READ\n");
949 ide_abort_command(ide, channel);
953 drive->hd_state.cur_sector_num = 1; // Not used for DMA
955 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
956 drive->transfer_index = 0;
958 // Now we wait for the transfer to be intiated by flipping the
959 // bus-master start bit
963 case ATA_WRITEDMA: // Write DMA with retry
964 case ATA_WRITEDMA_ONCE: // Write DMA without retry
965 case ATA_WRITEDMA_EXT: { // Write DMA (LBA48)
// NOTE(review): ",§_cnt" is the same "&sect_cnt" mangling as above.
969 if (ata_get_lba_and_size(ide, channel, &(drive->current_lba),§_cnt) == -1) {
970 PrintError(core->vm_info,core,"Cannot get lba\n");
971 ide_abort_command(ide, channel);
975 drive->hd_state.cur_sector_num = 1; // Not used for DMA
977 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
978 drive->transfer_index = 0;
980 // Now we wait for the transfer to be intiated by flipping the
981 // bus-master start bit
// --- power management commands: acknowledge and complete immediately ---
985 case ATA_STANDBYNOW1: // Standby Now 1
986 case ATA_IDLEIMMEDIATE: // Set Idle Immediate
987 case ATA_STANDBY: // Standby
988 case ATA_SETIDLE1: // Set Idle 1
989 case ATA_SLEEPNOW1: // Sleep Now 1
990 case ATA_STANDBYNOW2: // Standby Now 2
991 case ATA_IDLEIMMEDIATE2: // Idle Immediate (CFA)
992 case ATA_STANDBY2: // Standby 2
993 case ATA_SETIDLE2: // Set idle 2
994 case ATA_SLEEPNOW2: // Sleep Now 2
995 channel->status.val = 0;
996 channel->status.ready = 1;
997 ide_raise_irq(ide, channel);
1000 case ATA_SETFEATURES: // Set Features
1001 // Prior to this the features register has been written to.
1002 // This command tells the drive to check if the new value is supported (the value is drive specific)
1003 // Common is that bit0=DMA enable
1004 // If valid the drive raises an interrupt, if not it aborts.
1006 // Do some checking here...
1008 channel->status.busy = 0;
1009 channel->status.write_fault = 0;
1010 channel->status.error = 0;
1011 channel->status.ready = 1;
1012 channel->status.seek_complete = 1;
1014 ide_raise_irq(ide, channel);
1017 case ATA_SPECIFY: // Initialize Drive Parameters
1018 case ATA_RECAL: // recalibrate?
1019 channel->status.error = 0;
1020 channel->status.ready = 1;
1021 channel->status.seek_complete = 1;
1022 ide_raise_irq(ide, channel);
1025 case ATA_SETMULT: { // Set multiple mode (IDE Block mode)
1026 // This makes the drive transfer multiple sectors before generating an interrupt
1028 if (drive->sector_count == 0) {
1029 PrintError(core->vm_info,core,"Attempt to set multiple to zero\n");
1030 drive->hd_state.mult_sector_num= 1;
1031 ide_abort_command(ide,channel);
1034 drive->hd_state.mult_sector_num = drive->sector_count;
1037 channel->status.ready = 1;
1038 channel->status.error = 0;
1040 ide_raise_irq(ide, channel);
1045 case ATA_DEVICE_RESET: // Reset Device
1047 channel->error_reg.val = 0x01;
1048 channel->status.busy = 0;
1049 channel->status.ready = 1;
1050 channel->status.seek_complete = 1;
1051 channel->status.write_fault = 0;
1052 channel->status.error = 0;
1055 case ATA_CHECKPOWERMODE1: // Check power mode
1056 drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1057 channel->status.busy = 0;
1058 channel->status.ready = 1;
1059 channel->status.write_fault = 0;
1060 channel->status.data_req = 0;
1061 channel->status.error = 0;
1065 PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1066 ide_abort_command(ide, channel);
// Serve one PIO data-port read for a hard disk: copy `length` bytes out of
// the staged sector at the current offset, refilling the staging buffer
// from the next LBA at each sector boundary, and raise the IRQ at each
// cur_sector_num-sector increment or at end of transfer.
// NOTE(review): several error returns/closing braces are not visible here.
1076 static int read_hd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1077 struct ide_drive * drive = get_selected_drive(channel);
1078 uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1081 PrintDebug(VM_NONE,VCORE_NONE, "Read HD data: transfer_index %llu transfer length %llu current sector numer %llu\n",
1082 drive->transfer_index, drive->transfer_length,
1083 drive->hd_state.cur_sector_num);
1085 if (drive->transfer_index >= drive->transfer_length) {
1086 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1087 drive->transfer_length, drive->transfer_index,
1088 drive->transfer_index + length);
// Accesses that straddle a sector boundary are rejected.
1093 if (data_offset + length > HD_SECTOR_SIZE) {
1094 PrintError(VM_NONE,VCORE_NONE,"Read spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1097 // For index==0, the read has been done in ata_read_sectors
1098 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1099 // advance to next sector and read it
1101 drive->current_lba++;
1103 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1104 PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
1110 PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
1111 *(uint32_t *)(drive->data_buf + data_offset),
1112 length, data_offset);
1114 memcpy(dst, drive->data_buf + data_offset, length);
1116 drive->transfer_index += length;
1119 /* This is the trigger for interrupt injection.
1120 * For read single sector commands we interrupt after every sector
1121 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1122 * cur_sector_num is configured depending on the operation we are currently running
1123 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
 */
1125 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1126 (drive->transfer_index == drive->transfer_length)) {
1127 if (drive->transfer_index < drive->transfer_length) {
1128 // An increment is complete, but there is still more data to be transferred...
1129 PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1130 channel->status.data_req = 1;
1132 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1133 // This was the final read of the request
1134 channel->status.data_req = 0;
1137 channel->status.ready = 1;
1138 channel->status.busy = 0;
1140 ide_raise_irq(ide, channel);
// Serve one PIO data-port write for a hard disk: stage `length` bytes into
// the sector buffer, flush the buffer to the backend each time a full
// sector has been accumulated, and raise the IRQ at each cur_sector_num
// increment or at end of transfer.
// NOTE(review): several error returns/closing braces are not visible here.
1147 static int write_hd_data(uint8_t * src, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
1148 struct ide_drive * drive = get_selected_drive(channel);
1149 uint64_t data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1152 PrintDebug(VM_NONE,VCORE_NONE, "Write HD data: transfer_index %llu transfer length %llu current sector numer %llu\n",
1153 drive->transfer_index, drive->transfer_length,
1154 drive->hd_state.cur_sector_num);
1156 if (drive->transfer_index >= drive->transfer_length) {
1157 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
1158 drive->transfer_length, drive->transfer_index,
1159 drive->transfer_index + length);
// Accesses that straddle a sector boundary are rejected.
1163 if (data_offset + length > HD_SECTOR_SIZE) {
1164 PrintError(VM_NONE,VCORE_NONE,"Write spans sectors (data_offset=%llu length=%llu)!\n",data_offset,length);
1167 // Copy data into our buffer - there will be room due to
1168 // (a) the ata_write test below is flushing sectors
1169 // (b) if we somehow get a sector-stradling write (an error), this will
1170 // be OK since the buffer itself is >1 sector in memory
1171 memcpy(drive->data_buf + data_offset, src, length);
1173 drive->transfer_index += length;
1175 if ((data_offset+length) >= HD_SECTOR_SIZE) {
1176 // Write out the sector we just finished
1177 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
1178 PrintError(VM_NONE, VCORE_NONE, "Could not write next disk sector\n");
1182 // go onto next sector
1183 drive->current_lba++;
1186 /* This is the trigger for interrupt injection.
1187 * For write single sector commands we interrupt after every sector
1188 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1189 * cur_sector_num is configured depending on the operation we are currently running
1190 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
 */
1192 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1193 (drive->transfer_index == drive->transfer_length)) {
1194 if (drive->transfer_index < drive->transfer_length) {
1195 // An increment is complete, but there is still more data to be transferred...
1196 PrintDebug(VM_NONE, VCORE_NONE, "Increment Complete, still transferring more sectors\n");
1197 channel->status.data_req = 1;
1199 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1200 // This was the final read of the request
1201 channel->status.data_req = 0;
1204 channel->status.ready = 1;
1205 channel->status.busy = 0;
1207 ide_raise_irq(ide, channel);
/*
 * PIO read of CD (ATAPI) data from the drive's staging buffer into the guest.
 * Data is produced in ATAPI_BLOCK_SIZE (2048-byte) blocks; when the read
 * crosses a block boundary the buffer is refilled from the backend.
 * Raises the channel IRQ and updates the ATAPI interrupt-reason flags as the
 * transfer progresses / completes.
 */
static int read_cd_data(uint8_t * dst, uint64_t length, struct ide_internal * ide, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // byte offset of the next read within the current 2048-byte ATAPI block
    uint64_t data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
    //  int req_offset = drive->transfer_index % drive->req_len;

    // Skip per-read tracing for READ(10) (0x28), the hot path
    if (drive->cd_state.atapi_cmd != 0x28) {
	PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%llu) (req_len=%u)\n", length, drive->req_len);
	PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%llu, transfer idx=%llu\n", drive->transfer_length, drive->transfer_index);

    // Guest is reading past the end of the programmed transfer
    if (drive->transfer_index >= drive->transfer_length) {
	PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%llu) (cur_idx=%llu) (post_idx=%llu)\n",
		   drive->transfer_length, drive->transfer_index,
		   drive->transfer_index + length);

    // Crossed into a new ATAPI block (but not the very first one):
    // refill the staging buffer from the backing store
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
	if (atapi_update_data_buf(ide, channel) == -1) {
	    PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");

    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;

    // Should the req_offset be recalculated here?????
    if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
	if (drive->transfer_index < drive->transfer_length) {
	    // An increment is complete, but there is still more data to be transferred...
	    channel->status.data_req = 1;

	    drive->irq_flags.c_d = 0;  // interrupt reason: data phase, not command

	    // Update the request length in the cylinder regs
	    if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
		PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");

	    // This was the final read of the request
	    channel->status.data_req = 0;
	    channel->status.ready = 1;
	    drive->irq_flags.c_d = 1;   // interrupt reason: command complete
	    drive->irq_flags.rel = 0;

	drive->irq_flags.io_dir = 1;    // device-to-host transfer direction
	channel->status.busy = 0;

	ide_raise_irq(ide, channel);
1282 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1283 struct ide_drive * drive = get_selected_drive(channel);
1285 channel->status.busy = 0;
1286 channel->status.ready = 1;
1287 channel->status.write_fault = 0;
1288 channel->status.seek_complete = 1;
1289 channel->status.corrected = 0;
1290 channel->status.error = 0;
1293 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1294 drive->transfer_index += length;
1296 if (drive->transfer_index >= drive->transfer_length) {
1297 channel->status.data_req = 0;
1305 static int read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1306 struct ide_internal * ide = priv_data;
1307 struct ide_channel * channel = get_selected_channel(ide, port);
1308 struct ide_drive * drive = get_selected_drive(channel);
1310 //PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
1312 if ((channel->cmd_reg == ATA_IDENTIFY) ||
1313 (channel->cmd_reg == ATA_PIDENTIFY)) {
1314 return read_drive_id((uint8_t *)dst, length, ide, channel);
1317 if (drive->drive_type == BLOCK_CDROM) {
1318 if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1319 PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1322 } else if (drive->drive_type == BLOCK_DISK) {
1323 if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1324 PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
1328 memset((uint8_t *)dst, 0, length);
1334 // For the write side, we care both about
1335 // direct PIO writes to a drive as well as
1336 // writes that pass a packet through to an CD
1337 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1338 struct ide_internal * ide = priv_data;
1339 struct ide_channel * channel = get_selected_channel(ide, port);
1340 struct ide_drive * drive = get_selected_drive(channel);
1342 PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n",
1343 port, *(uint32_t *)src, length);
1345 if (drive->drive_type == BLOCK_CDROM) {
1346 if (channel->cmd_reg == ATA_PACKETCMD) {
1347 // short command packet - no check for space...
1348 memcpy(drive->data_buf + drive->transfer_index, src, length);
1349 drive->transfer_index += length;
1350 if (drive->transfer_index >= drive->transfer_length) {
1351 if (atapi_handle_packet(core, ide, channel) == -1) {
1352 PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1357 PrintError(core->vm_info,core,"Unknown command %x on CD ROM\n",channel->cmd_reg);
1360 } else if (drive->drive_type == BLOCK_DISK) {
1361 if (write_hd_data((uint8_t *)src, length, ide, channel) == -1) {
1362 PrintError(core->vm_info, core, "IDE: Could not write HD Data\n");
1366 // nothing ... do not support writable cd
1372 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1373 struct ide_internal * ide = priv_data;
1374 struct ide_channel * channel = get_selected_channel(ide, port);
1375 struct ide_drive * drive = get_selected_drive(channel);
1378 PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1382 PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1385 // reset and interrupt enable
1387 case SEC_CTRL_PORT: {
1388 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1390 // only reset channel on a 0->1 reset bit transition
1391 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1392 channel_reset(channel);
1393 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1394 channel_reset_complete(channel);
1397 channel->ctrl_reg.val = tmp_ctrl->val;
1400 case PRI_FEATURES_PORT:
1401 case SEC_FEATURES_PORT:
1402 channel->features.val = *(uint8_t *)src;
1405 case PRI_SECT_CNT_PORT:
1406 case SEC_SECT_CNT_PORT:
1407 // update CHS and LBA28 state
1408 channel->drives[0].sector_count = *(uint8_t *)src;
1409 channel->drives[1].sector_count = *(uint8_t *)src;
1411 // update LBA48 state
1412 if (is_lba48(channel)) {
1413 uint16_t val = *(uint8_t*)src; // top bits zero;
1414 if (!channel->drives[0].lba48.sector_count_state) {
1415 channel->drives[0].lba48.sector_count = val<<8;
1417 channel->drives[0].lba48.sector_count |= val;
1419 channel->drives[0].lba48.sector_count_state ^= 1;
1420 if (!channel->drives[1].lba48.sector_count_state) {
1421 channel->drives[1].lba48.sector_count = val<<8;
1423 channel->drives[1].lba48.sector_count |= val;
1425 channel->drives[0].lba48.sector_count_state ^= 1;
1430 case PRI_SECT_NUM_PORT:
1431 case SEC_SECT_NUM_PORT:
1432 // update CHS and LBA28 state
1433 channel->drives[0].sector_num = *(uint8_t *)src;
1434 channel->drives[1].sector_num = *(uint8_t *)src;
1436 // update LBA48 state
1437 if (is_lba48(channel)) {
1438 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1439 if (!channel->drives[0].lba48.lba41_state) {
1440 channel->drives[0].lba48.lba |= val<<24;
1442 channel->drives[0].lba48.lba |= val;
1444 channel->drives[0].lba48.lba41_state ^= 1;
1445 if (!channel->drives[1].lba48.lba41_state) {
1446 channel->drives[1].lba48.lba |= val<<24;
1448 channel->drives[1].lba48.lba |= val;
1450 channel->drives[1].lba48.lba41_state ^= 1;
1454 case PRI_CYL_LOW_PORT:
1455 case SEC_CYL_LOW_PORT:
1456 // update CHS and LBA28 state
1457 channel->drives[0].cylinder_low = *(uint8_t *)src;
1458 channel->drives[1].cylinder_low = *(uint8_t *)src;
1460 // update LBA48 state
1461 if (is_lba48(channel)) {
1462 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1463 if (!channel->drives[0].lba48.lba52_state) {
1464 channel->drives[0].lba48.lba |= val<<32;
1466 channel->drives[0].lba48.lba |= val<<8;
1468 channel->drives[0].lba48.lba52_state ^= 1;
1469 if (!channel->drives[1].lba48.lba52_state) {
1470 channel->drives[1].lba48.lba |= val<<32;
1472 channel->drives[1].lba48.lba |= val<<8;
1474 channel->drives[1].lba48.lba52_state ^= 1;
1479 case PRI_CYL_HIGH_PORT:
1480 case SEC_CYL_HIGH_PORT:
1481 // update CHS and LBA28 state
1482 channel->drives[0].cylinder_high = *(uint8_t *)src;
1483 channel->drives[1].cylinder_high = *(uint8_t *)src;
1485 // update LBA48 state
1486 if (is_lba48(channel)) {
1487 uint64_t val = *(uint8_t *)src; // lob off top 7 bytes;
1488 if (!channel->drives[0].lba48.lba63_state) {
1489 channel->drives[0].lba48.lba |= val<<40;
1491 channel->drives[0].lba48.lba |= val<<16;
1493 channel->drives[0].lba48.lba63_state ^= 1;
1494 if (!channel->drives[1].lba48.lba63_state) {
1495 channel->drives[1].lba48.lba |= val<<40;
1497 channel->drives[1].lba48.lba |= val<<16;
1499 channel->drives[1].lba48.lba63_state ^= 1;
1504 case PRI_DRV_SEL_PORT:
1505 case SEC_DRV_SEL_PORT: {
1506 struct ide_drive_head_reg nh, oh;
1508 oh.val = channel->drive_head.val;
1509 channel->drive_head.val = nh.val = *(uint8_t *)src;
1512 if ((oh.val & 0xe0) != (nh.val & 0xe0)) {
1513 // reset LBA48 state
1514 channel->drives[0].lba48.sector_count_state=0;
1515 channel->drives[0].lba48.lba41_state=0;
1516 channel->drives[0].lba48.lba52_state=0;
1517 channel->drives[0].lba48.lba63_state=0;
1518 channel->drives[1].lba48.sector_count_state=0;
1519 channel->drives[1].lba48.lba41_state=0;
1520 channel->drives[1].lba48.lba52_state=0;
1521 channel->drives[1].lba48.lba63_state=0;
1525 drive = get_selected_drive(channel);
1527 // Selecting a non-present device is a no-no
1528 if (drive->drive_type == BLOCK_NONE) {
1529 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1530 channel->error_reg.abort = 1;
1531 channel->status.error = 1;
1533 channel->status.busy = 0;
1534 channel->status.ready = 1;
1535 channel->status.data_req = 0;
1536 channel->status.error = 0;
1537 channel->status.seek_complete = 1;
1539 channel->dma_status.active = 0;
1540 channel->dma_status.err = 0;
1546 PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
1553 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1554 struct ide_internal * ide = priv_data;
1555 struct ide_channel * channel = get_selected_channel(ide, port);
1556 struct ide_drive * drive = get_selected_drive(channel);
1559 PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1563 PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1565 if ((port == PRI_ADDR_REG_PORT) ||
1566 (port == SEC_ADDR_REG_PORT)) {
1567 // unused, return 0xff
1568 *(uint8_t *)dst = 0xff;
1573 // if no drive is present just return 0 + reserved bits
1574 if (drive->drive_type == BLOCK_NONE) {
1575 if ((port == PRI_DRV_SEL_PORT) ||
1576 (port == SEC_DRV_SEL_PORT)) {
1577 *(uint8_t *)dst = 0xa0;
1579 *(uint8_t *)dst = 0;
1587 // This is really the error register.
1588 case PRI_FEATURES_PORT:
1589 case SEC_FEATURES_PORT:
1590 *(uint8_t *)dst = channel->error_reg.val;
1593 case PRI_SECT_CNT_PORT:
1594 case SEC_SECT_CNT_PORT:
1595 *(uint8_t *)dst = drive->sector_count;
1598 case PRI_SECT_NUM_PORT:
1599 case SEC_SECT_NUM_PORT:
1600 *(uint8_t *)dst = drive->sector_num;
1603 case PRI_CYL_LOW_PORT:
1604 case SEC_CYL_LOW_PORT:
1605 *(uint8_t *)dst = drive->cylinder_low;
1609 case PRI_CYL_HIGH_PORT:
1610 case SEC_CYL_HIGH_PORT:
1611 *(uint8_t *)dst = drive->cylinder_high;
1614 case PRI_DRV_SEL_PORT:
1615 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1616 *(uint8_t *)dst = channel->drive_head.val;
1623 // Something about lowering interrupts here....
1624 *(uint8_t *)dst = channel->status.val;
1628 PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1632 PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
1639 static void init_drive(struct ide_drive * drive) {
1641 drive->sector_count = 0x01;
1642 drive->sector_num = 0x01;
1643 drive->cylinder = 0x0000;
1645 drive->drive_type = BLOCK_NONE;
1647 memset(drive->model, 0, sizeof(drive->model));
1649 drive->transfer_index = 0;
1650 drive->transfer_length = 0;
1651 memset(drive->data_buf, 0, sizeof(drive->data_buf));
1653 drive->num_cylinders = 0;
1654 drive->num_heads = 0;
1655 drive->num_sectors = 0;
1658 drive->private_data = NULL;
1662 static void init_channel(struct ide_channel * channel) {
1665 channel->error_reg.val = 0x01;
1667 //** channel->features = 0x0;
1669 channel->drive_head.val = 0x00;
1670 channel->status.val = 0x00;
1671 channel->cmd_reg = 0x00;
1672 channel->ctrl_reg.val = 0x08;
1674 channel->dma_cmd.val = 0;
1675 channel->dma_status.val = 0;
1676 channel->dma_prd_addr = 0;
1677 channel->dma_tbl_index = 0;
1679 for (i = 0; i < 2; i++) {
1680 init_drive(&(channel->drives[i]));
1686 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1687 PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1689 struct ide_internal * ide = (struct ide_internal *)(private_data);
1691 PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
1697 static int init_ide_state(struct ide_internal * ide) {
1700 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1703 init_channel(&(ide->channels[0]));
1704 ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1706 init_channel(&(ide->channels[1]));
1707 ide->channels[1].irq = SEC_DEFAULT_IRQ ;
/*
 * Device-framework destructor for the IDE controller.
 *
 * FIX: release the controller state allocated with V3_Malloc in ide_init —
 * without this the state block leaks on device teardown.
 * Returns 0.
 */
static int ide_free(struct ide_internal * ide) {

    // deregister from PCI?

    V3_Free(ide);

    return 0;
}
1725 #ifdef V3_CONFIG_CHECKPOINT
1727 #include <palacios/vmm_sprintf.h>
/*
 * Checkpoint-save callback: serializes the full IDE controller state.
 * Layout: a (currently empty) top-level context named <id>, one context per
 * channel named "<id>-<ch>", and one per drive named "<id>-<ch>-<drv>".
 * Uses the goto-savefailout cleanup pattern; any V3_CHKPT_SAVE failure
 * jumps there.  Returns 0 on success, -1 on failure.
 */
static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    struct v3_chkpt_ctx *ctx=0;

    // Top-level device context (placeholder: nothing stored in it yet)
    ctx=v3_chkpt_open_ctx(chkpt,id);
	PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
    // nothing saved yet
    v3_chkpt_close_ctx(ctx);ctx=0;

    // Per-channel register state
    for (ch_num = 0; ch_num < 2; ch_num++) {
	struct ide_channel * ch = &(ide->channels[ch_num]);
	snprintf(buf, 128, "%s-%d", id, ch_num);
	ctx = v3_chkpt_open_ctx(chkpt, buf);
	    PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);

	// Shadow registers shared by both drives on the channel
	V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
	V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
	V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
	V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
	V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
	V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
	// Bus-master DMA engine state
	V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
	V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
	V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
	V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);

	v3_chkpt_close_ctx(ctx); ctx=0;

	// Per-drive state within this channel
	for (drive_num = 0; drive_num < 2; drive_num++) {
	    struct ide_drive * drive = &(ch->drives[drive_num]);
	    snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
	    ctx = v3_chkpt_open_ctx(chkpt, buf);
		PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);

	    V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
	    V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
	    V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
	    V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);

	    // In-flight transfer state
	    V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
	    V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
	    V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);

	    V3_CHKPT_SAVE(ctx, "DATA_BUF", drive->data_buf, savefailout);

	    /* For now we'll just pack the type specific data at the end... */
	    /* We should probably add a new context here in the future... */
	    if (drive->drive_type == BLOCK_CDROM) {
		V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
		V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
		V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
	    } else if (drive->drive_type == BLOCK_DISK) {
		V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
		V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
		V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
	    } else if (drive->drive_type == BLOCK_NONE) {
		// no drive connected, so no data
		PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);

	    // LBA48 two-write FIFO state machines
	    V3_CHKPT_SAVE(ctx, "LBA48_LBA", drive->lba48.lba, savefailout);
	    V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, savefailout);
	    V3_CHKPT_SAVE(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, savefailout);
	    V3_CHKPT_SAVE(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, savefailout);
	    V3_CHKPT_SAVE(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, savefailout);
	    V3_CHKPT_SAVE(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, savefailout);

	    v3_chkpt_close_ctx(ctx); ctx=0;

    // savefailout target: close any context left open, then report failure
    PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
    if (ctx) {v3_chkpt_close_ctx(ctx); }
/*
 * Checkpoint-restore callback: mirror of ide_save_extended.  Reads the same
 * context layout ("<id>", "<id>-<ch>", "<id>-<ch>-<drv>") back into the
 * controller state.  Uses the goto-loadfailout cleanup pattern; any
 * V3_CHKPT_LOAD failure jumps there.  Returns 0 on success, -1 on failure.
 */
static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
    struct ide_internal * ide = (struct ide_internal *)private_data;
    struct v3_chkpt_ctx *ctx=0;

    // Top-level device context (placeholder: nothing stored in it yet)
    ctx=v3_chkpt_open_ctx(chkpt,id);
	PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
    // nothing saved yet
    v3_chkpt_close_ctx(ctx);ctx=0;

    // Per-channel register state
    for (ch_num = 0; ch_num < 2; ch_num++) {
	struct ide_channel * ch = &(ide->channels[ch_num]);
	snprintf(buf, 128, "%s-%d", id, ch_num);
	ctx = v3_chkpt_open_ctx(chkpt, buf);
	    PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);

	// Shadow registers shared by both drives on the channel
	V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
	V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
	V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
	V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
	V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
	V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
	// Bus-master DMA engine state
	V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
	V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
	V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
	V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);

	v3_chkpt_close_ctx(ctx); ctx=0;

	// Per-drive state within this channel
	for (drive_num = 0; drive_num < 2; drive_num++) {
	    struct ide_drive * drive = &(ch->drives[drive_num]);
	    snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
	    ctx = v3_chkpt_open_ctx(chkpt, buf);
		PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);

	    V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
	    V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
	    V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
	    V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);

	    // In-flight transfer state
	    V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
	    V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
	    V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);

	    V3_CHKPT_LOAD(ctx, "DATA_BUF", drive->data_buf, loadfailout);

	    /* For now we'll just pack the type specific data at the end... */
	    /* We should probably add a new context here in the future... */
	    if (drive->drive_type == BLOCK_CDROM) {
		V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
		V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
		V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
	    } else if (drive->drive_type == BLOCK_DISK) {
		V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
		V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
		V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
	    } else if (drive->drive_type == BLOCK_NONE) {
		// no drive connected, so no data
		PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);

	    // LBA48 two-write FIFO state machines
	    V3_CHKPT_LOAD(ctx, "LBA48_LBA", drive->lba48.lba, loadfailout);
	    V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT", drive->lba48.sector_count, loadfailout);
	    V3_CHKPT_LOAD(ctx, "LBA48_SECTOR_COUNT_STATE", drive->lba48.sector_count_state, loadfailout);
	    V3_CHKPT_LOAD(ctx, "LBA48_LBA41_STATE", drive->lba48.lba41_state, loadfailout);
	    V3_CHKPT_LOAD(ctx, "LBA48_LBA52_STATE", drive->lba48.lba52_state, loadfailout);
	    V3_CHKPT_LOAD(ctx, "LBA48_LBA63_STATE", drive->lba48.lba63_state, loadfailout);

    // loadfailout target: close any context left open, then report failure
    PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
    if (ctx) {v3_chkpt_close_ctx(ctx); }
// Device-framework callback table for the IDE controller.  The free
// callback is cast because ide_free takes the concrete state pointer type
// rather than void *.  Checkpoint hooks are compiled in only when
// V3_CONFIG_CHECKPOINT is enabled.
static struct v3_device_ops dev_ops = {
    .free = (int (*)(void *))ide_free,
#ifdef V3_CONFIG_CHECKPOINT
    .save_extended = ide_save_extended,
    .load_extended = ide_load_extended
1959 static int connect_fn(struct v3_vm_info * vm,
1960 void * frontend_data,
1961 struct v3_dev_blk_ops * ops,
1962 v3_cfg_tree_t * cfg,
1963 void * private_data) {
1964 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1965 struct ide_channel * channel = NULL;
1966 struct ide_drive * drive = NULL;
1968 char * bus_str = v3_cfg_val(cfg, "bus_num");
1969 char * drive_str = v3_cfg_val(cfg, "drive_num");
1970 char * type_str = v3_cfg_val(cfg, "type");
1971 char * model_str = v3_cfg_val(cfg, "model");
1973 uint_t drive_num = 0;
1976 if ((!type_str) || (!drive_str) || (!bus_str)) {
1977 PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
1981 bus_num = atoi(bus_str);
1982 drive_num = atoi(drive_str);
1984 channel = &(ide->channels[bus_num]);
1985 drive = &(channel->drives[drive_num]);
1987 if (drive->drive_type != BLOCK_NONE) {
1988 PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
1992 if (model_str != NULL) {
1993 strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1996 if (strcasecmp(type_str, "cdrom") == 0) {
1997 drive->drive_type = BLOCK_CDROM;
1999 while (strlen((char *)(drive->model)) < 40) {
2000 strcat((char*)(drive->model), " ");
2003 } else if (strcasecmp(type_str, "hd") == 0) {
2004 drive->drive_type = BLOCK_DISK;
2006 drive->hd_state.accessed = 0;
2007 drive->hd_state.mult_sector_num = 1;
2009 drive->num_sectors = 63;
2010 drive->num_heads = 16;
2011 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
2013 PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
2020 // Hardcode this for now, but its not a good idea....
2021 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
2024 drive->private_data = private_data;
/*
 * Device constructor: allocates the controller state, optionally binds it to
 * a PCI bus + southbridge, registers the device, hooks all legacy IDE I/O
 * ports on both channels, registers the PIIX3-compatible PCI function (with
 * BAR4 as the bus-master DMA I/O window), and exposes the device as a block
 * frontend.  Returns 0 on success, -1 on failure (removing the device and
 * freeing state on the error paths).
 */
static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
    struct ide_internal * ide  = NULL;
    char * dev_id = v3_cfg_val(cfg, "ID");

    PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");

    ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));

	PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");

    memset(ide, 0, sizeof(struct ide_internal));

    // PCI attachment is optional; it requires both a bus and a southbridge
    ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));

    if (ide->pci_bus != NULL) {
	struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));

	    PrintError(vm, VCORE_NONE, "Could not find southbridge\n");

	ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
	PrintError(vm,VCORE_NONE,"Strange - you don't have a PCI bus\n");

    PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");

    struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);

	PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);

    if (init_ide_state(ide) == -1) {
	PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
	v3_remove_device(dev);

    PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");

    // Primary channel task-file ports (0x1f0-0x1f7)
    ret |= v3_dev_hook_io(dev, PRI_DATA_PORT,
			  &read_data_port, &write_data_port);
    ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, PRI_CMD_PORT,
			  &read_port_std, &write_cmd_port);

    // Secondary channel task-file ports (0x170-0x177)
    ret |= v3_dev_hook_io(dev, SEC_DATA_PORT,
			  &read_data_port, &write_data_port);
    ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
			  &read_port_std, &write_port_std);
    ret |= v3_dev_hook_io(dev, SEC_CMD_PORT,
			  &read_port_std, &write_cmd_port);

    // Control / alternate-status and address registers
    ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT,
			  &read_port_std, &write_port_std);

    ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT,
			  &read_port_std, &write_port_std);

    ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
			  &read_port_std, &write_port_std);

    ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
			  &read_port_std, &write_port_std);

	PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
	v3_remove_device(dev);

	// PCI registration path (only when a bus + southbridge are present)
	struct v3_pci_bar bars[6];
	struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
	struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
	struct pci_device * pci_dev = NULL;

	V3_Print(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");

	for (i = 0; i < 6; i++) {
	    bars[i].type = PCI_BAR_NONE;

	// BAR4 is the bus-master IDE DMA register block (16 I/O ports)
	bars[4].type = PCI_BAR_IO;
	//	bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
	bars[4].default_base_port = -1;
	bars[4].num_ports = 16;

	bars[4].io_read = read_dma_port;
	bars[4].io_write = write_dma_port;
	bars[4].private_data = ide;

	// Register as function 1 of the southbridge's PCI device number
	pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
					 pci_config_update, NULL, NULL, NULL, ide);

	if (pci_dev == NULL) {
	    PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i);
	    v3_remove_device(dev);

	/* This is for CMD646 devices
	   pci_dev->config_header.vendor_id = 0x1095;
	   pci_dev->config_header.device_id = 0x0646;
	   pci_dev->config_header.revision = 0x8f07;

	// Identify as an Intel PIIX3 IDE function (8086:7010)
	pci_dev->config_header.vendor_id = 0x8086;
	pci_dev->config_header.device_id = 0x7010;
	pci_dev->config_header.revision = 0x00;

	pci_dev->config_header.prog_if = 0x80; // Master IDE device
	pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
	pci_dev->config_header.class = PCI_CLASS_STORAGE;

	pci_dev->config_header.command = 0;
	pci_dev->config_header.status = 0x0280;

	ide->ide_pci = pci_dev;

    // Expose the controller as a block frontend so backends can connect
    if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
	PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
	v3_remove_device(dev);

    PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
// Register this controller with the Palacios device framework under "IDE".
device_register("IDE", ide_init)
2211 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num,
2212 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2214 struct ide_internal * ide = ide_data;
2215 struct ide_channel * channel = &(ide->channels[channel_num]);
2216 struct ide_drive * drive = &(channel->drives[drive_num]);
2218 if (drive->drive_type == BLOCK_NONE) {
2222 *cylinders = drive->num_cylinders;
2223 *heads = drive->num_heads;
2224 *sectors = drive->num_sectors;