2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
/* When IDE debugging is disabled, compile PrintDebug out entirely.
 * NOTE(review): the #undef/#endif were missing from the truncated listing
 * and are restored here — confirm against upstream. */
#ifndef CONFIG_DEBUG_IDE
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
/* Legacy ISA IRQ lines for the two IDE channels */
#define PRI_DEFAULT_IRQ 14
#define SEC_DEFAULT_IRQ 15

/* Primary channel task-file I/O ports (0x1f0-0x1f7) plus control block */
#define PRI_DATA_PORT         0x1f0
#define PRI_FEATURES_PORT     0x1f1
#define PRI_SECT_CNT_PORT     0x1f2
#define PRI_SECT_NUM_PORT     0x1f3
#define PRI_CYL_LOW_PORT      0x1f4
#define PRI_CYL_HIGH_PORT     0x1f5
#define PRI_DRV_SEL_PORT      0x1f6
#define PRI_CMD_PORT          0x1f7
#define PRI_CTRL_PORT         0x3f6
#define PRI_ADDR_REG_PORT     0x3f7

/* Secondary channel task-file I/O ports (0x170-0x177) plus control block */
#define SEC_DATA_PORT         0x170
#define SEC_FEATURES_PORT     0x171
#define SEC_SECT_CNT_PORT     0x172
#define SEC_SECT_NUM_PORT     0x173
#define SEC_CYL_LOW_PORT      0x174
#define SEC_CYL_HIGH_PORT     0x175
#define SEC_DRV_SEL_PORT      0x176
#define SEC_CMD_PORT          0x177
#define SEC_CTRL_PORT         0x376
#define SEC_ADDR_REG_PORT     0x377

/* Default Bus Master IDE (DMA) port bases; secondary is primary + 8 */
#define PRI_DEFAULT_DMA_PORT  0xc000
#define SEC_DEFAULT_DMA_PORT  0xc008

/* Size of the per-drive staging buffer used for PIO/DMA transfers */
#define DATA_BUFFER_SIZE      2048

/* Transfer granularity: one ATAPI (CD) block and one ATA (HD) sector */
#define ATAPI_BLOCK_SIZE      2048
#define HD_SECTOR_SIZE        512
/* Debug names for the primary channel ports; indices 0-7 map to
 * 0x1f0-0x1f7 and indices 8-9 map to 0x3f6-0x3f7 (see io_port_to_str). */
static const char * ide_pri_port_strs[] = {
    "PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM",
    "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
    "PRI_CTRL", "PRI_ADDR_REG"
};
/* Debug names for the secondary channel ports; indices 0-7 map to
 * 0x170-0x177 and indices 8-9 map to 0x376-0x377 (see io_port_to_str). */
static const char * ide_sec_port_strs[] = {
    "SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM",
    "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
    "SEC_CTRL", "SEC_ADDR_REG"
};
/* Debug names for the Bus Master DMA port offsets (0-7 within a channel).
 * Offsets 1 and 3 are reserved in the BMIDE register layout, hence NULL. */
static const char * ide_dma_port_strs[] = {
    "DMA_CMD", NULL, "DMA_STATUS", NULL,
    "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"
};
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
105 struct ide_cd_state {
106 struct atapi_sense_data sense;
109 struct atapi_error_recovery err_recovery;
112 struct ide_hd_state {
115 /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116 uint_t mult_sector_num;
118 /* This is the current op sector size:
119 * for multiple sector ops this equals mult_sector_num
120 * for standard ops this equals 1
122 uint_t cur_sector_num;
128 v3_block_type_t drive_type;
130 struct v3_dev_blk_ops * ops;
133 struct ide_cd_state cd_state;
134 struct ide_hd_state hd_state;
139 // Where we are in the data transfer
140 uint_t transfer_index;
142 // the length of a transfer
143 // calculated for easy access
144 uint_t transfer_length;
146 uint64_t current_lba;
148 // We have a local data buffer that we use for IO port accesses
149 uint8_t data_buf[DATA_BUFFER_SIZE];
152 uint32_t num_cylinders;
154 uint32_t num_sectors;
159 uint8_t sector_count; // 0x1f2,0x172
160 struct atapi_irq_flags irq_flags;
161 } __attribute__((packed));
164 uint8_t sector_num; // 0x1f3,0x173
166 } __attribute__((packed));
173 uint8_t cylinder_low; // 0x1f4,0x174
174 uint8_t cylinder_high; // 0x1f5,0x175
175 } __attribute__((packed));
180 } __attribute__((packed));
183 // The transfer length requested by the CPU
185 } __attribute__((packed));
192 struct ide_drive drives[2];
195 struct ide_error_reg error_reg; // [read] 0x1f1,0x171
197 struct ide_features_reg features;
199 struct ide_drive_head_reg drive_head; // 0x1f6,0x176
201 struct ide_status_reg status; // [read] 0x1f7,0x177
202 uint8_t cmd_reg; // [write] 0x1f7,0x177
204 int irq; // this is temporary until we add PCI support
207 struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
209 struct ide_dma_cmd_reg dma_cmd;
210 struct ide_dma_status_reg dma_status;
211 uint32_t dma_prd_addr;
212 uint_t dma_tbl_index;
217 struct ide_internal {
218 struct ide_channel channels[2];
220 struct v3_southbridge * southbridge;
221 struct vm_device * pci_bus;
223 struct pci_device * ide_pci;
230 /* Utility functions */
/* Byte-order swap helpers. Each reinterprets the value's storage bytes
 * through a uint8_t pointer (allowed by the char-type aliasing rule) and
 * reassembles them in the opposite order, so be<->le conversion is the same
 * operation in both directions. Closing braces restored (missing from the
 * truncated listing). */

static inline uint16_t be_to_le_16(const uint16_t val) {
    uint8_t * buf = (uint8_t *)&val;
    return (buf[0] << 8) | (buf[1]);
}

static inline uint16_t le_to_be_16(const uint16_t val) {
    // swapping is symmetric
    return be_to_le_16(val);
}

static inline uint32_t be_to_le_32(const uint32_t val) {
    uint8_t * buf = (uint8_t *)&val;
    return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
}

static inline uint32_t le_to_be_32(const uint32_t val) {
    // swapping is symmetric
    return be_to_le_32(val);
}
252 static inline int get_channel_index(ushort_t port) {
253 if (((port & 0xfff8) == 0x1f0) ||
254 ((port & 0xfffe) == 0x3f6) ||
255 ((port & 0xfff8) == 0xc000)) {
257 } else if (((port & 0xfff8) == 0x170) ||
258 ((port & 0xfffe) == 0x376) ||
259 ((port & 0xfff8) == 0xc008)) {
266 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
267 int channel_idx = get_channel_index(port);
268 return &(ide->channels[channel_idx]);
271 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
272 return &(channel->drives[channel->drive_head.drive_sel]);
276 static inline int is_lba_enabled(struct ide_channel * channel) {
277 return channel->drive_head.lba_mode;
282 static void ide_raise_irq(struct vm_device * dev, struct ide_channel * channel) {
283 if (channel->ctrl_reg.irq_disable == 0) {
284 // PrintError("Raising IDE Interrupt %d\n", channel->irq);
285 channel->dma_status.int_gen = 1;
286 v3_raise_irq(dev->vm, channel->irq);
291 static void drive_reset(struct ide_drive * drive) {
292 drive->sector_count = 0x01;
293 drive->sector_num = 0x01;
295 PrintDebug("Resetting drive %s\n", drive->model);
297 if (drive->drive_type == BLOCK_CDROM) {
298 drive->cylinder = 0xeb14;
300 drive->cylinder = 0x0000;
301 //drive->hd_state.accessed = 0;
305 memset(drive->data_buf, 0, sizeof(drive->data_buf));
306 drive->transfer_index = 0;
308 // Send the reset signal to the connected device callbacks
309 // channel->drives[0].reset();
310 // channel->drives[1].reset();
313 static void channel_reset(struct ide_channel * channel) {
315 // set busy and seek complete flags
316 channel->status.val = 0x90;
319 channel->error_reg.val = 0x01;
322 channel->cmd_reg = 0x00;
324 channel->ctrl_reg.irq_disable = 0;
327 static void channel_reset_complete(struct ide_channel * channel) {
328 channel->status.busy = 0;
329 channel->status.ready = 1;
331 channel->drive_head.head_num = 0;
333 drive_reset(&(channel->drives[0]));
334 drive_reset(&(channel->drives[1]));
338 static void ide_abort_command(struct vm_device * dev, struct ide_channel * channel) {
339 channel->status.val = 0x41; // Error + ready
340 channel->error_reg.val = 0x04; // No idea...
342 ide_raise_irq(dev, channel);
/* Forward declarations: the DMA engine is driven both from the Bus Master
 * command port and from the ATA command dispatcher. */
static int dma_read(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel);
static int dma_write(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel);
350 /* ATAPI functions */
#ifdef CONFIG_DEBUG_IDE
/* Debug helper: walk the guest's PRD table and dump each entry until the
 * end-of-table bit is seen.
 * NOTE(review): the loop scaffolding (while, index/ret declarations, break)
 * and the #endif were missing from the truncated listing and are
 * reconstructed here — confirm against upstream. */
static void print_prd_table(struct vm_device * dev, struct ide_channel * channel) {
    struct ide_dma_prd prd_entry;
    int index = 0;

    PrintDebug("Dumping PRD table\n");

    while (1) {
        uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
        int ret = 0;

        ret = v3_read_gpa_memory(&(dev->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);

        if (ret != sizeof(struct ide_dma_prd)) {
            PrintError("Could not read PRD\n");
            return;
        }

        PrintDebug("\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
                   prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);

        if (prd_entry.end_of_table) {
            break;
        }

        index++;
    }

    return;
}
#endif
390 static int dma_read(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel) {
391 struct ide_drive * drive = get_selected_drive(channel);
392 // This is at top level scope to do the EOT test at the end
393 struct ide_dma_prd prd_entry = {};
394 uint_t bytes_left = drive->transfer_length;
396 // Read in the data buffer....
397 // Read a sector/block at a time until the prd entry is full.
399 #ifdef CONFIG_DEBUG_IDE
400 print_prd_table(dev, channel);
403 PrintDebug("DMA read for %d bytes\n", bytes_left);
405 // Loop through the disk data
406 while (bytes_left > 0) {
407 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
408 uint_t prd_bytes_left = 0;
409 uint_t prd_offset = 0;
412 PrintDebug("PRD table address = %x\n", channel->dma_prd_addr);
414 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
416 if (ret != sizeof(struct ide_dma_prd)) {
417 PrintError("Could not read PRD\n");
421 PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
422 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
424 // loop through the PRD data....
426 prd_bytes_left = prd_entry.size;
429 while (prd_bytes_left > 0) {
430 uint_t bytes_to_write = 0;
432 if (drive->drive_type == BLOCK_DISK) {
433 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
436 if (ata_read(dev, channel, drive->data_buf, 1) == -1) {
437 PrintError("Failed to read next disk sector\n");
440 } else if (drive->drive_type == BLOCK_CDROM) {
441 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
442 bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
444 if (atapi_read_chunk(dev, channel) == -1) {
445 PrintError("Failed to read next disk sector\n");
449 PrintDebug("DMA of command packet\n");
450 PrintError("How does this work???\n");
452 bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
453 prd_bytes_left = bytes_to_write;
457 PrintDebug("Writing DMA data to guest Memory ptr=%p, len=%d\n",
458 (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
460 drive->current_lba++;
462 ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
464 if (ret != bytes_to_write) {
465 PrintError("Failed to copy data into guest memory... (ret=%d)\n", ret);
469 PrintDebug("\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
471 drive->transfer_index += ret;
472 prd_bytes_left -= ret;
477 channel->dma_tbl_index++;
479 if (drive->drive_type == BLOCK_DISK) {
480 if (drive->transfer_index % HD_SECTOR_SIZE) {
481 PrintError("We currently don't handle sectors that span PRD descriptors\n");
484 } else if (drive->drive_type == BLOCK_CDROM) {
485 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
486 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
487 PrintError("We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
488 PrintError("transfer_index=%d, transfer_length=%d\n",
489 drive->transfer_index, drive->transfer_length);
496 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
497 PrintError("DMA table not large enough for data transfer...\n");
503 drive->irq_flags.io_dir = 1;
504 drive->irq_flags.c_d = 1;
505 drive->irq_flags.rel = 0;
509 // Update to the next PRD entry
513 if (prd_entry.end_of_table) {
514 channel->status.busy = 0;
515 channel->status.ready = 1;
516 channel->status.data_req = 0;
517 channel->status.error = 0;
518 channel->status.seek_complete = 1;
520 channel->dma_status.active = 0;
521 channel->dma_status.err = 0;
524 ide_raise_irq(dev, channel);
530 static int dma_write(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel) {
531 struct ide_drive * drive = get_selected_drive(channel);
532 // This is at top level scope to do the EOT test at the end
533 struct ide_dma_prd prd_entry = {};
534 uint_t bytes_left = drive->transfer_length;
537 PrintDebug("DMA write from %d bytes\n", bytes_left);
539 // Loop through disk data
540 while (bytes_left > 0) {
541 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
542 uint_t prd_bytes_left = 0;
543 uint_t prd_offset = 0;
546 PrintDebug("PRD Table address = %x\n", channel->dma_prd_addr);
548 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
550 if (ret != sizeof(struct ide_dma_prd)) {
551 PrintError("Could not read PRD\n");
555 PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
556 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
558 prd_bytes_left = prd_entry.size;
560 while (prd_bytes_left > 0) {
561 uint_t bytes_to_write = 0;
564 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
567 ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
569 if (ret != bytes_to_write) {
570 PrintError("Faild to copy data from guest memory... (ret=%d)\n", ret);
574 PrintDebug("\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
577 if (ata_write(dev, channel, drive->data_buf, 1) == -1) {
578 PrintError("Failed to write data to disk\n");
582 drive->current_lba++;
584 drive->transfer_index += ret;
585 prd_bytes_left -= ret;
590 channel->dma_tbl_index++;
592 if (drive->transfer_index % HD_SECTOR_SIZE) {
593 PrintError("We currently don't handle sectors that span PRD descriptors\n");
597 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
598 PrintError("DMA table not large enough for data transfer...\n");
603 if (prd_entry.end_of_table) {
604 channel->status.busy = 0;
605 channel->status.ready = 1;
606 channel->status.data_req = 0;
607 channel->status.error = 0;
608 channel->status.seek_complete = 1;
610 channel->dma_status.active = 0;
611 channel->dma_status.err = 0;
614 ide_raise_irq(dev, channel);
/* Bus Master IDE register offsets within a channel's 8-byte port window */
#define DMA_CMD_PORT      0x00
#define DMA_STATUS_PORT   0x02
#define DMA_PRD_PORT0     0x04
#define DMA_PRD_PORT1     0x05
#define DMA_PRD_PORT2     0x06
#define DMA_PRD_PORT3     0x07

/* Bit 3 of the port selects the channel (primary window at +0, secondary at +8) */
#define DMA_CHANNEL_FLAG  0x08
630 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
631 struct vm_device * dev = (struct vm_device *)private_data;
632 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
633 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
634 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
635 struct ide_channel * channel = &(ide->channels[channel_flag]);
637 PrintDebug("IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
638 port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
640 switch (port_offset) {
642 channel->dma_cmd.val = *(uint8_t *)src;
644 if (channel->dma_cmd.start == 0) {
645 channel->dma_tbl_index = 0;
647 channel->dma_status.active = 1;
649 if (channel->dma_cmd.read == 1) {
651 if (dma_read(core, dev, channel) == -1) {
652 PrintError("Failed DMA Read\n");
657 if (dma_write(core, dev, channel) == -1) {
658 PrintError("Failed DMA Write\n");
663 channel->dma_cmd.val &= 0x09;
668 case DMA_STATUS_PORT: {
669 uint8_t val = *(uint8_t *)src;
672 PrintError("Invalid read length for DMA status port\n");
677 channel->dma_status.val = ((val & 0x60) |
678 (channel->dma_status.val & 0x01) |
679 (channel->dma_status.val & ~val & 0x06));
686 case DMA_PRD_PORT3: {
687 uint_t addr_index = port_offset & 0x3;
688 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
691 if (addr_index + length > 4) {
692 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
696 for (i = 0; i < length; i++) {
697 addr_buf[addr_index + i] = *((uint8_t *)src + i);
700 PrintDebug("Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
705 PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
713 static int read_dma_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * private_data) {
714 struct vm_device * dev = (struct vm_device *)private_data;
715 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
716 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
717 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
718 struct ide_channel * channel = &(ide->channels[channel_flag]);
720 PrintDebug("Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
722 switch (port_offset) {
724 *(uint8_t *)dst = channel->dma_cmd.val;
727 case DMA_STATUS_PORT:
729 PrintError("Invalid read length for DMA status port\n");
733 *(uint8_t *)dst = channel->dma_status.val;
739 case DMA_PRD_PORT3: {
740 uint_t addr_index = port_offset & 0x3;
741 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
744 if (addr_index + length > 4) {
745 PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);
749 for (i = 0; i < length; i++) {
750 *((uint8_t *)dst + i) = addr_buf[addr_index + i];
756 PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
760 PrintDebug("\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
767 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
768 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
769 struct ide_channel * channel = get_selected_channel(ide, port);
770 struct ide_drive * drive = get_selected_drive(channel);
773 PrintError("Invalid Write Length on IDE command Port %x\n", port);
777 PrintDebug("IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
779 channel->cmd_reg = *(uint8_t *)src;
781 switch (channel->cmd_reg) {
783 case 0xa1: // ATAPI Identify Device Packet
784 if (drive->drive_type != BLOCK_CDROM) {
787 // JRL: Should we abort here?
788 ide_abort_command(dev, channel);
791 atapi_identify_device(drive);
793 channel->error_reg.val = 0;
794 channel->status.val = 0x58; // ready, data_req, seek_complete
796 ide_raise_irq(dev, channel);
799 case 0xec: // Identify Device
800 if (drive->drive_type != BLOCK_DISK) {
803 // JRL: Should we abort here?
804 ide_abort_command(dev, channel);
806 ata_identify_device(drive);
808 channel->error_reg.val = 0;
809 channel->status.val = 0x58;
811 ide_raise_irq(dev, channel);
815 case 0xa0: // ATAPI Command Packet
816 if (drive->drive_type != BLOCK_CDROM) {
817 ide_abort_command(dev, channel);
820 drive->sector_count = 1;
822 channel->status.busy = 0;
823 channel->status.write_fault = 0;
824 channel->status.data_req = 1;
825 channel->status.error = 0;
827 // reset the data buffer...
828 drive->transfer_length = ATAPI_PACKET_SIZE;
829 drive->transfer_index = 0;
833 case 0x20: // Read Sectors with Retry
834 case 0x21: // Read Sectors without Retry
835 drive->hd_state.cur_sector_num = 1;
837 if (ata_read_sectors(dev, channel) == -1) {
838 PrintError("Error reading sectors\n");
843 case 0x24: // Read Sectors Extended
844 drive->hd_state.cur_sector_num = 1;
846 if (ata_read_sectors_ext(dev, channel) == -1) {
847 PrintError("Error reading extended sectors\n");
852 case 0xc8: // Read DMA with retry
853 case 0xc9: { // Read DMA
854 uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
856 if (ata_get_lba(dev, channel, &(drive->current_lba)) == -1) {
857 ide_abort_command(dev, channel);
861 drive->hd_state.cur_sector_num = 1;
863 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
864 drive->transfer_index = 0;
866 if (channel->dma_status.active == 1) {
868 if (dma_read(core, dev, channel) == -1) {
869 PrintError("Failed DMA Read\n");
876 case 0xca: { // Write DMA
877 uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
879 if (ata_get_lba(dev, channel, &(drive->current_lba)) == -1) {
880 ide_abort_command(dev, channel);
884 drive->hd_state.cur_sector_num = 1;
886 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
887 drive->transfer_index = 0;
889 if (channel->dma_status.active == 1) {
891 if (dma_write(core, dev, channel) == -1) {
892 PrintError("Failed DMA Write\n");
898 case 0xe0: // Standby Now 1
899 case 0xe1: // Set Idle Immediate
900 case 0xe2: // Standby
901 case 0xe3: // Set Idle 1
902 case 0xe6: // Sleep Now 1
903 case 0x94: // Standby Now 2
904 case 0x95: // Idle Immediate (CFA)
905 case 0x96: // Standby 2
906 case 0x97: // Set idle 2
907 case 0x99: // Sleep Now 2
908 channel->status.val = 0;
909 channel->status.ready = 1;
910 ide_raise_irq(dev, channel);
913 case 0xef: // Set Features
914 // Prior to this the features register has been written to.
915 // This command tells the drive to check if the new value is supported (the value is drive specific)
916 // Common is that bit0=DMA enable
917 // If valid the drive raises an interrupt, if not it aborts.
919 // Do some checking here...
921 channel->status.busy = 0;
922 channel->status.write_fault = 0;
923 channel->status.error = 0;
924 channel->status.ready = 1;
925 channel->status.seek_complete = 1;
927 ide_raise_irq(dev, channel);
930 case 0x91: // Initialize Drive Parameters
931 case 0x10: // recalibrate?
932 channel->status.error = 0;
933 channel->status.ready = 1;
934 channel->status.seek_complete = 1;
935 ide_raise_irq(dev, channel);
937 case 0xc6: { // Set multiple mode (IDE Block mode)
938 // This makes the drive transfer multiple sectors before generating an interrupt
939 uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
941 if (tmp_sect_num > MAX_MULT_SECTORS) {
942 ide_abort_command(dev, channel);
946 if (drive->sector_count == 0) {
947 drive->hd_state.mult_sector_num= 1;
949 drive->hd_state.mult_sector_num = drive->sector_count;
952 channel->status.ready = 1;
953 channel->status.error = 0;
955 ide_raise_irq(dev, channel);
959 case 0xc4: // read multiple sectors
960 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
962 PrintError("Unimplemented IDE command (%x)\n", channel->cmd_reg);
970 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
971 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
972 struct ide_channel * channel = get_selected_channel(ide, port);
973 struct ide_drive * drive = get_selected_drive(channel);
975 // PrintDebug("IDE: Writing Data Port %x (val=%x, len=%d)\n",
976 // port, *(uint32_t *)src, length);
978 memcpy(drive->data_buf + drive->transfer_index, src, length);
979 drive->transfer_index += length;
981 // Transfer is complete, dispatch the command
982 if (drive->transfer_index >= drive->transfer_length) {
983 switch (channel->cmd_reg) {
984 case 0x30: // Write Sectors
985 PrintError("Writing Data not yet implemented\n");
988 case 0xa0: // ATAPI packet command
989 if (atapi_handle_packet(core, dev, channel) == -1) {
990 PrintError("Error handling ATAPI packet\n");
995 PrintError("Unhandld IDE Command %x\n", channel->cmd_reg);
1004 static int read_hd_data(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
1005 struct ide_drive * drive = get_selected_drive(channel);
1006 int data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1010 if (drive->transfer_index >= drive->transfer_length) {
1011 PrintError("Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
1012 drive->transfer_length, drive->transfer_index,
1013 drive->transfer_index + length);
1018 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1019 drive->current_lba++;
1021 if (ata_read(dev, channel, drive->data_buf, 1) == -1) {
1022 PrintError("Could not read next disk sector\n");
1028 PrintDebug("Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
1029 *(uint32_t *)(drive->data_buf + data_offset),
1030 length, data_offset);
1032 memcpy(dst, drive->data_buf + data_offset, length);
1034 drive->transfer_index += length;
1037 /* This is the trigger for interrupt injection.
1038 * For read single sector commands we interrupt after every sector
1039 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1040 * cur_sector_num is configured depending on the operation we are currently running
1041 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1043 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1044 (drive->transfer_index == drive->transfer_length)) {
1045 if (drive->transfer_index < drive->transfer_length) {
1046 // An increment is complete, but there is still more data to be transferred...
1047 PrintDebug("Integral Complete, still transferring more sectors\n");
1048 channel->status.data_req = 1;
1050 drive->irq_flags.c_d = 0;
1052 PrintDebug("Final Sector Transferred\n");
1053 // This was the final read of the request
1054 channel->status.data_req = 0;
1057 drive->irq_flags.c_d = 1;
1058 drive->irq_flags.rel = 0;
1061 channel->status.ready = 1;
1062 drive->irq_flags.io_dir = 1;
1063 channel->status.busy = 0;
1065 ide_raise_irq(dev, channel);
1074 static int read_cd_data(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
1075 struct ide_drive * drive = get_selected_drive(channel);
1076 int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1077 int req_offset = drive->transfer_index % drive->req_len;
1079 if (drive->cd_state.atapi_cmd != 0x28) {
1080 PrintDebug("IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
1083 if (drive->transfer_index >= drive->transfer_length) {
1084 PrintError("Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n",
1085 drive->transfer_length, drive->transfer_index,
1086 drive->transfer_index + length);
1091 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1092 if (atapi_update_data_buf(dev, channel) == -1) {
1093 PrintError("Could not update CDROM data buffer\n");
1098 memcpy(dst, drive->data_buf + data_offset, length);
1100 drive->transfer_index += length;
1103 // Should the req_offset be recalculated here?????
1104 if ((req_offset == 0) && (drive->transfer_index > 0)) {
1105 if (drive->transfer_index < drive->transfer_length) {
1106 // An increment is complete, but there is still more data to be transferred...
1108 channel->status.data_req = 1;
1110 drive->irq_flags.c_d = 0;
1112 // Update the request length in the cylinder regs
1113 if (atapi_update_req_len(dev, channel, drive->transfer_length - drive->transfer_index) == -1) {
1114 PrintError("Could not update request length after completed increment\n");
1118 // This was the final read of the request
1119 channel->status.data_req = 0;
1120 channel->status.ready = 1;
1122 drive->irq_flags.c_d = 1;
1123 drive->irq_flags.rel = 0;
1126 drive->irq_flags.io_dir = 1;
1127 channel->status.busy = 0;
1129 ide_raise_irq(dev, channel);
1136 static int read_drive_id( uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
1137 struct ide_drive * drive = get_selected_drive(channel);
1139 channel->status.busy = 0;
1140 channel->status.ready = 1;
1141 channel->status.write_fault = 0;
1142 channel->status.seek_complete = 1;
1143 channel->status.corrected = 0;
1144 channel->status.error = 0;
1147 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1148 drive->transfer_index += length;
1150 if (drive->transfer_index >= drive->transfer_length) {
1151 channel->status.data_req = 0;
1158 static int ide_read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
1159 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1160 struct ide_channel * channel = get_selected_channel(ide, port);
1161 struct ide_drive * drive = get_selected_drive(channel);
1163 PrintDebug("IDE: Reading Data Port %x (len=%d)\n", port, length);
1165 if ((channel->cmd_reg == 0xec) ||
1166 (channel->cmd_reg == 0xa1)) {
1167 return read_drive_id((uint8_t *)dst, length, dev, channel);
1170 if (drive->drive_type == BLOCK_CDROM) {
1171 if (read_cd_data((uint8_t *)dst, length, dev, channel) == -1) {
1172 PrintError("IDE: Could not read CD Data\n");
1175 } else if (drive->drive_type == BLOCK_DISK) {
1176 if (read_hd_data((uint8_t *)dst, length, dev, channel) == -1) {
1177 PrintError("IDE: Could not read HD Data\n");
1181 memset((uint8_t *)dst, 0, length);
1187 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
1188 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1189 struct ide_channel * channel = get_selected_channel(ide, port);
1190 struct ide_drive * drive = get_selected_drive(channel);
1193 PrintError("Invalid Write length on IDE port %x\n", port);
1197 PrintDebug("IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1200 // reset and interrupt enable
1202 case SEC_CTRL_PORT: {
1203 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1205 // only reset channel on a 0->1 reset bit transition
1206 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1207 channel_reset(channel);
1208 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1209 channel_reset_complete(channel);
1212 channel->ctrl_reg.val = tmp_ctrl->val;
1215 case PRI_FEATURES_PORT:
1216 case SEC_FEATURES_PORT:
1217 channel->features.val = *(uint8_t *)src;
1220 case PRI_SECT_CNT_PORT:
1221 case SEC_SECT_CNT_PORT:
1222 channel->drives[0].sector_count = *(uint8_t *)src;
1223 channel->drives[1].sector_count = *(uint8_t *)src;
1226 case PRI_SECT_NUM_PORT:
1227 case SEC_SECT_NUM_PORT:
1228 channel->drives[0].sector_num = *(uint8_t *)src;
1229 channel->drives[1].sector_num = *(uint8_t *)src;
1231 case PRI_CYL_LOW_PORT:
1232 case SEC_CYL_LOW_PORT:
1233 channel->drives[0].cylinder_low = *(uint8_t *)src;
1234 channel->drives[1].cylinder_low = *(uint8_t *)src;
1237 case PRI_CYL_HIGH_PORT:
1238 case SEC_CYL_HIGH_PORT:
1239 channel->drives[0].cylinder_high = *(uint8_t *)src;
1240 channel->drives[1].cylinder_high = *(uint8_t *)src;
1243 case PRI_DRV_SEL_PORT:
1244 case SEC_DRV_SEL_PORT: {
1245 channel->drive_head.val = *(uint8_t *)src;
1247 // make sure the reserved bits are ok..
1248 // JRL TODO: check with new ramdisk to make sure this is right...
1249 channel->drive_head.val |= 0xa0;
1251 drive = get_selected_drive(channel);
1253 // Selecting a non-present device is a no-no
1254 if (drive->drive_type == BLOCK_NONE) {
1255 PrintDebug("Attempting to select a non-present drive\n");
1256 channel->error_reg.abort = 1;
1257 channel->status.error = 1;
1263 PrintError("IDE: Write to unknown Port %x\n", port);
/*
 * read_port_std: guest I/O read handler for the byte-wide IDE register
 * ports of either channel (error/features, sector count, sector number,
 * cylinder low/high, drive select, status, and the legacy address regs).
 * The port number selects both the channel (get_selected_channel) and the
 * register; the currently selected drive supplies the per-drive values.
 * Writes the register value through dst.
 * NOTE(review): several original lines (length check, switch header,
 * break/return statements) are elided in this view; comments below only
 * describe what is visible.
 */
1270 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
1271 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1272 struct ide_channel * channel = get_selected_channel(ide, port);
1273 struct ide_drive * drive = get_selected_drive(channel);
// Only 1-byte accesses are expected on these registers (guard elided).
1276 PrintError("Invalid Read length on IDE port %x\n", port);
1280 PrintDebug("IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
// The legacy "address" registers (0x3f7/0x377) are not emulated.
1282 if ((port == PRI_ADDR_REG_PORT) ||
1283 (port == SEC_ADDR_REG_PORT)) {
1284 // unused, return 0xff
1285 *(uint8_t *)dst = 0xff;
1290 // if no drive is present just return 0 + reserved bits
1291 if (drive->drive_type == BLOCK_NONE) {
1292 if ((port == PRI_DRV_SEL_PORT) ||
1293 (port == SEC_DRV_SEL_PORT)) {
// 0xa0 = the two always-set reserved bits of the drive/head register.
1294 *(uint8_t *)dst = 0xa0;
1296 *(uint8_t *)dst = 0;
// Dispatch on the port (switch header elided in this view).
1304 // This is really the error register.
1305 case PRI_FEATURES_PORT:
1306 case SEC_FEATURES_PORT:
1307 *(uint8_t *)dst = channel->error_reg.val;
1310 case PRI_SECT_CNT_PORT:
1311 case SEC_SECT_CNT_PORT:
1312 *(uint8_t *)dst = drive->sector_count;
1315 case PRI_SECT_NUM_PORT:
1316 case SEC_SECT_NUM_PORT:
1317 *(uint8_t *)dst = drive->sector_num;
1320 case PRI_CYL_LOW_PORT:
1321 case SEC_CYL_LOW_PORT:
1322 *(uint8_t *)dst = drive->cylinder_low;
1326 case PRI_CYL_HIGH_PORT:
1327 case SEC_CYL_HIGH_PORT:
1328 *(uint8_t *)dst = drive->cylinder_high;
1331 case PRI_DRV_SEL_PORT:
1332 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1333 *(uint8_t *)dst = channel->drive_head.val;
// Status read case (labels elided). NOTE(review): reading the status
// register normally clears a pending IRQ on real hardware -- the comment
// below suggests that was known but not implemented here; verify.
1340 // Something about lowering interrupts here....
1341 *(uint8_t *)dst = channel->status.val;
1345 PrintError("Invalid Port: %x\n", port);
1349 PrintDebug("\tVal=%x\n", *(uint8_t *)dst);
/*
 * init_drive: reset one drive slot to its power-on state.
 * sector_count/sector_num are set to 1, matching the ATA reset signature
 * values; the slot is marked empty (BLOCK_NONE) until connect_fn
 * attaches a backend, and all transfer/geometry state is cleared.
 */
1356 static void init_drive(struct ide_drive * drive) {
1358 drive->sector_count = 0x01;
1359 drive->sector_num = 0x01;
1360 drive->cylinder = 0x0000;
// No backend attached yet; reads of this slot return "not present".
1362 drive->drive_type = BLOCK_NONE;
1364 memset(drive->model, 0, sizeof(drive->model));
// PIO transfer bookkeeping for the data port.
1366 drive->transfer_index = 0;
1367 drive->transfer_length = 0;
1368 memset(drive->data_buf, 0, sizeof(drive->data_buf));
// CHS geometry is filled in later by connect_fn for hard disks.
1370 drive->num_cylinders = 0;
1371 drive->num_heads = 0;
1372 drive->num_sectors = 0;
1375 drive->private_data = NULL;
/*
 * init_channel: reset one IDE channel (task-file registers, bus-master
 * DMA state) and both of its drive slots.
 * error_reg = 0x01 is the post-reset diagnostic code; ctrl_reg = 0x08
 * presumably sets a reserved/HOB-related bit -- verify against the
 * register layout in ide-types.h.
 */
1379 static void init_channel(struct ide_channel * channel) {
1382 channel->error_reg.val = 0x01;
1383 channel->drive_head.val = 0x00;
1384 channel->status.val = 0x00;
1385 channel->cmd_reg = 0x00;
1386 channel->ctrl_reg.val = 0x08;
// Bus-master DMA engine state (exposed through BAR4 in ide_init).
1389 channel->dma_cmd.val = 0;
1390 channel->dma_status.val = 0;
1391 channel->dma_prd_addr = 0;
1392 channel->dma_tbl_index = 0;
// Reset both drive slots (loop index declared in an elided line).
1394 for (i = 0; i < 2; i++) {
1395 init_drive(&(channel->drives[i]));
/*
 * pci_config_update: PCI config-space write callback registered with
 * v3_pci_register_device in ide_init. Currently a stub that only logs;
 * the interrupt-line handling below is commented out.
 */
1401 static int pci_config_update(uint_t reg_num, void * src, uint_t length, void * private_data) {
1402 PrintDebug("PCI Config Update\n");
1403 /* struct vm_device * dev = (struct vm_device *)private_data;
1404 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1406 PrintDebug("\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
/*
 * init_ide_state: reset the controller's channel state after allocation.
 * NOTE(review): the loop bound of 1 initializes ONLY the primary channel
 * (and assigns only IRQ 14), even though ide_init hooks I/O ports for
 * two channels; the secondary channel keeps the all-zero state from the
 * memset in ide_init rather than proper reset values (error_reg=0x01,
 * ctrl_reg=0x08). The comment below hints this relates to the PIIX3
 * exposing both channels in one PCI function -- confirm whether the
 * bound should be 2.
 */
1412 static int init_ide_state(struct vm_device * dev) {
1413 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1417 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1420 for (i = 0; i < 1; i++) {
1421 init_channel(&(ide->channels[i]));
1423 // JRL: this is a terrible hack...
1424 ide->channels[i].irq = PRI_DEFAULT_IRQ + i;
/*
 * ide_free: device-manager teardown hook.
 * NOTE(review): only TODO comments are visible in the body -- the I/O
 * port hooks and PCI registration made in ide_init are never released
 * here, and the V3_Malloc'd ide state is not freed. Verify before
 * relying on clean removal of this device.
 */
1434 static int ide_free(struct vm_device * dev) {
1435 // unhook io ports....
1438 // deregister from PCI?
// Operation table handed to v3_allocate_device in ide_init (initializer
// members elided in this view; presumably wires up ide_free).
1446 static struct v3_device_ops dev_ops = {
/*
 * connect_fn: block-frontend connect callback (registered via
 * v3_dev_add_blk_frontend). Attaches a backend block device described by
 * the cfg tree ("bus_num", "drive_num", "type", "model") to one of the
 * controller's drive slots.
 *
 * frontend_data - the ide_internal state passed at registration
 * ops           - backend block ops (get_capacity is used for geometry)
 * private_data  - backend handle stored on the drive
 *
 * NOTE(review): bus_num/drive_num come straight from the config via
 * atoi() and index channels[]/drives[] with NO range check -- a bad
 * config indexes out of bounds. Also, model_str is not NULL-checked
 * before strncpy, although type/drive/bus strings are checked.
 */
1454 static int connect_fn(struct v3_vm_info * vm,
1455 void * frontend_data,
1456 struct v3_dev_blk_ops * ops,
1457 v3_cfg_tree_t * cfg,
1458 void * private_data) {
1459 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1460 struct ide_channel * channel = NULL;
1461 struct ide_drive * drive = NULL;
1463 char * bus_str = v3_cfg_val(cfg, "bus_num");
1464 char * drive_str = v3_cfg_val(cfg, "drive_num");
1465 char * type_str = v3_cfg_val(cfg, "type");
1466 char * model_str = v3_cfg_val(cfg, "model");
1468 uint_t drive_num = 0;
1471 if ((!type_str) || (!drive_str) || (!bus_str)) {
1472 PrintError("Incomplete IDE Configuration\n");
1476 bus_num = atoi(bus_str);
1477 drive_num = atoi(drive_str);
1479 channel = &(ide->channels[bus_num]);
1480 drive = &(channel->drives[drive_num]);
// Refuse to double-attach a slot that already has a backend.
1482 if (drive->drive_type != BLOCK_NONE) {
1483 PrintError("Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
// model[] was zeroed in init_drive, so this stays NUL-terminated.
1487 strncpy(drive->model, model_str, sizeof(drive->model) - 1);
1489 if (strcasecmp(type_str, "cdrom") == 0) {
1490 drive->drive_type = BLOCK_CDROM;
// ATA IDENTIFY model strings are 40 chars, space-padded.
1492 while (strlen((char *)(drive->model)) < 40) {
1493 strcat((char*)(drive->model), " ");
1496 } else if (strcasecmp(type_str, "hd") == 0) {
1497 drive->drive_type = BLOCK_DISK;
1499 drive->hd_state.accessed = 0;
1500 drive->hd_state.mult_sector_num = 1;
// Fixed 63-sector x 16-head translation; cylinders derived from the
// backend capacity in 512-byte sectors.
1502 drive->num_sectors = 63;
1503 drive->num_heads = 16;
1504 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
1506 PrintError("invalid IDE drive type\n");
1513 // Hardcode this for now, but its not a good idea....
// Presumably sets the channel-enable bit in the PIIX IDETIM config
// register for this bus -- verify offset 0x41/0x43 against the PIIX
// datasheet. Assumes ide->ide_pci is non-NULL (PCI-attached config).
1514 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1517 drive->private_data = private_data;
/*
 * ide_init: device-manager entry point (see device_register below).
 * Allocates the controller state, optionally binds it to a PCI bus and
 * southbridge from the config, attaches the device, hooks all legacy
 * IDE I/O ports for both channels, registers the PIIX3 IDE PCI function
 * (with a BAR4 bus-master DMA I/O window), and finally registers the
 * block frontend so backends can connect via connect_fn.
 * NOTE(review): the v3_dev_hook_io return values are not checked, so a
 * port conflict would go unnoticed until runtime.
 */
1525 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1526 struct ide_internal * ide = NULL;
1527 char * dev_id = v3_cfg_val(cfg, "ID");
1529 PrintDebug("IDE: Initializing IDE\n");
1531 ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
// Allocation-failure path (NULL check elided in this view).
1534 PrintError("Error allocating IDE state\n");
// Zero the whole state; init_ide_state fills in proper reset values.
1538 memset(ide, 0, sizeof(struct ide_internal));
// PCI attachment is optional: only done when a "bus" is configured.
1541 ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"))  ;
1543 if (ide->pci_bus != NULL) {
1544 struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1547 PrintError("Could not find southbridge\n");
1552 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1555 PrintDebug("IDE: Creating IDE bus x 2\n");
1557 struct vm_device * dev = v3_allocate_device(dev_id, &dev_ops, ide);
1559 if (v3_attach_device(vm, dev) == -1) {
1560 PrintError("Could not attach device %s\n", dev_id);
1561 v3_free_device(dev);
1566 if (init_ide_state(dev) == -1) {
1567 PrintError("Failed to initialize IDE state\n");
1568 v3_detach_device(dev);
// Legacy task-file ports for the primary channel (0x1f0-0x1f7).
1572 PrintDebug("Connecting to IDE IO ports\n");
1574 v3_dev_hook_io(dev, PRI_DATA_PORT,
1575 &ide_read_data_port, &write_data_port);
1576 v3_dev_hook_io(dev, PRI_FEATURES_PORT,
1577 &read_port_std, &write_port_std);
1578 v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
1579 &read_port_std, &write_port_std);
1580 v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
1581 &read_port_std, &write_port_std);
1582 v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
1583 &read_port_std, &write_port_std);
1584 v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
1585 &read_port_std, &write_port_std);
1586 v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
1587 &read_port_std, &write_port_std);
1588 v3_dev_hook_io(dev, PRI_CMD_PORT,
1589 &read_port_std, &write_cmd_port);
// Secondary channel task-file ports (0x170-0x177).
1591 v3_dev_hook_io(dev, SEC_DATA_PORT,
1592 &ide_read_data_port, &write_data_port);
1593 v3_dev_hook_io(dev, SEC_FEATURES_PORT,
1594 &read_port_std, &write_port_std);
1595 v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
1596 &read_port_std, &write_port_std);
1597 v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
1598 &read_port_std, &write_port_std);
1599 v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
1600 &read_port_std, &write_port_std);
1601 v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
1602 &read_port_std, &write_port_std);
1603 v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
1604 &read_port_std, &write_port_std);
1605 v3_dev_hook_io(dev, SEC_CMD_PORT,
1606 &read_port_std, &write_cmd_port);
// Control/altstatus and legacy address registers for both channels.
1609 v3_dev_hook_io(dev, PRI_CTRL_PORT,
1610 &read_port_std, &write_port_std);
1612 v3_dev_hook_io(dev, SEC_CTRL_PORT,
1613 &read_port_std, &write_port_std);
1616 v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
1617 &read_port_std, &write_port_std);
1619 v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
1620 &read_port_std, &write_port_std);
// PCI registration block -- presumably still inside the pci_bus branch
// above (enclosing braces elided in this view); it dereferences
// ide->southbridge, which is only set there.
1626 struct v3_pci_bar bars[6];
1627 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1628 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1629 struct pci_device * pci_dev = NULL;
1632 PrintDebug("Connecting IDE to PCI bus\n");
1634 for (i = 0; i < 6; i++) {
1635 bars[i].type = PCI_BAR_NONE;
// BAR4 is the 16-port bus-master IDE DMA window; base left to the
// guest/BIOS to assign (-1 = no default).
1638 bars[4].type = PCI_BAR_IO;
1639 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
1640 bars[4].default_base_port = -1;
1641 bars[4].num_ports = 16;
1643 bars[4].io_read = read_dma_port;
1644 bars[4].io_write = write_dma_port;
1645 bars[4].private_data = dev;
// Register as function 1 of the southbridge's PCI device (PIIX layout).
1647 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
1649 pci_config_update, NULL, NULL, dev);
1651 if (pci_dev == NULL) {
// NOTE(review): i is the bar-loop index and is 6 here, so this message
// does not print a meaningful bus number.
1652 PrintError("Failed to register IDE BUS %d with PCI\n", i);
1656 /* This is for CMD646 devices
1657 pci_dev->config_header.vendor_id = 0x1095;
1658 pci_dev->config_header.device_id = 0x0646;
1659 pci_dev->config_header.revision = 0x8f07;
// 8086:7010 = Intel PIIX3 IDE controller function.
1662 pci_dev->config_header.vendor_id = 0x8086;
1663 pci_dev->config_header.device_id = 0x7010;
1664 pci_dev->config_header.revision = 0x00;
1666 pci_dev->config_header.prog_if = 0x80; // Master IDE device
1667 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1668 pci_dev->config_header.class = PCI_CLASS_STORAGE;
1670 pci_dev->config_header.command = 0;
1671 pci_dev->config_header.status = 0x0280;
// Keep the handle so connect_fn can poke the config space.
1673 ide->ide_pci = pci_dev;
// Finally expose this controller as a block frontend target.
1678 if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
1679 PrintError("Could not register %s as frontend\n", dev_id);
1680 v3_detach_device(dev);
1685 PrintDebug("IDE Initialized\n");
// Register ide_init with the device manager under the name "IDE".
1691 device_register("IDE", ide_init)
1696 int v3_ide_get_geometry(struct vm_device * ide_dev, int channel_num, int drive_num,
1697 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
1699 struct ide_internal * ide = (struct ide_internal *)(ide_dev->private_data);
1700 struct ide_channel * channel = &(ide->channels[channel_num]);
1701 struct ide_drive * drive = &(channel->drives[drive_num]);
1703 if (drive->drive_type == BLOCK_NONE) {
1707 *cylinders = drive->num_cylinders;
1708 *heads = drive->num_heads;
1709 *sectors = drive->num_sectors;