2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vm_guest_mem.h>
22 #include <devices/ide.h>
23 #include <devices/pci.h>
24 #include <devices/southbridge.h>
25 #include <devices/block_dev.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
#ifndef CONFIG_DEBUG_IDE
// Debugging disabled: compile PrintDebug() calls away to nothing.
// NOTE(review): the matching #endif is not visible in this view — confirm it survives.
#define PrintDebug(fmt, args...)

// Legacy ISA IRQ lines used by the two IDE channels
#define PRI_DEFAULT_IRQ 14
#define SEC_DEFAULT_IRQ 15

// Primary channel: command block (0x1f0-0x1f7) and control block (0x3f6-0x3f7)
#define PRI_DATA_PORT 0x1f0
#define PRI_FEATURES_PORT 0x1f1
#define PRI_SECT_CNT_PORT 0x1f2
#define PRI_SECT_NUM_PORT 0x1f3
#define PRI_CYL_LOW_PORT 0x1f4
#define PRI_CYL_HIGH_PORT 0x1f5
#define PRI_DRV_SEL_PORT 0x1f6
#define PRI_CMD_PORT 0x1f7
#define PRI_CTRL_PORT 0x3f6
#define PRI_ADDR_REG_PORT 0x3f7

// Secondary channel: command block (0x170-0x177) and control block (0x376-0x377)
#define SEC_DATA_PORT 0x170
#define SEC_FEATURES_PORT 0x171
#define SEC_SECT_CNT_PORT 0x172
#define SEC_SECT_NUM_PORT 0x173
#define SEC_CYL_LOW_PORT 0x174
#define SEC_CYL_HIGH_PORT 0x175
#define SEC_DRV_SEL_PORT 0x176
#define SEC_CMD_PORT 0x177
#define SEC_CTRL_PORT 0x376
#define SEC_ADDR_REG_PORT 0x377

// Default I/O bases for the PCI bus-master (DMA) register blocks;
// the secondary channel's block sits 8 bytes above the primary's.
#define PRI_DEFAULT_DMA_PORT 0xc000
#define SEC_DEFAULT_DMA_PORT 0xc008

// Size of each drive's local staging buffer for PIO/DMA transfers
#define DATA_BUFFER_SIZE 2048
// Register names for debug output, indexed by (port - base). Slots 8 and 9
// hold the control-block registers, which io_port_to_str() maps in explicitly.
static const char * ide_pri_port_strs[] = {"PRI_DATA", "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM",
					   "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL", "PRI_CMD",
					   "PRI_CTRL", "PRI_ADDR_REG"};

static const char * ide_sec_port_strs[] = {"SEC_DATA", "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM",
					   "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL", "SEC_CMD",
					   "SEC_CTRL", "SEC_ADDR_REG"};

// Bus-master DMA register names indexed by register offset;
// NULL entries are reserved offsets within the 8-byte block.
static const char * ide_dma_port_strs[] = {"DMA_CMD", NULL, "DMA_STATUS", NULL,
					   "DMA_PRD0", "DMA_PRD1", "DMA_PRD2", "DMA_PRD3"};
80 static inline const char * io_port_to_str(uint16_t port) {
81 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
82 return ide_pri_port_strs[port - PRI_DATA_PORT];
83 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
84 return ide_sec_port_strs[port - SEC_DATA_PORT];
85 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
86 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
87 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
88 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
94 static inline const char * dma_port_to_str(uint16_t port) {
95 return ide_dma_port_strs[port & 0x7];
100 struct ide_cd_state {
101 struct atapi_sense_data sense;
104 struct atapi_error_recovery err_recovery;
107 struct ide_hd_state {
110 /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
111 uint_t mult_sector_num;
113 /* This is the current op sector size:
114 * for multiple sector ops this equals mult_sector_num
115 * for standard ops this equals 1
117 uint_t cur_sector_num;
    // ---- interior of struct ide_drive ----
    // NOTE(review): the `struct ide_drive {` opener and several members
    // referenced elsewhere in this file (num_heads, model[], req_len,
    // private_data) are not visible here, and the trailing
    // `} __attribute__((packed));` lines below close anonymous unions whose
    // `union {` openers are also missing — confirm against the full source.

    // What kind of device (disk / cdrom / none) occupies this slot
    v3_block_type_t drive_type;

    // Backend callbacks; only the set matching drive_type is meaningful
    struct v3_cd_ops * cd_ops;
    struct v3_hd_ops * hd_ops;

    struct ide_cd_state cd_state;
    struct ide_hd_state hd_state;

    // Where we are in the data transfer
    uint_t transfer_index;

    // the length of a transfer
    // calculated for easy access
    uint_t transfer_length;

    // Next sector to read/write from the backing store
    uint64_t current_lba;

    // We have a local data buffer that we use for IO port accesses
    uint8_t data_buf[DATA_BUFFER_SIZE];

    // Disk geometry
    uint32_t num_cylinders;
    uint32_t num_sectors;

    // Task-file registers shared between ATA and ATAPI views (unions):
    uint8_t sector_count;               // 0x1f2,0x172
	struct atapi_irq_flags irq_flags;
    } __attribute__((packed));

    uint8_t sector_num;                 // 0x1f3,0x173
    } __attribute__((packed));

    uint8_t cylinder_low;               // 0x1f4,0x174
    uint8_t cylinder_high;              // 0x1f5,0x175
    } __attribute__((packed));

    } __attribute__((packed));

    // The transfer length requested by the CPU
    } __attribute__((packed));
    // ---- interior of struct ide_channel ----
    // NOTE(review): the `struct ide_channel {` opener is not visible in this
    // view — confirm against the full source.

    // Master (0) and slave (1); selected via drive_head.drive_sel
    struct ide_drive drives[2];

    struct ide_error_reg error_reg;     // [read] 0x1f1,0x171
    struct ide_features_reg features;   // [write] shares the port above
    struct ide_drive_head_reg drive_head; // 0x1f6,0x176
    struct ide_status_reg status;       // [read] 0x1f7,0x177
    uint8_t cmd_reg;                    // [write] 0x1f7,0x177

    int irq; // this is temporary until we add PCI support

    struct ide_ctrl_reg ctrl_reg;       // [write] 0x3f6,0x376

    // Bus-master DMA state: command/status registers, guest-physical address
    // of the PRD table, and the index of the PRD entry currently in use
    struct ide_dma_cmd_reg dma_cmd;
    struct ide_dma_status_reg dma_status;
    uint32_t dma_prd_addr;
    uint_t dma_tbl_index;
216 struct ide_internal {
217 struct ide_channel channels[2];
219 struct v3_southbridge * southbridge;
220 struct vm_device * pci_bus;
222 struct pci_device * ide_pci;
229 /* Utility functions */
/* Swap the byte order of a 16-bit value (big-endian <-> little-endian).
 * The swap is its own inverse, so le_to_be_16() simply calls this.
 * NOTE(review): this reads the in-memory representation, so on a big-endian
 * host it would be an identity — fine for x86-only Palacios, but worth noting.
 * (Fixes: const-correct byte pointer instead of casting away const;
 * restores the missing closing brace.) */
static inline uint16_t be_to_le_16(const uint16_t val) {
    const uint8_t * buf = (const uint8_t *)&val;
    return (uint16_t)((buf[0] << 8) | buf[1]);
}
/* Byte-swap in the other direction; a 16-bit byte swap is symmetric,
 * so this just delegates. (Fix: restores the missing closing brace.) */
static inline uint16_t le_to_be_16(const uint16_t val) {
    return be_to_le_16(val);
}
/* Swap the byte order of a 32-bit value (big-endian <-> little-endian).
 * (Fixes: `buf[0] << 24` promoted the uint8_t to a signed int, so values
 * >= 0x80 shifted into the sign bit — undefined behavior; widened shifts to
 * uint32_t. Also const-correct pointer and restored the closing brace.) */
static inline uint32_t be_to_le_32(const uint32_t val) {
    const uint8_t * buf = (const uint8_t *)&val;
    return ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
	   ((uint32_t)buf[2] << 8) | buf[3];
}
/* Symmetric 32-bit byte swap; delegates to be_to_le_32().
 * (Fix: restores the missing closing brace.) */
static inline uint32_t le_to_be_32(const uint32_t val) {
    return be_to_le_32(val);
}
251 static inline int get_channel_index(ushort_t port) {
252 if (((port & 0xfff8) == 0x1f0) ||
253 ((port & 0xfffe) == 0x3f6) ||
254 ((port & 0xfff8) == 0xc000)) {
256 } else if (((port & 0xfff8) == 0x170) ||
257 ((port & 0xfffe) == 0x376) ||
258 ((port & 0xfff8) == 0xc008)) {
265 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
266 int channel_idx = get_channel_index(port);
267 return &(ide->channels[channel_idx]);
270 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
271 return &(channel->drives[channel->drive_head.drive_sel]);
275 static inline int is_lba_enabled(struct ide_channel * channel) {
276 return channel->drive_head.lba_mode;
281 static void ide_raise_irq(struct vm_device * dev, struct ide_channel * channel) {
282 if (channel->ctrl_reg.irq_disable == 0) {
283 PrintDebug("Raising IDE Interrupt %d\n", channel->irq);
284 channel->dma_status.int_gen = 1;
285 v3_raise_irq(dev->vm, channel->irq);
290 static void drive_reset(struct ide_drive * drive) {
291 drive->sector_count = 0x01;
292 drive->sector_num = 0x01;
294 PrintDebug("Resetting drive %s\n", drive->model);
296 if (drive->drive_type == BLOCK_CDROM) {
297 drive->cylinder = 0xeb14;
299 drive->cylinder = 0x0000;
300 //drive->hd_state.accessed = 0;
304 memset(drive->data_buf, 0, sizeof(drive->data_buf));
305 drive->transfer_index = 0;
307 // Send the reset signal to the connected device callbacks
308 // channel->drives[0].reset();
309 // channel->drives[1].reset();
312 static void channel_reset(struct ide_channel * channel) {
314 // set busy and seek complete flags
315 channel->status.val = 0x90;
318 channel->error_reg.val = 0x01;
321 channel->cmd_reg = 0x00;
323 channel->ctrl_reg.irq_disable = 0;
326 static void channel_reset_complete(struct ide_channel * channel) {
327 channel->status.busy = 0;
328 channel->status.ready = 1;
330 channel->drive_head.head_num = 0;
332 drive_reset(&(channel->drives[0]));
333 drive_reset(&(channel->drives[1]));
337 static void ide_abort_command(struct vm_device * dev, struct ide_channel * channel) {
338 channel->status.val = 0x41; // Error + ready
339 channel->error_reg.val = 0x04; // No idea...
341 ide_raise_irq(dev, channel);
// Forward declarations: the bus-master handlers below are invoked both from
// the DMA command port and from the ATA read/write DMA command dispatch.
static int dma_read(struct vm_device * dev, struct ide_channel * channel);
static int dma_write(struct vm_device * dev, struct ide_channel * channel);

/* ATAPI functions */
// NOTE(review): the #include lines for the ATA/ATAPI helper implementations
// that normally follow this comment are not visible in this view.
#ifdef CONFIG_DEBUG_IDE
/* Debug-only: walk the guest's PRD (Physical Region Descriptor) table and
 * dump each entry until the end-of-table bit is found.
 * NOTE(review): several lines appear truncated from this view (the `int ret`
 * / index declarations, the enclosing loop header, early-return bodies, the
 * closing braces and the matching #endif) — confirm against the full source. */
static void print_prd_table(struct vm_device * dev, struct ide_channel * channel) {
    struct ide_dma_prd prd_entry;

    PrintDebug("Dumping PRD table\n");

	// Guest-physical address of the index'th PRD entry
	uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);

	ret = read_guest_pa_memory(dev->vm, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);

	if (ret != sizeof(struct ide_dma_prd)) {
	    PrintError("Could not read PRD\n");

	PrintDebug("\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
		   prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);

	if (prd_entry.end_of_table) {
/* Bus-master DMA "read" (disk -> guest memory): walk the guest's PRD table,
 * reading one sector (disk) or ATAPI block (cdrom) at a time into the drive's
 * staging buffer and copying it to the guest-physical ranges the PRDs name.
 * On completion (end-of-table) clears busy/active and raises the IRQ.
 * NOTE(review): many lines appear truncated from this view — the `int ret`
 * declaration, error `return -1;`s, closing braces, the #endif matching the
 * CONFIG_DEBUG_IDE block, and the final `return 0;` — confirm against the
 * full source before treating this body as compilable. */
static int dma_read(struct vm_device * dev, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // This is at top level scope to do the EOT test at the end
    struct ide_dma_prd prd_entry;
    uint_t bytes_left = drive->transfer_length;

    // Read in the data buffer....
    // Read a sector/block at a time until the prd entry is full.

#ifdef CONFIG_DEBUG_IDE
    print_prd_table(dev, channel);

    PrintDebug("DMA read for %d bytes\n", bytes_left);

    // Loop through the disk data
    while (bytes_left > 0) {
	uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
	uint_t prd_bytes_left = 0;
	uint_t prd_offset = 0;

	PrintDebug("PRD table address = %x\n", channel->dma_prd_addr);

	ret = read_guest_pa_memory(dev->vm, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);

	if (ret != sizeof(struct ide_dma_prd)) {
	    PrintError("Could not read PRD\n");

	PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
		   prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);

	// loop through the PRD data....

	prd_bytes_left = prd_entry.size;

	while (prd_bytes_left > 0) {
	    uint_t bytes_to_write = 0;

	    if (drive->drive_type == BLOCK_DISK) {
		bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;

		if (ata_read(dev, channel, drive->data_buf, 1) == -1) {
		    PrintError("Failed to read next disk sector\n");

	    } else if (drive->drive_type == BLOCK_CDROM) {
		if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
		    bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;

		    if (atapi_read_chunk(dev, channel) == -1) {
			PrintError("Failed to read next disk sector\n");

		    // Non-data ATAPI op reached the DMA path
		    PrintDebug("DMA of command packet\n");
		    PrintError("How does this work???\n");

		    bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
		    prd_bytes_left = bytes_to_write;

	    PrintDebug("Writing DMA data to guest Memory ptr=%p, len=%d\n",
		       (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);

	    drive->current_lba++;

	    ret = write_guest_pa_memory(dev->vm, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);

	    if (ret != bytes_to_write) {
		PrintError("Failed to copy data into guest memory... (ret=%d)\n", ret);

	    PrintDebug("\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);

	    drive->transfer_index += ret;
	    prd_bytes_left -= ret;

	channel->dma_tbl_index++;

	// Transfers are not allowed to straddle PRD boundaries mid-sector/block
	if (drive->drive_type == BLOCK_DISK) {
	    if (drive->transfer_index % HD_SECTOR_SIZE) {
		PrintError("We currently don't handle sectors that span PRD descriptors\n");

	} else if (drive->drive_type == BLOCK_CDROM) {
	    if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
		if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
		    PrintError("We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
		    PrintError("transfer_index=%d, transfer_length=%d\n",
			       drive->transfer_index, drive->transfer_length);

	if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
	    PrintError("DMA table not large enough for data transfer...\n");

	// ATAPI completion flags for the interrupt-reason register
	drive->irq_flags.io_dir = 1;
	drive->irq_flags.c_d = 1;
	drive->irq_flags.rel = 0;

	// Update to the next PRD entry

    if (prd_entry.end_of_table) {
	channel->status.busy = 0;
	channel->status.ready = 1;
	channel->status.data_req = 0;
	channel->status.error = 0;
	channel->status.seek_complete = 1;

	channel->dma_status.active = 0;
	channel->dma_status.err = 0;

    ide_raise_irq(dev, channel);
/* Bus-master DMA "write" (guest memory -> disk): walk the PRD table, copy one
 * sector's worth of guest memory into the staging buffer and write it to the
 * backing store; on end-of-table clear busy/active and raise the IRQ.
 * NOTE(review): as with dma_read, declarations (`int ret`), error returns,
 * closing braces and the trailing `return 0;` appear truncated from this
 * view; also note the "Faild" typo in the copy-error message. */
static int dma_write(struct vm_device * dev, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    // This is at top level scope to do the EOT test at the end
    struct ide_dma_prd prd_entry;
    uint_t bytes_left = drive->transfer_length;

    PrintDebug("DMA write from %d bytes\n", bytes_left);

    // Loop through disk data
    while (bytes_left > 0) {
	uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
	uint_t prd_bytes_left = 0;
	uint_t prd_offset = 0;

	PrintDebug("PRD Table address = %x\n", channel->dma_prd_addr);

	ret = read_guest_pa_memory(dev->vm, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);

	if (ret != sizeof(struct ide_dma_prd)) {
	    PrintError("Could not read PRD\n");

	PrintDebug("PRD Addr: %x, PRD Len: %d, EOT: %d\n",
		   prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);

	prd_bytes_left = prd_entry.size;

	while (prd_bytes_left > 0) {
	    uint_t bytes_to_write = 0;

	    bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;

	    ret = read_guest_pa_memory(dev->vm, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);

	    if (ret != bytes_to_write) {
		PrintError("Faild to copy data from guest memory... (ret=%d)\n", ret);

	    PrintDebug("\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);

	    if (ata_write(dev, channel, drive->data_buf, 1) == -1) {
		PrintError("Failed to write data to disk\n");

	    drive->current_lba++;

	    drive->transfer_index += ret;
	    prd_bytes_left -= ret;

	channel->dma_tbl_index++;

	if (drive->transfer_index % HD_SECTOR_SIZE) {
	    PrintError("We currently don't handle sectors that span PRD descriptors\n");

	if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
	    PrintError("DMA table not large enough for data transfer...\n");

    if (prd_entry.end_of_table) {
	channel->status.busy = 0;
	channel->status.ready = 1;
	channel->status.data_req = 0;
	channel->status.error = 0;
	channel->status.seek_complete = 1;

	channel->dma_status.active = 0;
	channel->dma_status.err = 0;

    ide_raise_irq(dev, channel);
// Bus-master register offsets within a channel's 8-byte DMA block
#define DMA_CMD_PORT 0x00
#define DMA_STATUS_PORT 0x02
#define DMA_PRD_PORT0 0x04
#define DMA_PRD_PORT1 0x05
#define DMA_PRD_PORT2 0x06
#define DMA_PRD_PORT3 0x07

// Bit 3 of the port address selects the channel (0 = primary, 1 = secondary)
#define DMA_CHANNEL_FLAG 0x08
/* I/O write handler for the bus-master DMA register block. Decodes the
 * channel from bit 3 of the port, then dispatches on the register offset:
 * command (kick off dma_read/dma_write), status (write-1-to-clear bits), or
 * the 4 PRD table address bytes.
 * NOTE(review): this switch appears truncated in this view — the
 * DMA_CMD_PORT case label, several length checks, `break`s, returns and
 * closing braces are missing — confirm against the full source. */
static int write_dma_port(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
    struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
    uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
    uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
    struct ide_channel * channel = &(ide->channels[channel_flag]);

    PrintDebug("IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
	       port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);

    switch (port_offset) {

	    channel->dma_cmd.val = *(uint8_t *)src;

	    if (channel->dma_cmd.start == 0) {
		channel->dma_tbl_index = 0;

		channel->dma_status.active = 1;

		if (channel->dma_cmd.read == 1) {
		    // DMA "read" transfers data from the device to guest memory
		    if (dma_read(dev, channel) == -1) {
			PrintError("Failed DMA Read\n");

		    if (dma_write(dev, channel) == -1) {
			PrintError("Failed DMA Write\n");

	    // Only the start (bit 0) and read-direction (bit 3) bits are writable
	    channel->dma_cmd.val &= 0x09;

	case DMA_STATUS_PORT: {
	    uint8_t val = *(uint8_t *)src;

		PrintError("Invalid read length for DMA status port\n");

	    // Writable bits 5-6 pass through; bit 0 is read-only; bits 1-2
	    // are write-1-to-clear
	    channel->dma_status.val = ((val & 0x60) |
				       (channel->dma_status.val & 0x01) |
				       (channel->dma_status.val & ~val & 0x06));

	case DMA_PRD_PORT3: {
	    uint_t addr_index = port_offset & 0x3;
	    uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);

	    if (addr_index + length > 4) {
		PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);

	    for (i = 0; i < length; i++) {
		addr_buf[addr_index + i] = *((uint8_t *)src + i);

	    PrintDebug("Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);

	    PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));
/* I/O read handler for the bus-master DMA register block: returns the DMA
 * command, status, or PRD-address bytes for the addressed channel.
 * NOTE(review): case labels (DMA_CMD_PORT), length checks, `break`s, the
 * loop-variable declaration and the final return appear truncated from this
 * view — confirm against the full source. */
static int read_dma_port(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
    struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
    uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
    uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
    struct ide_channel * channel = &(ide->channels[channel_flag]);

    PrintDebug("Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);

    switch (port_offset) {

	    *(uint8_t *)dst = channel->dma_cmd.val;

	case DMA_STATUS_PORT:

		PrintError("Invalid read length for DMA status port\n");

	    *(uint8_t *)dst = channel->dma_status.val;

	case DMA_PRD_PORT3: {
	    uint_t addr_index = port_offset & 0x3;
	    uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);

	    if (addr_index + length > 4) {
		PrintError("DMA Port space overrun port=%x len=%d\n", port_offset, length);

	    for (i = 0; i < length; i++) {
		*((uint8_t *)dst + i) = addr_buf[addr_index + i];

	    PrintError("IDE: Invalid DMA Port (%s)\n", dma_port_to_str(port_offset));

    PrintDebug("\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
/* I/O write handler for the ATA command register (0x1f7/0x177): latches the
 * command byte and dispatches it — IDENTIFY (ATA/ATAPI), ATAPI packet setup,
 * PIO sector reads, DMA reads/writes, power-management no-ops, SET FEATURES,
 * SET MULTIPLE, and READ MULTIPLE.
 * NOTE(review): this dispatch appears heavily truncated in this view — the
 * length-check `if`, many closing braces, `break`s, `return` statements and
 * the `default:` label are missing — confirm against the full source. */
static int write_cmd_port(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
    struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
    struct ide_channel * channel = get_selected_channel(ide, port);
    struct ide_drive * drive = get_selected_drive(channel);

	PrintError("Invalid Write Length on IDE command Port %x\n", port);

    PrintDebug("IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);

    channel->cmd_reg = *(uint8_t *)src;

    switch (channel->cmd_reg) {

	case 0xa1: // ATAPI Identify Device Packet
	    if (drive->drive_type != BLOCK_CDROM) {

		// JRL: Should we abort here?
		ide_abort_command(dev, channel);

	    atapi_identify_device(drive);

	    channel->error_reg.val = 0;
	    channel->status.val = 0x58; // ready, data_req, seek_complete

	    ide_raise_irq(dev, channel);

	case 0xec: // Identify Device
	    if (drive->drive_type != BLOCK_DISK) {

		// JRL: Should we abort here?
		ide_abort_command(dev, channel);

	    ata_identify_device(drive);

	    channel->error_reg.val = 0;
	    channel->status.val = 0x58;

	    ide_raise_irq(dev, channel);

	case 0xa0: // ATAPI Command Packet
	    if (drive->drive_type != BLOCK_CDROM) {
		ide_abort_command(dev, channel);

	    drive->sector_count = 1;

	    channel->status.busy = 0;
	    channel->status.write_fault = 0;
	    channel->status.data_req = 1;
	    channel->status.error = 0;

	    // reset the data buffer...
	    // the guest will now PIO-write the 12-byte packet to the data port
	    drive->transfer_length = ATAPI_PACKET_SIZE;
	    drive->transfer_index = 0;

	case 0x20: // Read Sectors with Retry
	case 0x21: // Read Sectors without Retry
	    drive->hd_state.cur_sector_num = 1;

	    if (ata_read_sectors(dev, channel) == -1) {
		PrintError("Error reading sectors\n");

	case 0x24: // Read Sectors Extended
	    drive->hd_state.cur_sector_num = 1;

	    if (ata_read_sectors_ext(dev, channel) == -1) {
		PrintError("Error reading extended sectors\n");

	case 0xc8: // Read DMA with retry
	case 0xc9: { // Read DMA
	    // sector_count == 0 encodes a 256-sector transfer
	    uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;

	    if (ata_get_lba(dev, channel, &(drive->current_lba)) == -1) {
		ide_abort_command(dev, channel);

	    drive->hd_state.cur_sector_num = 1;

	    drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
	    drive->transfer_index = 0;

	    // If the bus master was already started, run the transfer now
	    if (channel->dma_status.active == 1) {

		if (dma_read(dev, channel) == -1) {
		    PrintError("Failed DMA Read\n");

	case 0xca: { // Write DMA
	    uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;

	    if (ata_get_lba(dev, channel, &(drive->current_lba)) == -1) {
		ide_abort_command(dev, channel);

	    drive->hd_state.cur_sector_num = 1;

	    drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
	    drive->transfer_index = 0;

	    if (channel->dma_status.active == 1) {

		if (dma_write(dev, channel) == -1) {
		    PrintError("Failed DMA Write\n");

	case 0xe0: // Standby Now 1
	case 0xe1: // Set Idle Immediate
	case 0xe2: // Standby
	case 0xe3: // Set Idle 1
	case 0xe6: // Sleep Now 1
	case 0x94: // Standby Now 2
	case 0x95: // Idle Immediate (CFA)
	case 0x96: // Standby 2
	case 0x97: // Set idle 2
	case 0x99: // Sleep Now 2
	    // Power management commands are accepted but do nothing
	    channel->status.val = 0;
	    channel->status.ready = 1;
	    ide_raise_irq(dev, channel);

	case 0xef: // Set Features
	    // Prior to this the features register has been written to.
	    // This command tells the drive to check if the new value is supported (the value is drive specific)
	    // Common is that bit0=DMA enable
	    // If valid the drive raises an interrupt, if not it aborts.

	    // Do some checking here...

	    channel->status.busy = 0;
	    channel->status.write_fault = 0;
	    channel->status.error = 0;
	    channel->status.ready = 1;
	    channel->status.seek_complete = 1;

	    ide_raise_irq(dev, channel);

	case 0x91: // Initialize Drive Parameters
	case 0x10: // recalibrate?
	    channel->status.error = 0;
	    channel->status.ready = 1;
	    channel->status.seek_complete = 1;
	    ide_raise_irq(dev, channel);

	case 0xc6: { // Set multiple mode (IDE Block mode)
	    // This makes the drive transfer multiple sectors before generating an interrupt
	    uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS

	    if (tmp_sect_num > MAX_MULT_SECTORS) {
		ide_abort_command(dev, channel);

	    if (drive->sector_count == 0) {
		drive->hd_state.mult_sector_num= 1;

		drive->hd_state.mult_sector_num = drive->sector_count;

	    channel->status.ready = 1;
	    channel->status.error = 0;

	    ide_raise_irq(dev, channel);

	case 0xc4: // read multiple sectors
	    drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;

	    PrintError("Unimplemented IDE command (%x)\n", channel->cmd_reg);
/* I/O write handler for the data port: accumulate bytes into the drive's
 * staging buffer and, once transfer_length bytes have arrived, dispatch the
 * pending command (currently only the ATAPI packet; sector writes are
 * unimplemented).
 * NOTE(review): `break`s, error `return -1;`s, the closing braces and the
 * final return appear truncated from this view. */
static int write_data_port(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
    struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
    struct ide_channel * channel = get_selected_channel(ide, port);
    struct ide_drive * drive = get_selected_drive(channel);

    //    PrintDebug("IDE: Writing Data Port %x (val=%x, len=%d)\n",
    //	       port, *(uint32_t *)src, length);

    memcpy(drive->data_buf + drive->transfer_index, src, length);
    drive->transfer_index += length;

    // Transfer is complete, dispatch the command
    if (drive->transfer_index >= drive->transfer_length) {
	switch (channel->cmd_reg) {
	    case 0x30: // Write Sectors
		PrintError("Writing Data not yet implemented\n");

	    case 0xa0: // ATAPI packet command
		if (atapi_handle_packet(dev, channel) == -1) {
		    PrintError("Error handling ATAPI packet\n");

		PrintError("Unhandld IDE Command %x\n", channel->cmd_reg);
/* PIO read path for hard disks: copy `length` bytes from the staging buffer
 * to the guest, refilling the buffer from the backing store at each sector
 * boundary, and raise the completion/increment interrupt per the multiple-
 * sector policy described in the block comment below.
 * NOTE(review): error `return -1;`s, several closing braces and the final
 * `return length;` appear truncated from this view. */
static int read_hd_data(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    int data_offset = drive->transfer_index % HD_SECTOR_SIZE;

    if (drive->transfer_index >= drive->transfer_length) {
	PrintError("Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
		   drive->transfer_length, drive->transfer_index,
		   drive->transfer_index + length);

    // Crossing into a new sector: refill the staging buffer from disk
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
	drive->current_lba++;

	if (ata_read(dev, channel, drive->data_buf, 1) == -1) {
	    PrintError("Could not read next disk sector\n");

    PrintDebug("Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
	       *(uint32_t *)(drive->data_buf + data_offset),
	       length, data_offset);

    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;

    /* This is the trigger for interrupt injection.
     * For read single sector commands we interrupt after every sector
     * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
     * cur_sector_num is configured depending on the operation we are currently running
     * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
     */
    if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
	(drive->transfer_index == drive->transfer_length)) {
	if (drive->transfer_index < drive->transfer_length) {
	    // An increment is complete, but there is still more data to be transferred...
	    PrintDebug("Integral Complete, still transferring more sectors\n");
	    channel->status.data_req = 1;

	    drive->irq_flags.c_d = 0;

	    PrintDebug("Final Sector Transferred\n");
	    // This was the final read of the request
	    channel->status.data_req = 0;

	    drive->irq_flags.c_d = 1;
	    drive->irq_flags.rel = 0;

	channel->status.ready = 1;
	drive->irq_flags.io_dir = 1;
	channel->status.busy = 0;

	ide_raise_irq(dev, channel);
/* PIO read path for ATAPI CD-ROMs: copy from the staging buffer, refilling
 * via atapi_update_data_buf() at each ATAPI block boundary, and raise an
 * interrupt at each req_len increment (updating the byte-count registers) or
 * at final completion.
 * NOTE(review): error `return -1;`s, the `} else {` separators, closing
 * braces and the final `return length;` appear truncated from this view. */
static int read_cd_data(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
    struct ide_drive * drive = get_selected_drive(channel);
    int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
    int req_offset = drive->transfer_index % drive->req_len;

    // 0x28 is READ(10); suppress the (noisy) debug line for it
    if (drive->cd_state.atapi_cmd != 0x28) {
	PrintDebug("IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);

    if (drive->transfer_index >= drive->transfer_length) {
	PrintError("Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n",
		   drive->transfer_length, drive->transfer_index,
		   drive->transfer_index + length);

    // Crossing into a new ATAPI block: refill the staging buffer
    if ((data_offset == 0) && (drive->transfer_index > 0)) {
	if (atapi_update_data_buf(dev, channel) == -1) {
	    PrintError("Could not update CDROM data buffer\n");

    memcpy(dst, drive->data_buf + data_offset, length);

    drive->transfer_index += length;

    // Should the req_offset be recalculated here?????
    if ((req_offset == 0) && (drive->transfer_index > 0)) {
	if (drive->transfer_index < drive->transfer_length) {
	    // An increment is complete, but there is still more data to be transferred...

	    channel->status.data_req = 1;

	    drive->irq_flags.c_d = 0;

	    // Update the request length in the cylinder regs
	    if (atapi_update_req_len(dev, channel, drive->transfer_length - drive->transfer_index) == -1) {
		PrintError("Could not update request length after completed increment\n");

	    // This was the final read of the request
	    channel->status.data_req = 0;
	    channel->status.ready = 1;

	    drive->irq_flags.c_d = 1;
	    drive->irq_flags.rel = 0;

	drive->irq_flags.io_dir = 1;
	channel->status.busy = 0;

	ide_raise_irq(dev, channel);
1133 static int read_drive_id(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
1134 struct ide_drive * drive = get_selected_drive(channel);
1136 channel->status.busy = 0;
1137 channel->status.ready = 1;
1138 channel->status.write_fault = 0;
1139 channel->status.seek_complete = 1;
1140 channel->status.corrected = 0;
1141 channel->status.error = 0;
1144 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1145 drive->transfer_index += length;
1147 if (drive->transfer_index >= drive->transfer_length) {
1148 channel->status.data_req = 0;
/* Top-level I/O read handler for the data port: route to the identify-data
 * reader when an IDENTIFY command is latched, otherwise to the CD or HD data
 * path; reads from an empty slot return zeros.
 * NOTE(review): error `return -1;`s, the `} else {` before the memset
 * fallback, closing braces and the final `return length;` appear truncated
 * from this view. */
static int ide_read_data_port(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
    struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
    struct ide_channel * channel = get_selected_channel(ide, port);
    struct ide_drive * drive = get_selected_drive(channel);

    //    PrintDebug("IDE: Reading Data Port %x (len=%d)\n", port, length);

    if ((channel->cmd_reg == 0xec) ||
	(channel->cmd_reg == 0xa1)) {
	return read_drive_id((uint8_t *)dst, length, dev, channel);

    if (drive->drive_type == BLOCK_CDROM) {
	if (read_cd_data((uint8_t *)dst, length, dev, channel) == -1) {
	    PrintError("IDE: Could not read CD Data\n");

    } else if (drive->drive_type == BLOCK_DISK) {
	if (read_hd_data((uint8_t *)dst, length, dev, channel) == -1) {
	    PrintError("IDE: Could not read HD Data\n");

	memset((uint8_t *)dst, 0, length);
/* I/O write handler for the non-data task-file and control registers.
 * Register writes (sector count/num, cylinders) are mirrored into BOTH
 * drives, matching real hardware where the task file is shared.
 * NOTE(review): the length-check `if`, the PRI_CTRL_PORT case label,
 * `break`s, closing braces and the final return appear truncated from this
 * view — confirm against the full source. */
static int write_port_std(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
    struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
    struct ide_channel * channel = get_selected_channel(ide, port);
    struct ide_drive * drive = get_selected_drive(channel);

	PrintError("Invalid Write length on IDE port %x\n", port);

    PrintDebug("IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);

	// reset and interrupt enable
	case SEC_CTRL_PORT: {
	    struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;

	    // only reset channel on a 0->1 reset bit transition
	    if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
		channel_reset(channel);
	    } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
		channel_reset_complete(channel);

	    channel->ctrl_reg.val = tmp_ctrl->val;

	case PRI_FEATURES_PORT:
	case SEC_FEATURES_PORT:
	    channel->features.val = *(uint8_t *)src;

	case PRI_SECT_CNT_PORT:
	case SEC_SECT_CNT_PORT:
	    channel->drives[0].sector_count = *(uint8_t *)src;
	    channel->drives[1].sector_count = *(uint8_t *)src;

	case PRI_SECT_NUM_PORT:
	case SEC_SECT_NUM_PORT:
	    channel->drives[0].sector_num = *(uint8_t *)src;
	    channel->drives[1].sector_num = *(uint8_t *)src;

	case PRI_CYL_LOW_PORT:
	case SEC_CYL_LOW_PORT:
	    channel->drives[0].cylinder_low = *(uint8_t *)src;
	    channel->drives[1].cylinder_low = *(uint8_t *)src;

	case PRI_CYL_HIGH_PORT:
	case SEC_CYL_HIGH_PORT:
	    channel->drives[0].cylinder_high = *(uint8_t *)src;
	    channel->drives[1].cylinder_high = *(uint8_t *)src;

	case PRI_DRV_SEL_PORT:
	case SEC_DRV_SEL_PORT: {
	    channel->drive_head.val = *(uint8_t *)src;

	    // make sure the reserved bits are ok..
	    // JRL TODO: check with new ramdisk to make sure this is right...
	    channel->drive_head.val |= 0xa0;

	    drive = get_selected_drive(channel);

	    // Selecting a non-present device is a no-no
	    if (drive->drive_type == BLOCK_NONE) {
		PrintDebug("Attempting to select a non-present drive\n");
		channel->error_reg.abort = 1;
		channel->status.error = 1;

	    PrintError("IDE: Write to unknown Port %x\n", port);
/* I/O read handler for the non-data task-file and control registers: returns
 * the error/count/position/drive-select/status registers for the selected
 * drive; absent drives read as 0 (0xa0 for drive-select, preserving the
 * always-set reserved bits).
 * NOTE(review): the length-check `if`, the switch header, the status-port
 * case labels, `break`s, closing braces and the final return appear
 * truncated from this view. */
static int read_port_std(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
    struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
    struct ide_channel * channel = get_selected_channel(ide, port);
    struct ide_drive * drive = get_selected_drive(channel);

	PrintError("Invalid Read length on IDE port %x\n", port);

    PrintDebug("IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));

    if ((port == PRI_ADDR_REG_PORT) ||
	(port == SEC_ADDR_REG_PORT)) {
	// unused, return 0xff
	*(uint8_t *)dst = 0xff;

    // if no drive is present just return 0 + reserved bits
    if (drive->drive_type == BLOCK_NONE) {
	if ((port == PRI_DRV_SEL_PORT) ||
	    (port == SEC_DRV_SEL_PORT)) {
	    *(uint8_t *)dst = 0xa0;

	    *(uint8_t *)dst = 0;

	// This is really the error register.
	case PRI_FEATURES_PORT:
	case SEC_FEATURES_PORT:
	    *(uint8_t *)dst = channel->error_reg.val;

	case PRI_SECT_CNT_PORT:
	case SEC_SECT_CNT_PORT:
	    *(uint8_t *)dst = drive->sector_count;

	case PRI_SECT_NUM_PORT:
	case SEC_SECT_NUM_PORT:
	    *(uint8_t *)dst = drive->sector_num;

	case PRI_CYL_LOW_PORT:
	case SEC_CYL_LOW_PORT:
	    *(uint8_t *)dst = drive->cylinder_low;

	case PRI_CYL_HIGH_PORT:
	case SEC_CYL_HIGH_PORT:
	    *(uint8_t *)dst = drive->cylinder_high;

	case PRI_DRV_SEL_PORT:
	case SEC_DRV_SEL_PORT:  // hard disk drive and head register 0x1f6
	    *(uint8_t *)dst = channel->drive_head.val;

	    // Something about lowering interrupts here....
	    *(uint8_t *)dst = channel->status.val;

	    PrintError("Invalid Port: %x\n", port);

    PrintDebug("\tVal=%x\n", *(uint8_t *)dst);
/* init_drive - reset a single drive slot to its power-on state.
 * Taskfile registers get their ATA reset signature values (sector count
 * and sector number = 1, cylinder = 0), the slot is marked empty
 * (BLOCK_NONE), and all buffers/geometry/backend hooks are cleared.
 * Called for both slots of each channel from init_channel(). */
1353 static void init_drive(struct ide_drive * drive) {
1355 drive->sector_count = 0x01;
1356 drive->sector_num = 0x01;
1357 drive->cylinder = 0x0000;
1359 drive->drive_type = BLOCK_NONE;
1361 memset(drive->model, 0, sizeof(drive->model));
/* Clear any in-flight PIO transfer state and the sector data buffer. */
1363 drive->transfer_index = 0;
1364 drive->transfer_length = 0;
1365 memset(drive->data_buf, 0, sizeof(drive->data_buf));
/* Geometry stays zero until a backend registers via
 * v3_ide_register_harddisk / v3_ide_register_cdrom. */
1367 drive->num_cylinders = 0;
1368 drive->num_heads = 0;
1369 drive->num_sectors = 0;
1372 drive->private_data = NULL;
1373 drive->cd_ops = NULL;
/* init_channel - reset one IDE channel (registers, DMA engine, both drives).
 * error_reg = 0x01: presumably the ATA diagnostic "device OK" code -- TODO
 * confirm against the ATA spec.  ctrl_reg = 0x08: NOTE(review) looks like a
 * reserved/always-set control bit; verify against ide-types.h. */
1376 static void init_channel(struct ide_channel * channel) {
1379 channel->error_reg.val = 0x01;
1380 channel->drive_head.val = 0x00;
1381 channel->status.val = 0x00;
1382 channel->cmd_reg = 0x00;
1383 channel->ctrl_reg.val = 0x08;
/* Bus-master DMA state starts fully idle. */
1386 channel->dma_cmd.val = 0;
1387 channel->dma_status.val = 0;
1388 channel->dma_prd_addr = 0;
1389 channel->dma_tbl_index = 0;
/* Reset both the master (0) and slave (1) drive slots. */
1391 for (i = 0; i < 2; i++) {
1392 init_drive(&(channel->drives[i]));
/* pci_config_update - PCI config-space write callback registered with
 * v3_pci_register_device in ide_init.  The visible body only logs the
 * update; no config state is actually applied here. */
1398 static int pci_config_update(struct pci_device * pci_dev, uint_t reg_num, int length) {
1399 PrintDebug("PCI Config Update\n");
1400 PrintDebug("\t\tInterupt register (Dev=%s), irq=%d\n", pci_dev->name, pci_dev->config_header.intr_line);
/* init_ide_state - reset both IDE channels and assign their legacy IRQs.
 *
 * dev : the IDE vm_device; private_data is struct ide_internal.
 *
 * BUG FIX: the loop bound was `i < 1`, which initialized ONLY the primary
 * channel and left the secondary channel's irq field unset.  This device
 * hooks both the primary AND secondary port ranges and defines
 * SEC_DEFAULT_IRQ (15 == PRI_DEFAULT_IRQ + 1, exactly what the `+ i` below
 * produces for i == 1), so both channels must be initialized. */
1405 static int init_ide_state(struct vm_device * dev) {
1406 struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
1410 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
/* Initialize BOTH channels (was `i < 1`, leaving channel 1 untouched). */
1413 for (i = 0; i < 2; i++) {
1414 init_channel(&(ide->channels[i]));
1416 // JRL: this is a terrible hack...
/* IRQ 14 for the primary channel, IRQ 15 for the secondary. */
1417 ide->channels[i].irq = PRI_DEFAULT_IRQ + i;
/* ide_free - device teardown callback.  Currently a stub: the TODO comments
 * below note that the I/O port hooks and the PCI registration are never
 * released, so destroying the device leaks those hooks. */
1427 static int ide_free(struct vm_device * dev) {
1428 // unhook io ports....
1429 // deregister from PCI?
/* Device operations table handed to v3_allocate_device (initializer body
 * elided from this view). */
1434 static struct v3_device_ops dev_ops = {
/* ide_init - device-registry entry point for the IDE controller.
 *
 * vm       : guest this device is being created in
 * cfg_data : struct ide_cfg (optional PCI bus name + southbridge name)
 *
 * Allocates the internal state, resolves the optional PCI bus/southbridge,
 * attaches the device, hooks every legacy IDE I/O port for both channels,
 * and (when PCI is configured) registers a PIIX-style PCI IDE function with
 * a bus-master DMA BAR.
 *
 * NOTE(review): the V3_Malloc result is used without a NULL check, and the
 * v3_dev_hook_io return values are ignored -- a failed hook is silent. */
1443 static int ide_init(struct guest_info * vm, void * cfg_data) {
1444 struct ide_internal * ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1445 struct ide_cfg * cfg = (struct ide_cfg *)(cfg_data);
1447 PrintDebug("IDE: Initializing IDE\n");
1448 memset(ide, 0, sizeof(struct ide_internal));
/* PCI mode requires a southbridge: the IDE function is registered on the
 * same PCI device number as the southbridge (see sb_pci->dev_num below). */
1451 if (cfg->pci != NULL) {
1452 if (cfg->southbridge == NULL) {
1453 PrintError("PCI Enabled BUT southbridge is NULL\n");
1457 ide->pci_bus = v3_find_dev(vm, (char *)cfg->pci);
1459 if (ide->pci_bus == NULL) {
1460 PrintError("Could not find PCI device\n");
1464 struct vm_device * southbridge = v3_find_dev(vm, cfg->southbridge);
1467 PrintError("Could not find southbridge\n");
1471 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1475 PrintDebug("IDE: Creating IDE bus x 2\n");
1477 struct vm_device * dev = v3_allocate_device("IDE", &dev_ops, ide);
1479 if (v3_attach_device(vm, dev) == -1) {
1480 PrintError("Could not attach device %s\n", "IDE");
1485 if (init_ide_state(dev) == -1) {
1486 PrintError("Failed to initialize IDE state\n");
/* Hook the primary channel's taskfile ports (0x1f0-0x1f7).  The data port
 * gets dedicated PIO-data handlers; all other registers share the
 * read_port_std/write_port_std pair. */
1490 PrintDebug("Connecting to IDE IO ports\n")
1492 v3_dev_hook_io(dev, PRI_DATA_PORT,
1493 &ide_read_data_port, &write_data_port);
1494 v3_dev_hook_io(dev, PRI_FEATURES_PORT,
1495 &read_port_std, &write_port_std);
1496 v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
1497 &read_port_std, &write_port_std);
1498 v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
1499 &read_port_std, &write_port_std);
1500 v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
1501 &read_port_std, &write_port_std);
1502 v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
1503 &read_port_std, &write_port_std);
1504 v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
1505 &read_port_std, &write_port_std);
1506 v3_dev_hook_io(dev, PRI_CMD_PORT,
1507 &read_port_std, &write_cmd_port);
/* Secondary channel taskfile ports (0x170-0x177), same handler layout. */
1509 v3_dev_hook_io(dev, SEC_DATA_PORT,
1510 &ide_read_data_port, &write_data_port);
1511 v3_dev_hook_io(dev, SEC_FEATURES_PORT,
1512 &read_port_std, &write_port_std);
1513 v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
1514 &read_port_std, &write_port_std);
1515 v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
1516 &read_port_std, &write_port_std);
1517 v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
1518 &read_port_std, &write_port_std);
1519 v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
1520 &read_port_std, &write_port_std);
1521 v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
1522 &read_port_std, &write_port_std);
1523 v3_dev_hook_io(dev, SEC_CMD_PORT,
1524 &read_port_std, &write_cmd_port);
/* Control / alt-status and legacy drive-address ports for both channels. */
1527 v3_dev_hook_io(dev, PRI_CTRL_PORT,
1528 &read_port_std, &write_port_std);
1530 v3_dev_hook_io(dev, SEC_CTRL_PORT,
1531 &read_port_std, &write_port_std);
1534 v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
1535 &read_port_std, &write_port_std);
1537 v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
1538 &read_port_std, &write_port_std);
/* PCI registration path: only BAR4 (the 16-port bus-master DMA block) is
 * populated; default_base_port == -1 lets the guest/BIOS assign the base. */
1544 struct v3_pci_bar bars[6];
1545 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1546 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1547 struct pci_device * pci_dev = NULL;
1550 PrintDebug("Connecting IDE to PCI bus\n");
1552 for (i = 0; i < 6; i++) {
1553 bars[i].type = PCI_BAR_NONE;
1556 bars[4].type = PCI_BAR_IO;
1557 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
1558 bars[4].default_base_port = -1;
1559 bars[4].num_ports = 16;
1561 bars[4].io_read = read_dma_port;
1562 bars[4].io_write = write_dma_port;
/* Register as function 1 of the southbridge's PCI device number, matching
 * the PIIX layout (ISA bridge fn 0, IDE fn 1). */
1564 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
1566 pci_config_update, NULL, NULL, dev);
1568 if (pci_dev == NULL) {
1569 PrintError("Failed to register IDE BUS %d with PCI\n", i);
1573 /* This is for CMD646 devices
1574 pci_dev->config_header.vendor_id = 0x1095;
1575 pci_dev->config_header.device_id = 0x0646;
1576 pci_dev->config_header.revision = 0x8f07;
/* Identify as an Intel PIIX3 IDE controller (8086:7010). */
1579 pci_dev->config_header.vendor_id = 0x8086;
1580 pci_dev->config_header.device_id = 0x7010;
1581 pci_dev->config_header.revision = 0x00;
1583 pci_dev->config_header.prog_if = 0x80; // Master IDE device
1584 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1585 pci_dev->config_header.class = PCI_CLASS_STORAGE;
1587 pci_dev->config_header.command = 0;
1588 pci_dev->config_header.status = 0x0280;
1590 ide->ide_pci = pci_dev;
1595 PrintDebug("IDE Initialized\n");
/* Registers this device type with the Palacios device framework under the
 * name "IDE", with ide_init as its constructor. */
1601 device_register("IDE", ide_init)
/* v3_ide_get_geometry - export a drive's CHS geometry to other devices
 * (e.g. the BIOS/CMOS setup code).
 *
 * ide_dev               : the IDE vm_device
 * channel_num/drive_num : slot to query
 * cylinders/heads/sectors : out-params filled with the registered geometry
 *
 * NOTE(review): unlike the register functions below, there are no
 * V3_ASSERT range checks on channel_num/drive_num here -- an out-of-range
 * index reads past the channels/drives arrays.  Consider adding them. */
1611 int v3_ide_get_geometry(struct vm_device * ide_dev, int channel_num, int drive_num,
1612 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
1614 struct ide_internal * ide = (struct ide_internal *)(ide_dev->private_data);
1615 struct ide_channel * channel = &(ide->channels[channel_num]);
1616 struct ide_drive * drive = &(channel->drives[drive_num]);
/* Empty slot: fail (error return path elided from this view). */
1618 if (drive->drive_type == BLOCK_NONE) {
1622 *cylinders = drive->num_cylinders;
1623 *heads = drive->num_heads;
1624 *sectors = drive->num_sectors;
/* v3_ide_register_cdrom - attach a CD-ROM backend to a drive slot.
 *
 * ide_dev      : the IDE vm_device
 * ops          : backend read callbacks (v3_cd_ops)
 * private_data : opaque cookie passed back to the backend ops
 * (bus_num/drive_num/dev_name parameters are elided from this view)
 *
 * Fails if the slot is already occupied. */
1632 int v3_ide_register_cdrom(struct vm_device * ide_dev,
1636 struct v3_cd_ops * ops,
1637 void * private_data) {
1639 struct ide_internal * ide = (struct ide_internal *)(ide_dev->private_data);
1640 struct ide_channel * channel = NULL;
1641 struct ide_drive * drive = NULL;
1643 V3_ASSERT((bus_num >= 0) && (bus_num < 2));
1644 V3_ASSERT((drive_num >= 0) && (drive_num < 2));
1646 channel = &(ide->channels[bus_num]);
1647 drive = &(channel->drives[drive_num]);
1649 if (drive->drive_type != BLOCK_NONE) {
1650 PrintError("Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
/* Copy the model string (drive->model was zeroed by init_drive, so the
 * size-1 strncpy stays NUL-terminated), then space-pad it to 40 chars --
 * presumably for the fixed-width ATA IDENTIFY model field; TODO confirm. */
1654 strncpy(drive->model, dev_name, sizeof(drive->model) - 1);
1656 while (strlen((char *)(drive->model)) < 40) {
1657 strcat((char*)(drive->model), " ");
1661 drive->drive_type = BLOCK_CDROM;
1663 drive->cd_ops = ops;
1666 // Hardcode this for now, but its not a good idea....
/* NOTE(review): raw write into PCI config space at 0x41/0x43 -- looks like
 * the PIIX per-channel IDE timing enable; verify against the PIIX3 datasheet. */
1667 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1670 drive->private_data = private_data;
/* v3_ide_register_harddisk - attach a hard-disk backend to a drive slot
 * (function tail is beyond this view).
 *
 * ide_dev      : the IDE vm_device
 * ops          : backend ops (v3_hd_ops); get_capacity is used below
 * private_data : opaque cookie passed back to the backend ops
 * (bus_num/drive_num/dev_name parameters are elided from this view)
 *
 * Fails if the slot is already occupied. */
1676 int v3_ide_register_harddisk(struct vm_device * ide_dev,
1680 struct v3_hd_ops * ops,
1681 void * private_data) {
1683 struct ide_internal * ide = (struct ide_internal *)(ide_dev->private_data);
1684 struct ide_channel * channel = NULL;
1685 struct ide_drive * drive = NULL;
1687 V3_ASSERT((bus_num >= 0) && (bus_num < 2));
1688 V3_ASSERT((drive_num >= 0) && (drive_num < 2));
1690 channel = &(ide->channels[bus_num]);
1691 drive = &(channel->drives[drive_num]);
1693 if (drive->drive_type != BLOCK_NONE) {
1694 PrintError("Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
/* NOTE(review): unlike the CD-ROM path, the visible code does not
 * space-pad the model string to 40 chars -- confirm whether that matters
 * for the IDENTIFY DEVICE response. */
1698 strncpy(drive->model, dev_name, sizeof(drive->model) - 1);
1700 drive->drive_type = BLOCK_DISK;
1702 drive->hd_state.accessed = 0;
1703 drive->hd_state.mult_sector_num = 1;
1705 drive->hd_ops = ops;
/* Synthesize a fake CHS geometry (63 sectors x 16 heads, cylinders derived
 * from backend capacity) -- the comment below admits this is a hack. */
1707 /* this is something of a hack... */
1708 drive->num_sectors = 63;
1709 drive->num_heads = 16;
1710 drive->num_cylinders = ops->get_capacity(private_data) / (drive->num_sectors * drive->num_heads);
1713 // Hardcode this for now, but its not a good idea....
1714 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1719 drive->private_data = private_data;