2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmm.h>
21 #include <palacios/vmm_dev_mgr.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <devices/ide.h>
24 #include <devices/pci.h>
25 #include <devices/southbridge.h>
26 #include "ide-types.h"
27 #include "atapi-types.h"
29 #ifndef V3_CONFIG_DEBUG_IDE
31 #define PrintDebug(fmt, args...)
34 #define PRI_DEFAULT_IRQ 14
35 #define SEC_DEFAULT_IRQ 15
38 #define PRI_DATA_PORT 0x1f0
39 #define PRI_FEATURES_PORT 0x1f1
40 #define PRI_SECT_CNT_PORT 0x1f2
41 #define PRI_SECT_NUM_PORT 0x1f3
42 #define PRI_CYL_LOW_PORT 0x1f4
43 #define PRI_CYL_HIGH_PORT 0x1f5
44 #define PRI_DRV_SEL_PORT 0x1f6
45 #define PRI_CMD_PORT 0x1f7
46 #define PRI_CTRL_PORT 0x3f6
47 #define PRI_ADDR_REG_PORT 0x3f7
49 #define SEC_DATA_PORT 0x170
50 #define SEC_FEATURES_PORT 0x171
51 #define SEC_SECT_CNT_PORT 0x172
52 #define SEC_SECT_NUM_PORT 0x173
53 #define SEC_CYL_LOW_PORT 0x174
54 #define SEC_CYL_HIGH_PORT 0x175
55 #define SEC_DRV_SEL_PORT 0x176
56 #define SEC_CMD_PORT 0x177
57 #define SEC_CTRL_PORT 0x376
58 #define SEC_ADDR_REG_PORT 0x377
61 #define PRI_DEFAULT_DMA_PORT 0xc000
62 #define SEC_DEFAULT_DMA_PORT 0xc008
64 #define DATA_BUFFER_SIZE 2048
66 #define ATAPI_BLOCK_SIZE 2048
67 #define HD_SECTOR_SIZE 512
/* Register-name strings for the primary IDE channel.  Indexed by
 * (port - PRI_DATA_PORT) for the 0x1f0-0x1f7 block; entries 8-9 name
 * the control / addr-reg ports at 0x3f6-0x3f7 (see io_port_to_str). */
static const char * ide_pri_port_strs[] = {
    "PRI_DATA",    "PRI_FEATURES", "PRI_SECT_CNT", "PRI_SECT_NUM",
    "PRI_CYL_LOW", "PRI_CYL_HIGH", "PRI_DRV_SEL",  "PRI_CMD",
    "PRI_CTRL",    "PRI_ADDR_REG"
};
/* Register-name strings for the secondary IDE channel.  Indexed by
 * (port - SEC_DATA_PORT) for the 0x170-0x177 block; entries 8-9 name
 * the control / addr-reg ports at 0x376-0x377 (see io_port_to_str). */
static const char * ide_sec_port_strs[] = {
    "SEC_DATA",    "SEC_FEATURES", "SEC_SECT_CNT", "SEC_SECT_NUM",
    "SEC_CYL_LOW", "SEC_CYL_HIGH", "SEC_DRV_SEL",  "SEC_CMD",
    "SEC_CTRL",    "SEC_ADDR_REG"
};
/* Register-name strings for the bus-master DMA port block, indexed by
 * the low 3 bits of the port.  Offsets 1 and 3 are reserved ports and
 * are deliberately NULL -- callers must tolerate a NULL result. */
static const char * ide_dma_port_strs[] = {
    "DMA_CMD",  NULL,       "DMA_STATUS", NULL,
    "DMA_PRD0", "DMA_PRD1", "DMA_PRD2",   "DMA_PRD3"
};
83 typedef enum {BLOCK_NONE, BLOCK_DISK, BLOCK_CDROM} v3_block_type_t;
85 static inline const char * io_port_to_str(uint16_t port) {
86 if ((port >= PRI_DATA_PORT) && (port <= PRI_CMD_PORT)) {
87 return ide_pri_port_strs[port - PRI_DATA_PORT];
88 } else if ((port >= SEC_DATA_PORT) && (port <= SEC_CMD_PORT)) {
89 return ide_sec_port_strs[port - SEC_DATA_PORT];
90 } else if ((port == PRI_CTRL_PORT) || (port == PRI_ADDR_REG_PORT)) {
91 return ide_pri_port_strs[port - PRI_CTRL_PORT + 8];
92 } else if ((port == SEC_CTRL_PORT) || (port == SEC_ADDR_REG_PORT)) {
93 return ide_sec_port_strs[port - SEC_CTRL_PORT + 8];
99 static inline const char * dma_port_to_str(uint16_t port) {
100 return ide_dma_port_strs[port & 0x7];
105 struct ide_cd_state {
106 struct atapi_sense_data sense;
109 struct atapi_error_recovery err_recovery;
112 struct ide_hd_state {
115 /* this is the multiple sector transfer size as configured for read/write multiple sectors*/
116 uint32_t mult_sector_num;
118 /* This is the current op sector size:
119 * for multiple sector ops this equals mult_sector_num
120 * for standard ops this equals 1
122 uint32_t cur_sector_num;
128 v3_block_type_t drive_type;
130 struct v3_dev_blk_ops * ops;
133 struct ide_cd_state cd_state;
134 struct ide_hd_state hd_state;
139 // Where we are in the data transfer
140 uint32_t transfer_index;
142 // the length of a transfer
143 // calculated for easy access
144 uint32_t transfer_length;
146 uint64_t current_lba;
148 // We have a local data buffer that we use for IO port accesses
149 uint8_t data_buf[DATA_BUFFER_SIZE];
152 uint32_t num_cylinders;
154 uint32_t num_sectors;
159 uint8_t sector_count; // 0x1f2,0x172
160 struct atapi_irq_flags irq_flags;
161 } __attribute__((packed));
164 uint8_t sector_num; // 0x1f3,0x173
166 } __attribute__((packed));
173 uint8_t cylinder_low; // 0x1f4,0x174
174 uint8_t cylinder_high; // 0x1f5,0x175
175 } __attribute__((packed));
180 } __attribute__((packed));
183 // The transfer length requested by the CPU
185 } __attribute__((packed));
192 struct ide_drive drives[2];
195 struct ide_error_reg error_reg; // [read] 0x1f1,0x171
197 struct ide_features_reg features;
199 struct ide_drive_head_reg drive_head; // 0x1f6,0x176
201 struct ide_status_reg status; // [read] 0x1f7,0x177
202 uint8_t cmd_reg; // [write] 0x1f7,0x177
204 int irq; // this is temporary until we add PCI support
207 struct ide_ctrl_reg ctrl_reg; // [write] 0x3f6,0x376
210 uint8_t dma_ports[8];
212 struct ide_dma_cmd_reg dma_cmd;
214 struct ide_dma_status_reg dma_status;
216 uint32_t dma_prd_addr;
217 } __attribute__((packed));
218 } __attribute__((packed));
220 uint32_t dma_tbl_index;
225 struct ide_internal {
226 struct ide_channel channels[2];
228 struct v3_southbridge * southbridge;
229 struct vm_device * pci_bus;
231 struct pci_device * ide_pci;
233 struct v3_vm_info * vm;
240 /* Utility functions */
/*
 * Swap the byte order of a 16-bit value (big-endian <-> little-endian).
 *
 * Fix: the original reinterpreted the value's object representation
 * through a uint8_t pointer, making the result depend on host byte
 * order (it was only a swap on little-endian hosts).  Shifting the
 * value directly always performs the intended byte swap and is
 * endian-independent.
 */
static inline uint16_t be_to_le_16(const uint16_t val) {
    return (uint16_t)((val << 8) | (val >> 8));
}
/*
 * Swap the byte order of a 16-bit value (little-endian -> big-endian).
 * A 16-bit byte swap is its own inverse, so this is the same operation
 * as be_to_le_16(); implemented directly with shifts so the result is
 * endian-independent.
 */
static inline uint16_t le_to_be_16(const uint16_t val) {
    return (uint16_t)((val << 8) | (val >> 8));
}
/*
 * Swap the byte order of a 32-bit value (big-endian <-> little-endian).
 *
 * Fix: the original computed (buf[0] << 24) on a uint8_t, which is
 * promoted to *signed* int -- for a top byte >= 0x80 the left shift
 * overflows int, which is undefined behavior.  It was also only a
 * correct swap on little-endian hosts.  Masking and shifting the
 * uint32_t value directly avoids both problems.
 */
static inline uint32_t be_to_le_32(const uint32_t val) {
    return ((val & 0x000000ffU) << 24) |
	   ((val & 0x0000ff00U) <<  8) |
	   ((val & 0x00ff0000U) >>  8) |
	   ((val & 0xff000000U) >> 24);
}
/*
 * Swap the byte order of a 32-bit value (little-endian -> big-endian).
 * A 32-bit byte swap is its own inverse, so this is the same operation
 * as be_to_le_32(); implemented directly with masks and shifts so it
 * is endian-independent and free of signed-overflow UB.
 */
static inline uint32_t le_to_be_32(const uint32_t val) {
    return ((val & 0x000000ffU) << 24) |
	   ((val & 0x0000ff00U) <<  8) |
	   ((val & 0x00ff0000U) >>  8) |
	   ((val & 0xff000000U) >> 24);
}
262 static inline int get_channel_index(ushort_t port) {
263 if (((port & 0xfff8) == 0x1f0) ||
264 ((port & 0xfffe) == 0x3f6) ||
265 ((port & 0xfff8) == 0xc000)) {
267 } else if (((port & 0xfff8) == 0x170) ||
268 ((port & 0xfffe) == 0x376) ||
269 ((port & 0xfff8) == 0xc008)) {
276 static inline struct ide_channel * get_selected_channel(struct ide_internal * ide, ushort_t port) {
277 int channel_idx = get_channel_index(port);
278 return &(ide->channels[channel_idx]);
281 static inline struct ide_drive * get_selected_drive(struct ide_channel * channel) {
282 return &(channel->drives[channel->drive_head.drive_sel]);
286 static inline int is_lba_enabled(struct ide_channel * channel) {
287 return channel->drive_head.lba_mode;
292 static void ide_raise_irq(struct ide_internal * ide, struct ide_channel * channel) {
293 if (channel->ctrl_reg.irq_disable == 0) {
295 PrintDebug(ide->vm,VCORE_NONE, "Raising IDE Interrupt %d\n", channel->irq);
297 channel->dma_status.int_gen = 1;
298 v3_raise_irq(ide->vm, channel->irq);
300 PrintDebug(ide->vm,VCORE_NONE, "IDE Interrupt %d cannot be raised as irq is disabled on channel\n",channel->irq);
305 static void drive_reset(struct ide_drive * drive) {
306 drive->sector_count = 0x01;
307 drive->sector_num = 0x01;
309 PrintDebug(VM_NONE,VCORE_NONE, "Resetting drive %s\n", drive->model);
311 if (drive->drive_type == BLOCK_CDROM) {
312 drive->cylinder = 0xeb14;
314 drive->cylinder = 0x0000;
315 //drive->hd_state.accessed = 0;
319 memset(drive->data_buf, 0, sizeof(drive->data_buf));
320 drive->transfer_index = 0;
322 // Send the reset signal to the connected device callbacks
323 // channel->drives[0].reset();
324 // channel->drives[1].reset();
327 static void channel_reset(struct ide_channel * channel) {
329 // set busy and seek complete flags
330 channel->status.val = 0x90;
333 channel->error_reg.val = 0x01;
336 channel->cmd_reg = 0x00;
338 channel->ctrl_reg.irq_disable = 0;
341 static void channel_reset_complete(struct ide_channel * channel) {
342 channel->status.busy = 0;
343 channel->status.ready = 1;
345 channel->drive_head.head_num = 0;
347 drive_reset(&(channel->drives[0]));
348 drive_reset(&(channel->drives[1]));
352 static void ide_abort_command(struct ide_internal * ide, struct ide_channel * channel) {
353 channel->status.val = 0x41; // Error + ready
354 channel->error_reg.val = 0x04; // No idea...
356 ide_raise_irq(ide, channel);
360 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
361 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel);
364 /* ATAPI functions */
372 static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
373 struct ide_dma_prd prd_entry;
376 V3_Print(VM_NONE, VCORE_NONE,"Dumping PRD table\n");
379 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * index);
382 ret = v3_read_gpa_memory(&(ide->vm->cores[0]), prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
384 if (ret != sizeof(struct ide_dma_prd)) {
385 PrintError(VM_NONE, VCORE_NONE, "Could not read PRD\n");
389 V3_Print(VM_NONE, VCORE_NONE,"\tPRD Addr: %x, PRD Len: %d, EOT: %d\n",
391 (prd_entry.size == 0) ? 0x10000 : prd_entry.size,
392 prd_entry.end_of_table);
394 if (prd_entry.end_of_table) {
406 static int dma_read(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
407 struct ide_drive * drive = get_selected_drive(channel);
408 // This is at top level scope to do the EOT test at the end
409 struct ide_dma_prd prd_entry = {};
410 uint_t bytes_left = drive->transfer_length;
412 // Read in the data buffer....
413 // Read a sector/block at a time until the prd entry is full.
415 #ifdef V3_CONFIG_DEBUG_IDE
416 print_prd_table(ide, channel);
419 PrintDebug(core->vm_info, core, "DMA read for %d bytes\n", bytes_left);
421 // Loop through the disk data
422 while (bytes_left > 0) {
423 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
424 uint_t prd_bytes_left = 0;
425 uint_t prd_offset = 0;
428 PrintDebug(core->vm_info, core, "PRD table address = %x\n", channel->dma_prd_addr);
430 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
432 if (ret != sizeof(struct ide_dma_prd)) {
433 PrintError(core->vm_info, core, "Could not read PRD\n");
437 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
438 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
440 // loop through the PRD data....
442 if (prd_entry.size == 0) {
443 // a size of 0 means 64k
444 prd_bytes_left = 0x10000;
446 prd_bytes_left = prd_entry.size;
450 while (prd_bytes_left > 0) {
451 uint_t bytes_to_write = 0;
453 if (drive->drive_type == BLOCK_DISK) {
454 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
457 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
458 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
461 } else if (drive->drive_type == BLOCK_CDROM) {
462 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
463 bytes_to_write = (prd_bytes_left > ATAPI_BLOCK_SIZE) ? ATAPI_BLOCK_SIZE : prd_bytes_left;
465 if (atapi_read_chunk(ide, channel) == -1) {
466 PrintError(core->vm_info, core, "Failed to read next disk sector\n");
471 PrintError(core->vm_info, core, "How does this work (ATAPI CMD=%x)???\n", drive->cd_state.atapi_cmd);
476 //V3_Print(core->vm_info, core, "DMA of command packet\n");
478 bytes_to_write = (prd_bytes_left > bytes_left) ? bytes_left : prd_bytes_left;
479 prd_bytes_left = bytes_to_write;
482 // V3_Print(core->vm_info, core, "Writing ATAPI cmd OP DMA (cmd=%x) (len=%d)\n", drive->cd_state.atapi_cmd, prd_bytes_left);
483 cmd_ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset,
484 bytes_to_write, drive->data_buf);
491 drive->transfer_index += bytes_to_write;
493 channel->status.busy = 0;
494 channel->status.ready = 1;
495 channel->status.data_req = 0;
496 channel->status.error = 0;
497 channel->status.seek_complete = 1;
499 channel->dma_status.active = 0;
500 channel->dma_status.err = 0;
502 ide_raise_irq(ide, channel);
508 PrintDebug(core->vm_info, core, "Writing DMA data to guest Memory ptr=%p, len=%d\n",
509 (void *)(addr_t)(prd_entry.base_addr + prd_offset), bytes_to_write);
511 drive->current_lba++;
513 ret = v3_write_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
515 if (ret != bytes_to_write) {
516 PrintError(core->vm_info, core, "Failed to copy data into guest memory... (ret=%d)\n", ret);
520 PrintDebug(core->vm_info, core, "\t DMA ret=%d, (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
522 drive->transfer_index += ret;
523 prd_bytes_left -= ret;
528 channel->dma_tbl_index++;
530 if (drive->drive_type == BLOCK_DISK) {
531 if (drive->transfer_index % HD_SECTOR_SIZE) {
532 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
535 } else if (drive->drive_type == BLOCK_CDROM) {
536 if (atapi_cmd_is_data_op(drive->cd_state.atapi_cmd)) {
537 if (drive->transfer_index % ATAPI_BLOCK_SIZE) {
538 PrintError(core->vm_info, core, "We currently don't handle ATAPI BLOCKS that span PRD descriptors\n");
539 PrintError(core->vm_info, core, "transfer_index=%d, transfer_length=%d\n",
540 drive->transfer_index, drive->transfer_length);
547 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
548 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
554 drive->irq_flags.io_dir = 1;
555 drive->irq_flags.c_d = 1;
556 drive->irq_flags.rel = 0;
560 // Update to the next PRD entry
564 if (prd_entry.end_of_table) {
565 channel->status.busy = 0;
566 channel->status.ready = 1;
567 channel->status.data_req = 0;
568 channel->status.error = 0;
569 channel->status.seek_complete = 1;
571 channel->dma_status.active = 0;
572 channel->dma_status.err = 0;
575 ide_raise_irq(ide, channel);
581 static int dma_write(struct guest_info * core, struct ide_internal * ide, struct ide_channel * channel) {
582 struct ide_drive * drive = get_selected_drive(channel);
583 // This is at top level scope to do the EOT test at the end
584 struct ide_dma_prd prd_entry = {};
585 uint_t bytes_left = drive->transfer_length;
588 PrintDebug(core->vm_info, core, "DMA write from %d bytes\n", bytes_left);
590 // Loop through disk data
591 while (bytes_left > 0) {
592 uint32_t prd_entry_addr = channel->dma_prd_addr + (sizeof(struct ide_dma_prd) * channel->dma_tbl_index);
593 uint_t prd_bytes_left = 0;
594 uint_t prd_offset = 0;
597 PrintDebug(core->vm_info, core, "PRD Table address = %x\n", channel->dma_prd_addr);
599 ret = v3_read_gpa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
601 if (ret != sizeof(struct ide_dma_prd)) {
602 PrintError(core->vm_info, core, "Could not read PRD\n");
606 PrintDebug(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
607 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
610 if (prd_entry.size == 0) {
611 // a size of 0 means 64k
612 prd_bytes_left = 0x10000;
614 prd_bytes_left = prd_entry.size;
617 while (prd_bytes_left > 0) {
618 uint_t bytes_to_write = 0;
621 bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
624 ret = v3_read_gpa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
626 if (ret != bytes_to_write) {
627 PrintError(core->vm_info, core, "Faild to copy data from guest memory... (ret=%d)\n", ret);
631 PrintDebug(core->vm_info, core, "\t DMA ret=%d (prd_bytes_left=%d) (bytes_left=%d)\n", ret, prd_bytes_left, bytes_left);
634 if (ata_write(ide, channel, drive->data_buf, 1) == -1) {
635 PrintError(core->vm_info, core, "Failed to write data to disk\n");
639 drive->current_lba++;
641 drive->transfer_index += ret;
642 prd_bytes_left -= ret;
647 channel->dma_tbl_index++;
649 if (drive->transfer_index % HD_SECTOR_SIZE) {
650 PrintError(core->vm_info, core, "We currently don't handle sectors that span PRD descriptors\n");
654 if ((prd_entry.end_of_table == 1) && (bytes_left > 0)) {
655 PrintError(core->vm_info, core, "DMA table not large enough for data transfer...\n");
656 PrintError(core->vm_info, core, "\t(bytes_left=%u) (transfer_length=%u)...\n",
657 bytes_left, drive->transfer_length);
658 PrintError(core->vm_info, core, "PRD Addr: %x, PRD Len: %d, EOT: %d\n",
659 prd_entry.base_addr, prd_entry.size, prd_entry.end_of_table);
661 print_prd_table(ide, channel);
666 if (prd_entry.end_of_table) {
667 channel->status.busy = 0;
668 channel->status.ready = 1;
669 channel->status.data_req = 0;
670 channel->status.error = 0;
671 channel->status.seek_complete = 1;
673 channel->dma_status.active = 0;
674 channel->dma_status.err = 0;
677 ide_raise_irq(ide, channel);
684 #define DMA_CMD_PORT 0x00
685 #define DMA_STATUS_PORT 0x02
686 #define DMA_PRD_PORT0 0x04
687 #define DMA_PRD_PORT1 0x05
688 #define DMA_PRD_PORT2 0x06
689 #define DMA_PRD_PORT3 0x07
691 #define DMA_CHANNEL_FLAG 0x08
693 static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
694 struct ide_internal * ide = (struct ide_internal *)private_data;
695 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
696 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
697 struct ide_channel * channel = &(ide->channels[channel_flag]);
699 PrintDebug(core->vm_info, core, "IDE: Writing DMA Port %x (%s) (val=%x) (len=%d) (channel=%d)\n",
700 port, dma_port_to_str(port_offset), *(uint32_t *)src, length, channel_flag);
702 switch (port_offset) {
704 channel->dma_cmd.val = *(uint8_t *)src;
706 if (channel->dma_cmd.start == 0) {
707 channel->dma_tbl_index = 0;
709 channel->dma_status.active = 1;
711 if (channel->dma_cmd.read == 1) {
713 if (dma_read(core, ide, channel) == -1) {
714 PrintError(core->vm_info, core, "Failed DMA Read\n");
719 if (dma_write(core, ide, channel) == -1) {
720 PrintError(core->vm_info, core, "Failed DMA Write\n");
725 channel->dma_cmd.val &= 0x09;
730 case DMA_STATUS_PORT: {
731 uint8_t val = *(uint8_t *)src;
734 PrintError(core->vm_info, core, "Invalid read length for DMA status port\n");
739 channel->dma_status.val = ((val & 0x60) |
740 (channel->dma_status.val & 0x01) |
741 (channel->dma_status.val & ~val & 0x06));
748 case DMA_PRD_PORT3: {
749 uint_t addr_index = port_offset & 0x3;
750 uint8_t * addr_buf = (uint8_t *)&(channel->dma_prd_addr);
753 if (addr_index + length > 4) {
754 PrintError(core->vm_info, core, "DMA Port space overrun port=%x len=%d\n", port_offset, length);
758 for (i = 0; i < length; i++) {
759 addr_buf[addr_index + i] = *((uint8_t *)src + i);
762 PrintDebug(core->vm_info, core, "Writing PRD Port %x (val=%x)\n", port_offset, channel->dma_prd_addr);
767 PrintError(core->vm_info, core, "IDE: Invalid DMA Port (%d) (%s)\n", port, dma_port_to_str(port_offset));
775 static int read_dma_port(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
776 struct ide_internal * ide = (struct ide_internal *)private_data;
777 uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
778 uint_t channel_flag = (port & DMA_CHANNEL_FLAG) >> 3;
779 struct ide_channel * channel = &(ide->channels[channel_flag]);
781 PrintDebug(core->vm_info, core, "Reading DMA port %d (%x) (channel=%d)\n", port, port, channel_flag);
783 if (port_offset + length > 16) {
784 PrintError(core->vm_info, core, "DMA Port Read: Port overrun (port_offset=%d, length=%d)\n", port_offset, length);
788 memcpy(dst, channel->dma_ports + port_offset, length);
790 PrintDebug(core->vm_info, core, "\tval=%x (len=%d)\n", *(uint32_t *)dst, length);
797 static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
798 struct ide_internal * ide = priv_data;
799 struct ide_channel * channel = get_selected_channel(ide, port);
800 struct ide_drive * drive = get_selected_drive(channel);
803 PrintError(core->vm_info, core, "Invalid Write Length on IDE command Port %x\n", port);
807 PrintDebug(core->vm_info, core, "IDE: Writing Command Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
809 channel->cmd_reg = *(uint8_t *)src;
811 switch (channel->cmd_reg) {
813 case ATAPI_PIDENTIFY: // ATAPI Identify Device Packet
814 if (drive->drive_type != BLOCK_CDROM) {
817 // JRL: Should we abort here?
818 ide_abort_command(ide, channel);
821 atapi_identify_device(drive);
823 channel->error_reg.val = 0;
824 channel->status.val = 0x58; // ready, data_req, seek_complete
826 ide_raise_irq(ide, channel);
829 case ATAPI_IDENTIFY: // Identify Device
830 if (drive->drive_type != BLOCK_DISK) {
833 // JRL: Should we abort here?
834 ide_abort_command(ide, channel);
836 ata_identify_device(drive);
838 channel->error_reg.val = 0;
839 channel->status.val = 0x58;
841 ide_raise_irq(ide, channel);
845 case ATAPI_PACKETCMD: // ATAPI Command Packet
846 if (drive->drive_type != BLOCK_CDROM) {
847 ide_abort_command(ide, channel);
850 drive->sector_count = 1;
852 channel->status.busy = 0;
853 channel->status.write_fault = 0;
854 channel->status.data_req = 1;
855 channel->status.error = 0;
857 // reset the datxgoto-la buffer...
858 drive->transfer_length = ATAPI_PACKET_SIZE;
859 drive->transfer_index = 0;
863 case ATAPI_READ: // Read Sectors with Retry
864 case ATAPI_READ_ONCE: // Read Sectors without Retry
865 drive->hd_state.cur_sector_num = 1;
867 if (ata_read_sectors(ide, channel) == -1) {
868 PrintError(core->vm_info, core, "Error reading sectors\n");
873 case ATAPI_READ_EXT: // Read Sectors Extended
874 drive->hd_state.cur_sector_num = 1;
876 if (ata_read_sectors_ext(ide, channel) == -1) {
877 PrintError(core->vm_info, core, "Error reading extended sectors\n");
882 case ATAPI_WRITE: {// Write Sector
883 drive->hd_state.cur_sector_num = 1;
885 if (ata_write_sectors(ide, channel) == -1) {
886 PrintError(core->vm_info, core, "Error writing sectors\n");
894 case ATAPI_READDMA: // Read DMA with retry
895 case ATAPI_READDMA_ONCE: { // Read DMA
896 uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
898 if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
899 ide_abort_command(ide, channel);
903 drive->hd_state.cur_sector_num = 1;
905 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
906 drive->transfer_index = 0;
908 if (channel->dma_status.active == 1) {
910 if (dma_read(core, ide, channel) == -1) {
911 PrintError(core->vm_info, core, "Failed DMA Read\n");
918 case ATAPI_WRITEDMA: { // Write DMA
919 uint32_t sect_cnt = (drive->sector_count == 0) ? 256 : drive->sector_count;
921 if (ata_get_lba(ide, channel, &(drive->current_lba)) == -1) {
922 ide_abort_command(ide, channel);
926 drive->hd_state.cur_sector_num = 1;
928 drive->transfer_length = sect_cnt * HD_SECTOR_SIZE;
929 drive->transfer_index = 0;
931 if (channel->dma_status.active == 1) {
933 if (dma_write(core, ide, channel) == -1) {
934 PrintError(core->vm_info, core, "Failed DMA Write\n");
940 case ATAPI_STANDBYNOW1: // Standby Now 1
941 case ATAPI_IDLEIMMEDIATE: // Set Idle Immediate
942 case ATAPI_STANDBY: // Standby
943 case ATAPI_SETIDLE1: // Set Idle 1
944 case ATAPI_SLEEPNOW1: // Sleep Now 1
945 case ATAPI_STANDBYNOW2: // Standby Now 2
946 case ATAPI_IDLEIMMEDIATE2: // Idle Immediate (CFA)
947 case ATAPI_STANDBY2: // Standby 2
948 case ATAPI_SETIDLE2: // Set idle 2
949 case ATAPI_SLEEPNOW2: // Sleep Now 2
950 channel->status.val = 0;
951 channel->status.ready = 1;
952 ide_raise_irq(ide, channel);
955 case ATAPI_SETFEATURES: // Set Features
956 // Prior to this the features register has been written to.
957 // This command tells the drive to check if the new value is supported (the value is drive specific)
958 // Common is that bit0=DMA enable
959 // If valid the drive raises an interrupt, if not it aborts.
961 // Do some checking here...
963 channel->status.busy = 0;
964 channel->status.write_fault = 0;
965 channel->status.error = 0;
966 channel->status.ready = 1;
967 channel->status.seek_complete = 1;
969 ide_raise_irq(ide, channel);
972 case ATAPI_SPECIFY: // Initialize Drive Parameters
973 case ATAPI_RECAL: // recalibrate?
974 channel->status.error = 0;
975 channel->status.ready = 1;
976 channel->status.seek_complete = 1;
977 ide_raise_irq(ide, channel);
979 case ATAPI_SETMULT: { // Set multiple mode (IDE Block mode)
980 // This makes the drive transfer multiple sectors before generating an interrupt
981 uint32_t tmp_sect_num = drive->sector_num; // GCC SUCKS
983 if (tmp_sect_num > MAX_MULT_SECTORS) {
984 ide_abort_command(ide, channel);
988 if (drive->sector_count == 0) {
989 drive->hd_state.mult_sector_num= 1;
991 drive->hd_state.mult_sector_num = drive->sector_count;
994 channel->status.ready = 1;
995 channel->status.error = 0;
997 ide_raise_irq(ide, channel);
1002 case ATAPI_DEVICE_RESET: // Reset Device
1004 channel->error_reg.val = 0x01;
1005 channel->status.busy = 0;
1006 channel->status.ready = 1;
1007 channel->status.seek_complete = 1;
1008 channel->status.write_fault = 0;
1009 channel->status.error = 0;
1012 case ATAPI_CHECKPOWERMODE1: // Check power mode
1013 drive->sector_count = 0xff; /* 0x00=standby, 0x80=idle, 0xff=active or idle */
1014 channel->status.busy = 0;
1015 channel->status.ready = 1;
1016 channel->status.write_fault = 0;
1017 channel->status.data_req = 0;
1018 channel->status.error = 0;
1021 case ATAPI_MULTREAD: // read multiple sectors
1022 drive->hd_state.cur_sector_num = drive->hd_state.mult_sector_num;
1024 PrintError(core->vm_info, core, "Unimplemented IDE command (%x)\n", channel->cmd_reg);
1032 static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1033 struct ide_internal * ide = priv_data;
1034 struct ide_channel * channel = get_selected_channel(ide, port);
1035 struct ide_drive * drive = get_selected_drive(channel);
1037 PrintDebug(core->vm_info, core, "IDE: Writing Data Port %x (val=%x, len=%d)\n",
1038 port, *(uint32_t *)src, length);
1040 memcpy(drive->data_buf + drive->transfer_index, src, length);
1041 drive->transfer_index += length;
1043 // Transfer is complete, dispatch the command
1044 if (drive->transfer_index >= drive->transfer_length) {
1045 switch (channel->cmd_reg) {
1047 case ATAPI_WRITE: // Write Sectors
1049 channel->status.busy = 1;
1050 channel->status.data_req = 0;
1052 if (ata_write(ide, channel, drive->data_buf, drive->transfer_length/HD_SECTOR_SIZE) == -1) {
1053 PrintError(core->vm_info, core, "Error writing to disk\n");
1057 PrintDebug(core->vm_info, core, "IDE: Write sectors complete\n");
1059 channel->status.error = 0;
1060 channel->status.busy = 0;
1062 ide_raise_irq(ide, channel);
1065 case ATAPI_PACKETCMD: // ATAPI packet command
1066 if (atapi_handle_packet(core, ide, channel) == -1) {
1067 PrintError(core->vm_info, core, "Error handling ATAPI packet\n");
1072 PrintError(core->vm_info, core, "Unhandld IDE Command %x\n", channel->cmd_reg);
1081 static int read_hd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1082 struct ide_drive * drive = get_selected_drive(channel);
1083 int data_offset = drive->transfer_index % HD_SECTOR_SIZE;
1087 if (drive->transfer_index >= drive->transfer_length) {
1088 PrintError(VM_NONE, VCORE_NONE, "Buffer overrun... (xfer_len=%d) (cur_idx=%x) (post_idx=%d)\n",
1089 drive->transfer_length, drive->transfer_index,
1090 drive->transfer_index + length);
1095 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1096 drive->current_lba++;
1098 if (ata_read(ide, channel, drive->data_buf, 1) == -1) {
1099 PrintError(VM_NONE, VCORE_NONE, "Could not read next disk sector\n");
1105 PrintDebug(VM_NONE, VCORE_NONE, "Reading HD Data (Val=%x), (len=%d) (offset=%d)\n",
1106 *(uint32_t *)(drive->data_buf + data_offset),
1107 length, data_offset);
1109 memcpy(dst, drive->data_buf + data_offset, length);
1111 drive->transfer_index += length;
1114 /* This is the trigger for interrupt injection.
1115 * For read single sector commands we interrupt after every sector
1116 * For multi sector reads we interrupt only at end of the cluster size (mult_sector_num)
1117 * cur_sector_num is configured depending on the operation we are currently running
1118 * We also trigger an interrupt if this is the last byte to transfer, regardless of sector count
1120 if (((drive->transfer_index % (HD_SECTOR_SIZE * drive->hd_state.cur_sector_num)) == 0) ||
1121 (drive->transfer_index == drive->transfer_length)) {
1122 if (drive->transfer_index < drive->transfer_length) {
1123 // An increment is complete, but there is still more data to be transferred...
1124 PrintDebug(VM_NONE, VCORE_NONE, "Integral Complete, still transferring more sectors\n");
1125 channel->status.data_req = 1;
1127 drive->irq_flags.c_d = 0;
1129 PrintDebug(VM_NONE, VCORE_NONE, "Final Sector Transferred\n");
1130 // This was the final read of the request
1131 channel->status.data_req = 0;
1134 drive->irq_flags.c_d = 1;
1135 drive->irq_flags.rel = 0;
1138 channel->status.ready = 1;
1139 drive->irq_flags.io_dir = 1;
1140 channel->status.busy = 0;
1142 ide_raise_irq(ide, channel);
1151 static int read_cd_data(uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1152 struct ide_drive * drive = get_selected_drive(channel);
1153 int data_offset = drive->transfer_index % ATAPI_BLOCK_SIZE;
1154 // int req_offset = drive->transfer_index % drive->req_len;
1156 if (drive->cd_state.atapi_cmd != 0x28) {
1157 PrintDebug(VM_NONE, VCORE_NONE, "IDE: Reading CD Data (len=%d) (req_len=%d)\n", length, drive->req_len);
1158 PrintDebug(VM_NONE, VCORE_NONE, "IDE: transfer len=%d, transfer idx=%d\n", drive->transfer_length, drive->transfer_index);
1163 if (drive->transfer_index >= drive->transfer_length) {
1164 PrintError(VM_NONE, VCORE_NONE, "Buffer Overrun... (xfer_len=%d) (cur_idx=%d) (post_idx=%d)\n",
1165 drive->transfer_length, drive->transfer_index,
1166 drive->transfer_index + length);
1171 if ((data_offset == 0) && (drive->transfer_index > 0)) {
1172 if (atapi_update_data_buf(ide, channel) == -1) {
1173 PrintError(VM_NONE, VCORE_NONE, "Could not update CDROM data buffer\n");
1178 memcpy(dst, drive->data_buf + data_offset, length);
1180 drive->transfer_index += length;
1183 // Should the req_offset be recalculated here?????
1184 if (/*(req_offset == 0) &&*/ (drive->transfer_index > 0)) {
1185 if (drive->transfer_index < drive->transfer_length) {
1186 // An increment is complete, but there is still more data to be transferred...
1188 channel->status.data_req = 1;
1190 drive->irq_flags.c_d = 0;
1192 // Update the request length in the cylinder regs
1193 if (atapi_update_req_len(ide, channel, drive->transfer_length - drive->transfer_index) == -1) {
1194 PrintError(VM_NONE, VCORE_NONE, "Could not update request length after completed increment\n");
1198 // This was the final read of the request
1201 channel->status.data_req = 0;
1202 channel->status.ready = 1;
1204 drive->irq_flags.c_d = 1;
1205 drive->irq_flags.rel = 0;
1208 drive->irq_flags.io_dir = 1;
1209 channel->status.busy = 0;
1211 ide_raise_irq(ide, channel);
1218 static int read_drive_id( uint8_t * dst, uint_t length, struct ide_internal * ide, struct ide_channel * channel) {
1219 struct ide_drive * drive = get_selected_drive(channel);
1221 channel->status.busy = 0;
1222 channel->status.ready = 1;
1223 channel->status.write_fault = 0;
1224 channel->status.seek_complete = 1;
1225 channel->status.corrected = 0;
1226 channel->status.error = 0;
1229 memcpy(dst, drive->data_buf + drive->transfer_index, length);
1230 drive->transfer_index += length;
1232 if (drive->transfer_index >= drive->transfer_length) {
1233 channel->status.data_req = 0;
// I/O hook for reads of the IDE data port (PRI/SEC_DATA_PORT).
// Dispatches on the last command written to the channel: IDENTIFY (0xec)
// and IDENTIFY PACKET (0xa1) read back the drive-id buffer; otherwise the
// read is routed to the CD or HD data path of the selected drive, and an
// absent/unknown drive reads back zeros.
1240 static int ide_read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1241 struct ide_internal * ide = priv_data;
1242 struct ide_channel * channel = get_selected_channel(ide, port);
1243 struct ide_drive * drive = get_selected_drive(channel);
1245 // PrintDebug(core->vm_info, core, "IDE: Reading Data Port %x (len=%d)\n", port, length);
// ID commands are answered regardless of drive type.
1247 if ((channel->cmd_reg == 0xec) ||
1248 (channel->cmd_reg == 0xa1)) {
1249 return read_drive_id((uint8_t *)dst, length, ide, channel);
1252 if (drive->drive_type == BLOCK_CDROM) {
1253 if (read_cd_data((uint8_t *)dst, length, ide, channel) == -1) {
1254 PrintError(core->vm_info, core, "IDE: Could not read CD Data (atapi cmd=%x)\n", drive->cd_state.atapi_cmd);
1257 } else if (drive->drive_type == BLOCK_DISK) {
1258 if (read_hd_data((uint8_t *)dst, length, ide, channel) == -1) {
1259 PrintError(core->vm_info, core, "IDE: Could not read HD Data\n");
// No backing drive: satisfy the read with zeros rather than failing.
1263 memset((uint8_t *)dst, 0, length);
// I/O hook for writes to the "standard" IDE command-block and control
// registers (everything except the data and command ports).
// NOTE(review): the switch header, break statements and several closing
// braces are elided in this chunk; comments only, code untouched.
1269 static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
1270 struct ide_internal * ide = priv_data;
1271 struct ide_channel * channel = get_selected_channel(ide, port);
1272 struct ide_drive * drive = get_selected_drive(channel);
// These registers are byte-wide; reject other access sizes.
1275 PrintError(core->vm_info, core, "Invalid Write length on IDE port %x\n", port);
1279 PrintDebug(core->vm_info, core, "IDE: Writing Standard Port %x (%s) (val=%x)\n", port, io_port_to_str(port), *(uint8_t *)src);
1282 // reset and interrupt enable
1284 case SEC_CTRL_PORT: {
1285 struct ide_ctrl_reg * tmp_ctrl = (struct ide_ctrl_reg *)src;
1287 // only reset channel on a 0->1 reset bit transition
1288 if ((!channel->ctrl_reg.soft_reset) && (tmp_ctrl->soft_reset)) {
1289 channel_reset(channel);
1290 } else if ((channel->ctrl_reg.soft_reset) && (!tmp_ctrl->soft_reset)) {
1291 channel_reset_complete(channel);
1294 channel->ctrl_reg.val = tmp_ctrl->val;
1297 case PRI_FEATURES_PORT:
1298 case SEC_FEATURES_PORT:
1299 channel->features.val = *(uint8_t *)src;
// The sector-count/number and cylinder registers are latched into BOTH
// drive slots on the channel: writes land before drive selection is known.
1302 case PRI_SECT_CNT_PORT:
1303 case SEC_SECT_CNT_PORT:
1304 channel->drives[0].sector_count = *(uint8_t *)src;
1305 channel->drives[1].sector_count = *(uint8_t *)src;
1308 case PRI_SECT_NUM_PORT:
1309 case SEC_SECT_NUM_PORT:
1310 channel->drives[0].sector_num = *(uint8_t *)src;
1311 channel->drives[1].sector_num = *(uint8_t *)src;
1313 case PRI_CYL_LOW_PORT:
1314 case SEC_CYL_LOW_PORT:
1315 channel->drives[0].cylinder_low = *(uint8_t *)src;
1316 channel->drives[1].cylinder_low = *(uint8_t *)src;
1319 case PRI_CYL_HIGH_PORT:
1320 case SEC_CYL_HIGH_PORT:
1321 channel->drives[0].cylinder_high = *(uint8_t *)src;
1322 channel->drives[1].cylinder_high = *(uint8_t *)src;
1325 case PRI_DRV_SEL_PORT:
1326 case SEC_DRV_SEL_PORT: {
1327 channel->drive_head.val = *(uint8_t *)src;
1329 // make sure the reserved bits are ok..
1330 // JRL TODO: check with new ramdisk to make sure this is right...
// Force the two always-one bits (0xa0) of the drive/head register.
1331 channel->drive_head.val |= 0xa0;
// The selected drive may have changed -- re-resolve before checking it.
1333 drive = get_selected_drive(channel);
1335 // Selecting a non-present device is a no-no
1336 if (drive->drive_type == BLOCK_NONE) {
1337 PrintDebug(core->vm_info, core, "Attempting to select a non-present drive\n");
1338 channel->error_reg.abort = 1;
1339 channel->status.error = 1;
// Drive present: report idle/ready status and quiesce the DMA engine.
1341 channel->status.busy = 0;
1342 channel->status.ready = 1;
1343 channel->status.data_req = 0;
1344 channel->status.error = 0;
1345 channel->status.seek_complete = 1;
1347 channel->dma_status.active = 0;
1348 channel->dma_status.err = 0;
1354 PrintError(core->vm_info, core, "IDE: Write to unknown Port %x\n", port);
// I/O hook for reads of the "standard" IDE registers (everything except
// the data port). Returns per-drive shadow registers for the currently
// selected drive and channel-level registers otherwise.
// NOTE(review): the switch header and break statements are elided in this
// chunk; comments only, code untouched.
1361 static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * priv_data) {
1362 struct ide_internal * ide = priv_data;
1363 struct ide_channel * channel = get_selected_channel(ide, port);
1364 struct ide_drive * drive = get_selected_drive(channel);
// Byte-wide registers only.
1367 PrintError(core->vm_info, core, "Invalid Read length on IDE port %x\n", port);
1371 PrintDebug(core->vm_info, core, "IDE: Reading Standard Port %x (%s)\n", port, io_port_to_str(port));
1373 if ((port == PRI_ADDR_REG_PORT) ||
1374 (port == SEC_ADDR_REG_PORT)) {
1375 // unused, return 0xff
1376 *(uint8_t *)dst = 0xff;
1381 // if no drive is present just return 0 + reserved bits
1382 if (drive->drive_type == BLOCK_NONE) {
1383 if ((port == PRI_DRV_SEL_PORT) ||
1384 (port == SEC_DRV_SEL_PORT)) {
// Drive-select reads still show the always-one bits.
1385 *(uint8_t *)dst = 0xa0;
1387 *(uint8_t *)dst = 0;
1395 // This is really the error register.
1396 case PRI_FEATURES_PORT:
1397 case SEC_FEATURES_PORT:
1398 *(uint8_t *)dst = channel->error_reg.val;
1401 case PRI_SECT_CNT_PORT:
1402 case SEC_SECT_CNT_PORT:
1403 *(uint8_t *)dst = drive->sector_count;
1406 case PRI_SECT_NUM_PORT:
1407 case SEC_SECT_NUM_PORT:
1408 *(uint8_t *)dst = drive->sector_num;
1411 case PRI_CYL_LOW_PORT:
1412 case SEC_CYL_LOW_PORT:
1413 *(uint8_t *)dst = drive->cylinder_low;
1417 case PRI_CYL_HIGH_PORT:
1418 case SEC_CYL_HIGH_PORT:
1419 *(uint8_t *)dst = drive->cylinder_high;
1422 case PRI_DRV_SEL_PORT:
1423 case SEC_DRV_SEL_PORT: // hard disk drive and head register 0x1f6
1424 *(uint8_t *)dst = channel->drive_head.val;
// Status read (elided case labels above this line in the original).
1431 // Something about lowering interrupts here....
1432 *(uint8_t *)dst = channel->status.val;
1436 PrintError(core->vm_info, core, "Invalid Port: %x\n", port);
1440 PrintDebug(core->vm_info, core, "\tVal=%x\n", *(uint8_t *)dst);
// Reset one emulated drive slot to its power-on defaults (no backing
// device attached, empty model string, zeroed transfer state and geometry).
1447 static void init_drive(struct ide_drive * drive) {
// Sector count/number start at 0x01 -- the ATA reset signature values.
1449 drive->sector_count = 0x01;
1450 drive->sector_num = 0x01;
1451 drive->cylinder = 0x0000;
1453 drive->drive_type = BLOCK_NONE;
// Zeroing model[] here is what keeps the later strncpy in connect_fn
// NUL-terminated.
1455 memset(drive->model, 0, sizeof(drive->model));
1457 drive->transfer_index = 0;
1458 drive->transfer_length = 0;
1459 memset(drive->data_buf, 0, sizeof(drive->data_buf));
1461 drive->num_cylinders = 0;
1462 drive->num_heads = 0;
1463 drive->num_sectors = 0;
1466 drive->private_data = NULL;
// Reset an IDE channel: register file, bus-master DMA engine state, and
// both drive slots.
1470 static void init_channel(struct ide_channel * channel) {
// Error register powers up as 0x01 (device diagnostics passed).
1473 channel->error_reg.val = 0x01;
1475 //** channel->features = 0x0;
1477 channel->drive_head.val = 0x00;
1478 channel->status.val = 0x00;
1479 channel->cmd_reg = 0x00;
// NOTE(review): ctrl_reg starts at 0x08 -- presumably the always-one
// reserved bit of the device-control register; confirm in ide-types.h.
1480 channel->ctrl_reg.val = 0x08;
// Bus-master DMA engine idle.
1482 channel->dma_cmd.val = 0;
1483 channel->dma_status.val = 0;
1484 channel->dma_prd_addr = 0;
1485 channel->dma_tbl_index = 0;
1487 for (i = 0; i < 2; i++) {
1488 init_drive(&(channel->drives[i]));
1494 static int pci_config_update(struct pci_device * pci_dev, uint32_t reg_num, void * src, uint_t length, void * private_data) {
1495 PrintDebug(VM_NONE, VCORE_NONE, "PCI Config Update\n");
1497 struct ide_internal * ide = (struct ide_internal *)(private_data);
1499 PrintDebug(VM_NONE, VCORE_NONE, info, "\t\tInterupt register (Dev=%s), irq=%d\n", ide->ide_pci->name, ide->ide_pci->config_header.intr_line);
// Initialize both IDE channels and assign the legacy ISA IRQs
// (14 for primary, 15 for secondary).
1505 static int init_ide_state(struct ide_internal * ide) {
1508 * Check if the PIIX 3 actually represents both IDE channels in a single PCI entry
1511 init_channel(&(ide->channels[0]));
1512 ide->channels[0].irq = PRI_DEFAULT_IRQ ;
1514 init_channel(&(ide->channels[1]));
1515 ide->channels[1].irq = SEC_DEFAULT_IRQ ;
// Device teardown hook (dev_ops.free). As the TODO below notes, the PCI
// registration is not undone here.
1524 static int ide_free(struct ide_internal * ide) {
1526 // deregister from PCI?
1533 #ifdef V3_CONFIG_CHECKPOINT
1535 #include <palacios/vmm_sprintf.h>
// Checkpoint save callback: serializes controller state into named
// contexts -- "<id>" for the controller, "<id>-<ch>" per channel, and
// "<id>-<ch>-<drv>" per drive. Drive-type-specific state (ATAPI sense
// data vs. HD sector bookkeeping) is appended at the end of each drive
// context. Returns via savefailout on any error, closing any open context.
1537 static int ide_save_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1538 struct ide_internal * ide = (struct ide_internal *)private_data;
1539 struct v3_chkpt_ctx *ctx=0;
1545 ctx=v3_chkpt_open_ctx(chkpt,id);
1548 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for save\n");
1552 // nothing saved yet
1554 v3_chkpt_close_ctx(ctx);ctx=0;
// One context per channel holds the channel-level register file.
1557 for (ch_num = 0; ch_num < 2; ch_num++) {
1558 struct ide_channel * ch = &(ide->channels[ch_num]);
1560 snprintf(buf, 128, "%s-%d", id, ch_num);
1562 ctx = v3_chkpt_open_ctx(chkpt, buf);
1565 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save channel %d\n",ch_num);
1569 V3_CHKPT_SAVE(ctx, "ERROR", ch->error_reg.val, savefailout);
1570 V3_CHKPT_SAVE(ctx, "FEATURES", ch->features.val, savefailout);
1571 V3_CHKPT_SAVE(ctx, "DRIVE_HEAD", ch->drive_head.val, savefailout);
1572 V3_CHKPT_SAVE(ctx, "STATUS", ch->status.val, savefailout);
1573 V3_CHKPT_SAVE(ctx, "CMD_REG", ch->cmd_reg, savefailout);
1574 V3_CHKPT_SAVE(ctx, "CTRL_REG", ch->ctrl_reg.val, savefailout);
1575 V3_CHKPT_SAVE(ctx, "DMA_CMD", ch->dma_cmd.val, savefailout);
1576 V3_CHKPT_SAVE(ctx, "DMA_STATUS", ch->dma_status.val, savefailout);
1577 V3_CHKPT_SAVE(ctx, "PRD_ADDR", ch->dma_prd_addr, savefailout);
1578 V3_CHKPT_SAVE(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, savefailout);
1580 v3_chkpt_close_ctx(ctx); ctx=0;
// Nested contexts for the two drives of this channel.
1582 for (drive_num = 0; drive_num < 2; drive_num++) {
1583 struct ide_drive * drive = &(ch->drives[drive_num]);
1585 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1587 ctx = v3_chkpt_open_ctx(chkpt, buf);
1590 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to save drive %d\n",drive_num);
1594 V3_CHKPT_SAVE(ctx, "DRIVE_TYPE", drive->drive_type, savefailout);
1595 V3_CHKPT_SAVE(ctx, "SECTOR_COUNT", drive->sector_count, savefailout);
1596 V3_CHKPT_SAVE(ctx, "SECTOR_NUM", drive->sector_num, savefailout);
1597 V3_CHKPT_SAVE(ctx, "CYLINDER", drive->cylinder,savefailout);
1599 V3_CHKPT_SAVE(ctx, "CURRENT_LBA", drive->current_lba, savefailout);
1600 V3_CHKPT_SAVE(ctx, "TRANSFER_LENGTH", drive->transfer_length, savefailout);
1601 V3_CHKPT_SAVE(ctx, "TRANSFER_INDEX", drive->transfer_index, savefailout);
1603 V3_CHKPT_SAVE(ctx, "DATA_BUF", drive->data_buf, savefailout);
1606 /* For now we'll just pack the type specific data at the end... */
1607 /* We should probably add a new context here in the future... */
1608 if (drive->drive_type == BLOCK_CDROM) {
1609 V3_CHKPT_SAVE(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, savefailout);
1610 V3_CHKPT_SAVE(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, savefailout);
1611 V3_CHKPT_SAVE(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, savefailout);
1612 } else if (drive->drive_type == BLOCK_DISK) {
1613 V3_CHKPT_SAVE(ctx, "ACCESSED", drive->hd_state.accessed, savefailout);
1614 V3_CHKPT_SAVE(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, savefailout);
1615 V3_CHKPT_SAVE(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, savefailout);
1616 } else if (drive->drive_type == BLOCK_NONE) {
1617 // no drive connected, so no data
1619 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
1623 v3_chkpt_close_ctx(ctx); ctx=0;
// Error path: close a still-open context, then report failure.
1631 PrintError(VM_NONE, VCORE_NONE, "Failed to save IDE\n");
1632 if (ctx) {v3_chkpt_close_ctx(ctx); }
// Checkpoint load callback: exact mirror of ide_save_extended -- the
// context names and field keys must stay in lockstep with the save path
// or restore will fail.
1638 static int ide_load_extended(struct v3_chkpt *chkpt, char *id, void * private_data) {
1639 struct ide_internal * ide = (struct ide_internal *)private_data;
1640 struct v3_chkpt_ctx *ctx=0;
1645 ctx=v3_chkpt_open_ctx(chkpt,id);
1648 PrintError(VM_NONE, VCORE_NONE, "Failed to open context for load\n");
1652 // nothing saved yet
1654 v3_chkpt_close_ctx(ctx);ctx=0;
// Restore channel-level register file, one context per channel.
1657 for (ch_num = 0; ch_num < 2; ch_num++) {
1658 struct ide_channel * ch = &(ide->channels[ch_num]);
1660 snprintf(buf, 128, "%s-%d", id, ch_num);
1662 ctx = v3_chkpt_open_ctx(chkpt, buf);
1665 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load channel %d\n",ch_num);
1669 V3_CHKPT_LOAD(ctx, "ERROR", ch->error_reg.val, loadfailout);
1670 V3_CHKPT_LOAD(ctx, "FEATURES", ch->features.val, loadfailout);
1671 V3_CHKPT_LOAD(ctx, "DRIVE_HEAD", ch->drive_head.val, loadfailout);
1672 V3_CHKPT_LOAD(ctx, "STATUS", ch->status.val, loadfailout);
1673 V3_CHKPT_LOAD(ctx, "CMD_REG", ch->cmd_reg, loadfailout);
1674 V3_CHKPT_LOAD(ctx, "CTRL_REG", ch->ctrl_reg.val, loadfailout);
1675 V3_CHKPT_LOAD(ctx, "DMA_CMD", ch->dma_cmd.val, loadfailout);
1676 V3_CHKPT_LOAD(ctx, "DMA_STATUS", ch->dma_status.val, loadfailout);
1677 V3_CHKPT_LOAD(ctx, "PRD_ADDR", ch->dma_prd_addr, loadfailout);
1678 V3_CHKPT_LOAD(ctx, "DMA_TBL_IDX", ch->dma_tbl_index, loadfailout);
1680 v3_chkpt_close_ctx(ctx); ctx=0;
// Restore per-drive state from the nested drive contexts.
1682 for (drive_num = 0; drive_num < 2; drive_num++) {
1683 struct ide_drive * drive = &(ch->drives[drive_num]);
1685 snprintf(buf, 128, "%s-%d-%d", id, ch_num, drive_num);
1687 ctx = v3_chkpt_open_ctx(chkpt, buf);
1690 PrintError(VM_NONE, VCORE_NONE, "Unable to open context to load drive %d\n",drive_num);
1694 V3_CHKPT_LOAD(ctx, "DRIVE_TYPE", drive->drive_type, loadfailout);
1695 V3_CHKPT_LOAD(ctx, "SECTOR_COUNT", drive->sector_count, loadfailout);
1696 V3_CHKPT_LOAD(ctx, "SECTOR_NUM", drive->sector_num, loadfailout);
1697 V3_CHKPT_LOAD(ctx, "CYLINDER", drive->cylinder,loadfailout);
1699 V3_CHKPT_LOAD(ctx, "CURRENT_LBA", drive->current_lba, loadfailout);
1700 V3_CHKPT_LOAD(ctx, "TRANSFER_LENGTH", drive->transfer_length, loadfailout);
1701 V3_CHKPT_LOAD(ctx, "TRANSFER_INDEX", drive->transfer_index, loadfailout);
1703 V3_CHKPT_LOAD(ctx, "DATA_BUF", drive->data_buf, loadfailout);
1706 /* For now we'll just pack the type specific data at the end... */
1707 /* We should probably add a new context here in the future... */
// NOTE(review): drive_type is loaded from the checkpoint before being
// trusted here; a corrupt image with a bogus type only hits the
// PrintError below.
1708 if (drive->drive_type == BLOCK_CDROM) {
1709 V3_CHKPT_LOAD(ctx, "ATAPI_SENSE_DATA", drive->cd_state.sense.buf, loadfailout);
1710 V3_CHKPT_LOAD(ctx, "ATAPI_CMD", drive->cd_state.atapi_cmd, loadfailout);
1711 V3_CHKPT_LOAD(ctx, "ATAPI_ERR_RECOVERY", drive->cd_state.err_recovery.buf, loadfailout);
1712 } else if (drive->drive_type == BLOCK_DISK) {
1713 V3_CHKPT_LOAD(ctx, "ACCESSED", drive->hd_state.accessed, loadfailout);
1714 V3_CHKPT_LOAD(ctx, "MULT_SECT_NUM", drive->hd_state.mult_sector_num, loadfailout);
1715 V3_CHKPT_LOAD(ctx, "CUR_SECT_NUM", drive->hd_state.cur_sector_num, loadfailout);
1716 } else if (drive->drive_type == BLOCK_NONE) {
1717 // no drive connected, so no data
1719 PrintError(VM_NONE, VCORE_NONE, "Invalid drive type %d\n",drive->drive_type);
// Error path: close a still-open context, then report failure.
1728 PrintError(VM_NONE, VCORE_NONE, "Failed to load IDE\n");
1729 if (ctx) {v3_chkpt_close_ctx(ctx); }
// Device-manager operations table for the IDE device; checkpoint hooks
// are compiled in only with V3_CONFIG_CHECKPOINT.
1739 static struct v3_device_ops dev_ops = {
1740 .free = (int (*)(void *))ide_free,
1741 #ifdef V3_CONFIG_CHECKPOINT
1742 .save_extended = ide_save_extended,
1743 .load_extended = ide_load_extended
// Block-frontend connect callback: attaches a backing block device
// (type "cdrom" or "hd") to the (bus_num, drive_num) slot named in the
// config tree. Called once per <frontend> entry by the block frontend layer.
1750 static int connect_fn(struct v3_vm_info * vm,
1751 void * frontend_data,
1752 struct v3_dev_blk_ops * ops,
1753 v3_cfg_tree_t * cfg,
1754 void * private_data) {
1755 struct ide_internal * ide = (struct ide_internal *)(frontend_data);
1756 struct ide_channel * channel = NULL;
1757 struct ide_drive * drive = NULL;
1759 char * bus_str = v3_cfg_val(cfg, "bus_num");
1760 char * drive_str = v3_cfg_val(cfg, "drive_num");
1761 char * type_str = v3_cfg_val(cfg, "type");
1762 char * model_str = v3_cfg_val(cfg, "model");
1764 uint_t drive_num = 0;
// bus_num, drive_num and type are mandatory; model is optional.
1767 if ((!type_str) || (!drive_str) || (!bus_str)) {
1768 PrintError(vm, VCORE_NONE, "Incomplete IDE Configuration\n");
// NOTE(review): the atoi results are used to index channels[2]/drives[2]
// with no 0..1 range check -- a bad config value indexes out of bounds.
// Confirm whether the config layer validates these upstream.
1772 bus_num = atoi(bus_str);
1773 drive_num = atoi(drive_str);
1775 channel = &(ide->channels[bus_num]);
1776 drive = &(channel->drives[drive_num]);
1778 if (drive->drive_type != BLOCK_NONE) {
1779 PrintError(vm, VCORE_NONE, "Device slot (bus=%d, drive=%d) already occupied\n", bus_num, drive_num);
// model[] was zeroed in init_drive(), so this strncpy result stays
// NUL-terminated.
1783 if (model_str != NULL) {
1784 strncpy(drive->model, model_str, sizeof(drive->model) - 1);
// CD-ROM: pad the model string with spaces out to 40 characters, the
// fixed-width format expected in the IDENTIFY data.
1787 if (strcasecmp(type_str, "cdrom") == 0) {
1788 drive->drive_type = BLOCK_CDROM;
1790 while (strlen((char *)(drive->model)) < 40) {
1791 strcat((char*)(drive->model), " ");
1794 } else if (strcasecmp(type_str, "hd") == 0) {
1795 drive->drive_type = BLOCK_DISK;
1797 drive->hd_state.accessed = 0;
1798 drive->hd_state.mult_sector_num = 1;
// Fixed 63 sectors/track x 16 heads CHS geometry; cylinder count is
// derived from the backend's reported byte capacity.
1800 drive->num_sectors = 63;
1801 drive->num_heads = 16;
1802 drive->num_cylinders = (ops->get_capacity(private_data) / HD_SECTOR_SIZE) / (drive->num_sectors * drive->num_heads);
1804 PrintError(vm, VCORE_NONE, "invalid IDE drive type\n");
// Enable the channel in the controller's PCI config space (PIIX IDE
// timing register for this bus).
1811 // Hardcode this for now, but its not a good idea....
1812 ide->ide_pci->config_space[0x41 + (bus_num * 2)] = 0x80;
1815 drive->private_data = private_data;
// Device init entry point: allocates controller state, optionally wires it
// to a PCI bus/southbridge, registers the device, hooks all legacy IDE I/O
// ports, exposes the bus-master DMA BAR via PCI, and registers as a block
// frontend so backends can attach drives through connect_fn.
1823 static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
1824 struct ide_internal * ide = NULL;
1825 char * dev_id = v3_cfg_val(cfg, "ID");
1828 PrintDebug(vm, VCORE_NONE, "IDE: Initializing IDE\n");
1830 ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
1833 PrintError(vm, VCORE_NONE, "Error allocating IDE state\n");
1837 memset(ide, 0, sizeof(struct ide_internal));
// PCI attachment is optional: only resolved when a "bus" is configured,
// and then a southbridge ("controller") is mandatory.
1840 ide->pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
1842 if (ide->pci_bus != NULL) {
1843 struct vm_device * southbridge = v3_find_dev(vm, v3_cfg_val(cfg, "controller"));
1846 PrintError(vm, VCORE_NONE, "Could not find southbridge\n");
1851 ide->southbridge = (struct v3_southbridge *)(southbridge->private_data);
1854 PrintDebug(vm, VCORE_NONE, "IDE: Creating IDE bus x 2\n");
1856 struct vm_device * dev = v3_add_device(vm, dev_id, &dev_ops, ide);
1859 PrintError(vm, VCORE_NONE, "Could not attach device %s\n", dev_id);
1864 if (init_ide_state(ide) == -1) {
1865 PrintError(vm, VCORE_NONE, "Failed to initialize IDE state\n");
1866 v3_remove_device(dev);
// Hook the legacy command-block, control and address ports for both
// channels; failures are accumulated in ret and checked once below.
1870 PrintDebug(vm, VCORE_NONE, "Connecting to IDE IO ports\n");
1872 ret |= v3_dev_hook_io(dev, PRI_DATA_PORT,
1873 &ide_read_data_port, &write_data_port);
1874 ret |= v3_dev_hook_io(dev, PRI_FEATURES_PORT,
1875 &read_port_std, &write_port_std);
1876 ret |= v3_dev_hook_io(dev, PRI_SECT_CNT_PORT,
1877 &read_port_std, &write_port_std);
1878 ret |= v3_dev_hook_io(dev, PRI_SECT_NUM_PORT,
1879 &read_port_std, &write_port_std);
1880 ret |= v3_dev_hook_io(dev, PRI_CYL_LOW_PORT,
1881 &read_port_std, &write_port_std);
1882 ret |= v3_dev_hook_io(dev, PRI_CYL_HIGH_PORT,
1883 &read_port_std, &write_port_std);
1884 ret |= v3_dev_hook_io(dev, PRI_DRV_SEL_PORT,
1885 &read_port_std, &write_port_std);
1886 ret |= v3_dev_hook_io(dev, PRI_CMD_PORT,
1887 &read_port_std, &write_cmd_port);
1889 ret |= v3_dev_hook_io(dev, SEC_DATA_PORT,
1890 &ide_read_data_port, &write_data_port);
1891 ret |= v3_dev_hook_io(dev, SEC_FEATURES_PORT,
1892 &read_port_std, &write_port_std);
1893 ret |= v3_dev_hook_io(dev, SEC_SECT_CNT_PORT,
1894 &read_port_std, &write_port_std);
1895 ret |= v3_dev_hook_io(dev, SEC_SECT_NUM_PORT,
1896 &read_port_std, &write_port_std);
1897 ret |= v3_dev_hook_io(dev, SEC_CYL_LOW_PORT,
1898 &read_port_std, &write_port_std);
1899 ret |= v3_dev_hook_io(dev, SEC_CYL_HIGH_PORT,
1900 &read_port_std, &write_port_std);
1901 ret |= v3_dev_hook_io(dev, SEC_DRV_SEL_PORT,
1902 &read_port_std, &write_port_std);
1903 ret |= v3_dev_hook_io(dev, SEC_CMD_PORT,
1904 &read_port_std, &write_cmd_port);
1907 ret |= v3_dev_hook_io(dev, PRI_CTRL_PORT,
1908 &read_port_std, &write_port_std);
1910 ret |= v3_dev_hook_io(dev, SEC_CTRL_PORT,
1911 &read_port_std, &write_port_std);
1914 ret |= v3_dev_hook_io(dev, SEC_ADDR_REG_PORT,
1915 &read_port_std, &write_port_std);
1917 ret |= v3_dev_hook_io(dev, PRI_ADDR_REG_PORT,
1918 &read_port_std, &write_port_std);
1922 PrintError(vm, VCORE_NONE, "Error hooking IDE IO port\n");
1923 v3_remove_device(dev);
// PCI path: register a PIIX-style IDE function with only BAR4 (the
// 16-port bus-master DMA block) populated.
1929 struct v3_pci_bar bars[6];
1930 struct v3_southbridge * southbridge = (struct v3_southbridge *)(ide->southbridge);
1931 struct pci_device * sb_pci = (struct pci_device *)(southbridge->southbridge_pci);
1932 struct pci_device * pci_dev = NULL;
1935 PrintDebug(vm, VCORE_NONE, "Connecting IDE to PCI bus\n");
1937 for (i = 0; i < 6; i++) {
1938 bars[i].type = PCI_BAR_NONE;
1941 bars[4].type = PCI_BAR_IO;
1942 // bars[4].default_base_port = PRI_DEFAULT_DMA_PORT;
// -1 lets the PCI layer assign the BAR's base port.
1943 bars[4].default_base_port = -1;
1944 bars[4].num_ports = 16;
1946 bars[4].io_read = read_dma_port;
1947 bars[4].io_write = write_dma_port;
1948 bars[4].private_data = ide;
// Function 1 of the southbridge's PCI slot, per PIIX3 layout.
1950 pci_dev = v3_pci_register_device(ide->pci_bus, PCI_STD_DEVICE, 0, sb_pci->dev_num, 1,
1952 pci_config_update, NULL, NULL, NULL, ide);
1954 if (pci_dev == NULL) {
1955 PrintError(vm, VCORE_NONE, "Failed to register IDE BUS %d with PCI\n", i);
1956 v3_remove_device(dev);
1960 /* This is for CMD646 devices
1961 pci_dev->config_header.vendor_id = 0x1095;
1962 pci_dev->config_header.device_id = 0x0646;
1963 pci_dev->config_header.revision = 0x8f07;
// Advertise as Intel PIIX3 IDE (8086:7010), mass-storage/IDE class.
1966 pci_dev->config_header.vendor_id = 0x8086;
1967 pci_dev->config_header.device_id = 0x7010;
1968 pci_dev->config_header.revision = 0x00;
1970 pci_dev->config_header.prog_if = 0x80; // Master IDE device
1971 pci_dev->config_header.subclass = PCI_STORAGE_SUBCLASS_IDE;
1972 pci_dev->config_header.class = PCI_CLASS_STORAGE;
1974 pci_dev->config_header.command = 0;
1975 pci_dev->config_header.status = 0x0280;
1977 ide->ide_pci = pci_dev;
// Finally expose this device as a block frontend so drive backends can
// attach via connect_fn.
1982 if (v3_dev_add_blk_frontend(vm, dev_id, connect_fn, (void *)ide) == -1) {
1983 PrintError(vm, VCORE_NONE, "Could not register %s as frontend\n", dev_id);
1984 v3_remove_device(dev);
1989 PrintDebug(vm, VCORE_NONE, "IDE Initialized\n");
// Register this device with the Palacios device framework under the
// name "IDE", with ide_init as its constructor.
1995 device_register("IDE", ide_init)
2000 int v3_ide_get_geometry(void * ide_data, int channel_num, int drive_num,
2001 uint32_t * cylinders, uint32_t * heads, uint32_t * sectors) {
2003 struct ide_internal * ide = ide_data;
2004 struct ide_channel * channel = &(ide->channels[channel_num]);
2005 struct ide_drive * drive = &(channel->drives[drive_num]);
2007 if (drive->drive_type == BLOCK_NONE) {
2011 *cylinders = drive->num_cylinders;
2012 *heads = drive->num_heads;
2013 *sectors = drive->num_sectors;