// Internal PCI data
uint32_t val;
- int updated;
+ uint8_t updated;
uint32_t mask;
};
#define V3_CHKPT_STD_LOAD(ctx,x) v3_chkpt_load(ctx,#x,sizeof(x),&(x))
+
int v3_chkpt_save(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf);
int v3_chkpt_load(struct v3_chkpt_ctx * ctx, char * tag, uint64_t len, void * buf);
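+
+/* Fixed-width convenience wrappers around the generic tagged checkpoint
+ * interface: each saves or restores a single value of the stated byte width
+ * under the given tag. */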
+static inline int v3_chkpt_save_64(struct v3_chkpt_ctx * ctx, char * tag, void * buf) {
+ return v3_chkpt_save(ctx, tag, 8, buf);
+}
+static inline int v3_chkpt_save_32(struct v3_chkpt_ctx * ctx, char * tag, void * buf) {
+ return v3_chkpt_save(ctx, tag, 4, buf);
+}
+static inline int v3_chkpt_save_16(struct v3_chkpt_ctx * ctx, char * tag, void * buf) {
+ return v3_chkpt_save(ctx, tag, 2, buf);
+}
+static inline int v3_chkpt_save_8(struct v3_chkpt_ctx * ctx, char * tag, void * buf) {
+ return v3_chkpt_save(ctx, tag, 1, buf);
+}
+
+static inline int v3_chkpt_load_64(struct v3_chkpt_ctx * ctx, char * tag, void * buf) {
+ return v3_chkpt_load(ctx, tag, 8, buf);
+}
+static inline int v3_chkpt_load_32(struct v3_chkpt_ctx * ctx, char * tag, void * buf) {
+ return v3_chkpt_load(ctx, tag, 4, buf);
+}
+static inline int v3_chkpt_load_16(struct v3_chkpt_ctx * ctx, char * tag, void * buf) {
+ return v3_chkpt_load(ctx, tag, 2, buf);
+}
+static inline int v3_chkpt_load_8(struct v3_chkpt_ctx * ctx, char * tag, void * buf) {
+ return v3_chkpt_load(ctx, tag, 1, buf);
+}
+
+
+
int v3_chkpt_close_ctx(struct v3_chkpt_ctx * ctx);
struct v3_chkpt_ctx * v3_chkpt_open_ctx(struct v3_chkpt * chkpt, struct v3_chkpt_ctx * parent, char * name);
struct pic_internal {
- uchar_t master_irr;
- uchar_t slave_irr;
+ uint8_t master_irr;
+ uint8_t slave_irr;
- uchar_t master_isr;
- uchar_t slave_isr;
+ uint8_t master_isr;
+ uint8_t slave_isr;
- uchar_t master_elcr;
- uchar_t slave_elcr;
- uchar_t master_elcr_mask;
- uchar_t slave_elcr_mask;
+ uint8_t master_elcr;
+ uint8_t slave_elcr;
+ uint8_t master_elcr_mask;
+ uint8_t slave_elcr_mask;
- uchar_t master_icw1;
- uchar_t master_icw2;
- uchar_t master_icw3;
- uchar_t master_icw4;
+ uint8_t master_icw1;
+ uint8_t master_icw2;
+ uint8_t master_icw3;
+ uint8_t master_icw4;
- uchar_t slave_icw1;
- uchar_t slave_icw2;
- uchar_t slave_icw3;
- uchar_t slave_icw4;
+ uint8_t slave_icw1;
+ uint8_t slave_icw2;
+ uint8_t slave_icw3;
+ uint8_t slave_icw4;
- uchar_t master_imr;
- uchar_t slave_imr;
- uchar_t master_ocw2;
- uchar_t master_ocw3;
- uchar_t slave_ocw2;
- uchar_t slave_ocw3;
+ uint8_t master_imr;
+ uint8_t slave_imr;
+ uint8_t master_ocw2;
+ uint8_t master_ocw3;
+ uint8_t slave_ocw2;
+ uint8_t slave_ocw3;
pic_state_t master_state;
pic_state_t slave_state;
}
if ((state->master_ocw3 & 0x03) == 0x02) {
- *(uchar_t *)dst = state->master_irr;
+ *(uint8_t *)dst = state->master_irr;
} else if ((state->master_ocw3 & 0x03) == 0x03) {
- *(uchar_t *)dst = state->master_isr;
+ *(uint8_t *)dst = state->master_isr;
} else {
- *(uchar_t *)dst = 0;
+ *(uint8_t *)dst = 0;
}
return 1;
return -1;
}
- *(uchar_t *)dst = state->master_imr;
+ *(uint8_t *)dst = state->master_imr;
return 1;
}
if ((state->slave_ocw3 & 0x03) == 0x02) {
- *(uchar_t*)dst = state->slave_irr;
+ *(uint8_t*)dst = state->slave_irr;
} else if ((state->slave_ocw3 & 0x03) == 0x03) {
- *(uchar_t *)dst = state->slave_isr;
+ *(uint8_t *)dst = state->slave_isr;
} else {
- *(uchar_t *)dst = 0;
+ *(uint8_t *)dst = 0;
}
return 1;
return -1;
}
- *(uchar_t *)dst = state->slave_imr;
+ *(uint8_t *)dst = state->slave_imr;
return 1;
}
static int write_master_port1(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
struct pic_internal * state = (struct pic_internal *)priv_data;
- uchar_t cw = *(uchar_t *)src;
+ uint8_t cw = *(uint8_t *)src;
PrintDebug("8259 PIC: Write master port 1 with 0x%x\n",cw);
static int write_master_port2(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
struct pic_internal * state = (struct pic_internal *)priv_data;
- uchar_t cw = *(uchar_t *)src;
+ uint8_t cw = *(uint8_t *)src;
PrintDebug("8259 PIC: Write master port 2 with 0x%x\n",cw);
static int write_slave_port1(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
struct pic_internal * state = (struct pic_internal *)priv_data;
- uchar_t cw = *(uchar_t *)src;
+ uint8_t cw = *(uint8_t *)src;
PrintDebug("8259 PIC: Write slave port 1 with 0x%x\n",cw);
static int write_slave_port2(struct guest_info * core, ushort_t port, void * src, uint_t length, void * priv_data) {
struct pic_internal * state = (struct pic_internal *)priv_data;
- uchar_t cw = *(uchar_t *)src;
+ uint8_t cw = *(uint8_t *)src;
PrintDebug("8259 PIC: Write slave port 2 with 0x%x\n",cw);
static int pic_save(struct v3_chkpt_ctx * ctx, void * private_data) {
struct pic_internal * pic = (struct pic_internal *)private_data;
- V3_CHKPT_STD_SAVE(ctx, pic->master_irr);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_irr);
+ v3_chkpt_save_8(ctx, "MASTER_IRR", &(pic->master_irr));
+ v3_chkpt_save_8(ctx, "SLAVE_IRR", &(pic->slave_irr));
- V3_CHKPT_STD_SAVE(ctx, pic->master_isr);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_isr);
+ v3_chkpt_save_8(ctx, "MASTER_ISR", &(pic->master_isr));
+ v3_chkpt_save_8(ctx, "SLAVE_ISR", &(pic->slave_isr));
- V3_CHKPT_STD_SAVE(ctx, pic->master_elcr);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_elcr);
- V3_CHKPT_STD_SAVE(ctx, pic->master_elcr_mask);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_elcr_mask);
+ v3_chkpt_save_8(ctx, "MASTER_ELCR", &(pic->master_elcr));
+ v3_chkpt_save_8(ctx, "SLAVE_ELCR", &(pic->slave_elcr));
+ v3_chkpt_save_8(ctx, "MASTER_ELCR_MASK", &(pic->master_elcr_mask));
+ v3_chkpt_save_8(ctx, "SLAVE_ELCR_MASK", &(pic->slave_elcr_mask));
- V3_CHKPT_STD_SAVE(ctx, pic->master_icw1);
- V3_CHKPT_STD_SAVE(ctx, pic->master_icw2);
- V3_CHKPT_STD_SAVE(ctx, pic->master_icw3);
- V3_CHKPT_STD_SAVE(ctx, pic->master_icw4);
+ v3_chkpt_save_8(ctx, "MASTER_ICW1", &(pic->master_icw1));
+ v3_chkpt_save_8(ctx, "MASTER_ICW2", &(pic->master_icw2));
+ v3_chkpt_save_8(ctx, "MASTER_ICW3", &(pic->master_icw3));
+ v3_chkpt_save_8(ctx, "MASTER_ICW4", &(pic->master_icw4));
- V3_CHKPT_STD_SAVE(ctx, pic->slave_icw1);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_icw2);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_icw3);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_icw4);
+ v3_chkpt_save_8(ctx, "SLAVE_ICW1", &(pic->slave_icw1));
+ v3_chkpt_save_8(ctx, "SLAVE_ICW2", &(pic->slave_icw2));
+ v3_chkpt_save_8(ctx, "SLAVE_ICW3", &(pic->slave_icw3));
+ v3_chkpt_save_8(ctx, "SLAVE_ICW4", &(pic->slave_icw4));
- V3_CHKPT_STD_SAVE(ctx, pic->master_imr);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_imr);
- V3_CHKPT_STD_SAVE(ctx, pic->master_ocw2);
- V3_CHKPT_STD_SAVE(ctx, pic->master_ocw3);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_ocw2);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_ocw3);
+ v3_chkpt_save_8(ctx, "MASTER_IMR", &(pic->master_imr));
+ v3_chkpt_save_8(ctx, "SLAVE_IMR", &(pic->slave_imr));
+ v3_chkpt_save_8(ctx, "MASTER_OCW2", &(pic->master_ocw2));
+ v3_chkpt_save_8(ctx, "MASTER_OCW3", &(pic->master_ocw3));
+ v3_chkpt_save_8(ctx, "SLAVE_OCW2", &(pic->slave_ocw2));
+ v3_chkpt_save_8(ctx, "SLAVE_OCW3", &(pic->slave_ocw3));
- V3_CHKPT_STD_SAVE(ctx, pic->master_state);
- V3_CHKPT_STD_SAVE(ctx, pic->slave_state);
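+    /* pic_state_t is an enum; only its low byte is checkpointed here, which
+     * assumes the state values fit in 8 bits. */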
+ v3_chkpt_save_8(ctx, "MASTER_STATE", &(pic->master_state));
+ v3_chkpt_save_8(ctx, "SLAVE_STATE", &(pic->slave_state));
return 0;
static int pic_load(struct v3_chkpt_ctx * ctx, void * private_data) {
struct pic_internal * pic = (struct pic_internal *)private_data;
- V3_CHKPT_STD_LOAD(ctx, pic->master_irr);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_irr);
+
+ v3_chkpt_load_8(ctx, "MASTER_IRR", &(pic->master_irr));
+ v3_chkpt_load_8(ctx, "SLAVE_IRR", &(pic->slave_irr));
- V3_CHKPT_STD_LOAD(ctx, pic->master_isr);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_isr);
+ v3_chkpt_load_8(ctx, "MASTER_ISR", &(pic->master_isr));
+ v3_chkpt_load_8(ctx, "SLAVE_ISR", &(pic->slave_isr));
- V3_CHKPT_STD_LOAD(ctx, pic->master_elcr);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_elcr);
- V3_CHKPT_STD_LOAD(ctx, pic->master_elcr_mask);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_elcr_mask);
+ v3_chkpt_load_8(ctx, "MASTER_ELCR", &(pic->master_elcr));
+ v3_chkpt_load_8(ctx, "SLAVE_ELCR", &(pic->slave_elcr));
+ v3_chkpt_load_8(ctx, "MASTER_ELCR_MASK", &(pic->master_elcr_mask));
+ v3_chkpt_load_8(ctx, "SLAVE_ELCR_MASK", &(pic->slave_elcr_mask));
- V3_CHKPT_STD_LOAD(ctx, pic->master_icw1);
- V3_CHKPT_STD_LOAD(ctx, pic->master_icw2);
- V3_CHKPT_STD_LOAD(ctx, pic->master_icw3);
- V3_CHKPT_STD_LOAD(ctx, pic->master_icw4);
+ v3_chkpt_load_8(ctx, "MASTER_ICW1", &(pic->master_icw1));
+ v3_chkpt_load_8(ctx, "MASTER_ICW2", &(pic->master_icw2));
+ v3_chkpt_load_8(ctx, "MASTER_ICW3", &(pic->master_icw3));
+ v3_chkpt_load_8(ctx, "MASTER_ICW4", &(pic->master_icw4));
- V3_CHKPT_STD_LOAD(ctx, pic->slave_icw1);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_icw2);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_icw3);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_icw4);
+ v3_chkpt_load_8(ctx, "SLAVE_ICW1", &(pic->slave_icw1));
+ v3_chkpt_load_8(ctx, "SLAVE_ICW2", &(pic->slave_icw2));
+ v3_chkpt_load_8(ctx, "SLAVE_ICW3", &(pic->slave_icw3));
+ v3_chkpt_load_8(ctx, "SLAVE_ICW4", &(pic->slave_icw4));
- V3_CHKPT_STD_LOAD(ctx, pic->master_imr);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_imr);
- V3_CHKPT_STD_LOAD(ctx, pic->master_ocw2);
- V3_CHKPT_STD_LOAD(ctx, pic->master_ocw3);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_ocw2);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_ocw3);
+ v3_chkpt_load_8(ctx, "MASTER_IMR", &(pic->master_imr));
+ v3_chkpt_load_8(ctx, "SLAVE_IMR", &(pic->slave_imr));
+ v3_chkpt_load_8(ctx, "MASTER_OCW2", &(pic->master_ocw2));
+ v3_chkpt_load_8(ctx, "MASTER_OCW3", &(pic->master_ocw3));
+ v3_chkpt_load_8(ctx, "SLAVE_OCW2", &(pic->slave_ocw2));
+ v3_chkpt_load_8(ctx, "SLAVE_OCW3", &(pic->slave_ocw3));
- V3_CHKPT_STD_LOAD(ctx, pic->master_state);
- V3_CHKPT_STD_LOAD(ctx, pic->slave_state);
+ v3_chkpt_load_8(ctx, "MASTER_STATE", &(pic->master_state));
+ v3_chkpt_load_8(ctx, "SLAVE_STATE", &(pic->slave_state));
return 0;
}
} __attribute__((packed));
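+
+/* ATAPI "error recovery" mode page. The anonymous union overlays a raw
+ * 12-byte buffer on the packed bit fields so the page can be checkpointed
+ * and restored as an opaque blob (see the ATAPI_ERR_RECOVERY tag below). */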
+struct atapi_error_recovery {
+ union {
+ uint8_t buf[12];
+ struct {
+ uint8_t page_code : 6;
+ uint8_t rsvd : 1;
+ uint8_t page_ctrl : 1;
+ uint8_t page_len;
+ uint8_t dcr : 1;
+ uint8_t dte : 1;
+ uint8_t per : 1;
+ uint8_t rsvd1 : 1;
+ uint8_t rc : 1;
+ uint8_t tb : 1;
+ uint8_t arre : 1;
+ uint8_t awre : 1;
+ uint8_t rd_retry_cnt;
+ uint8_t correct_spin;
+ uint8_t head_offset;
+ uint8_t data_strobe_offset;
+ uint8_t emcdr : 2;
+ uint8_t rsvd2 : 6;
+ uint8_t wr_retry_cnt;
+ uint8_t rsvd3;
+ uint16_t recovery_time_limit;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
+
+
struct atapi_read10_cmd {
-struct atapi_error_recovery {
- uint8_t page_code : 6;
- uint8_t rsvd : 1;
- uint8_t page_ctrl : 1;
- uint8_t page_len;
- uint8_t dcr : 1;
- uint8_t dte : 1;
- uint8_t per : 1;
- uint8_t rsvd1 : 1;
- uint8_t rc : 1;
- uint8_t tb : 1;
- uint8_t arre : 1;
- uint8_t awre : 1;
- uint8_t rd_retry_cnt;
- uint8_t correct_spin;
- uint8_t head_offset;
- uint8_t data_strobe_offset;
- uint8_t emcdr : 2;
- uint8_t rsvd2 : 6;
- uint8_t wr_retry_cnt;
- uint8_t rsvd3;
- uint16_t recovery_time_limit;
-} __attribute__((packed));
struct atapi_cdrom_caps {
static int cga_save(struct v3_chkpt_ctx * ctx, void * private_data) {
struct video_internal * cga = (struct video_internal *)private_data;
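+    /* The text-mode framebuffer is checkpointed in full so the restored
+     * guest sees the same screen contents. */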
+ v3_chkpt_save(ctx, "FRAMEBUFFER", FRAMEBUF_SIZE, cga->framebuf);
+
V3_CHKPT_STD_SAVE(ctx, cga->misc_outp_reg);
V3_CHKPT_STD_SAVE(ctx, cga->seq_index_reg);
V3_CHKPT_STD_SAVE(ctx, cga->seq_data_regs[SEQ_REG_COUNT]);
V3_CHKPT_STD_SAVE(ctx, cga->passthrough);
+ v3_chkpt_save_16(ctx, "SCREEN_OFFSET", &(cga->screen_offset));
+ v3_chkpt_save_16(ctx, "CURSOR_OFFSET", &(cga->cursor_offset));
+
return 0;
}
static int cga_load(struct v3_chkpt_ctx * ctx, void * private_data) {
struct video_internal * cga = (struct video_internal *)private_data;
+ v3_chkpt_load(ctx, "FRAMEBUFFER", FRAMEBUF_SIZE, cga->framebuf);
+
+
V3_CHKPT_STD_LOAD(ctx, cga->misc_outp_reg);
V3_CHKPT_STD_LOAD(ctx, cga->seq_index_reg);
V3_CHKPT_STD_LOAD(ctx, cga->seq_data_regs[SEQ_REG_COUNT]);
V3_CHKPT_STD_LOAD(ctx, cga->passthrough);
+ v3_chkpt_load_16(ctx, "SCREEN_OFFSET", &(cga->screen_offset));
+ v3_chkpt_load_16(ctx, "CURSOR_OFFSET", &(cga->cursor_offset));
+
+
return 0;
}
};
struct ide_hd_state {
- int accessed;
+ uint32_t accessed;
/* this is the multiple sector transfer size as configured for read/write multiple sectors*/
- uint_t mult_sector_num;
+ uint32_t mult_sector_num;
/* This is the current op sector size:
* for multiple sector ops this equals mult_sector_num
* for standard ops this equals 1
*/
- uint_t cur_sector_num;
+ uint32_t cur_sector_num;
};
struct ide_drive {
char model[41];
// Where we are in the data transfer
- uint_t transfer_index;
+ uint32_t transfer_index;
// the length of a transfer
// calculated for easy access
- uint_t transfer_length;
+ uint32_t transfer_length;
uint64_t current_lba;
struct ide_dma_cmd_reg dma_cmd;
struct ide_dma_status_reg dma_status;
uint32_t dma_prd_addr;
- uint_t dma_tbl_index;
+ uint32_t dma_tbl_index;
};
return 0;
}
+#ifdef V3_CONFIG_CHECKPOINT
+
+#include <palacios/vmm_sprintf.h>
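+
+/* The IDE state is checkpointed as a hierarchy of contexts: one child
+ * context per channel and one grandchild context per drive, so tag names
+ * only need to be unique within their own context. */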
+static int ide_save(struct v3_chkpt_ctx * ctx, void * private_data) {
+ struct ide_internal * ide = (struct ide_internal *)private_data;
+ int ch_num = 0;
+ int drive_num = 0;
+ char buf[128];
+
+
+ for (ch_num = 0; ch_num < 2; ch_num++) {
+ struct v3_chkpt_ctx * ch_ctx = NULL;
+ struct ide_channel * ch = &(ide->channels[ch_num]);
+
+ snprintf(buf, 128, "channel-%d", ch_num);
+ ch_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
+
+ v3_chkpt_save_8(ch_ctx, "ERROR", &(ch->error_reg.val));
+ v3_chkpt_save_8(ch_ctx, "FEATURES", &(ch->features.val));
+ v3_chkpt_save_8(ch_ctx, "DRIVE_HEAD", &(ch->drive_head.val));
+ v3_chkpt_save_8(ch_ctx, "STATUS", &(ch->status.val));
+ v3_chkpt_save_8(ch_ctx, "CMD_REG", &(ch->cmd_reg));
+ v3_chkpt_save_8(ch_ctx, "CTRL_REG", &(ch->ctrl_reg.val));
+ v3_chkpt_save_8(ch_ctx, "DMA_CMD", &(ch->dma_cmd.val));
+ v3_chkpt_save_8(ch_ctx, "DMA_STATUS", &(ch->dma_status.val));
+ v3_chkpt_save_32(ch_ctx, "PRD_ADDR", &(ch->dma_prd_addr));
+ v3_chkpt_save_32(ch_ctx, "DMA_TBL_IDX", &(ch->dma_tbl_index));
+
+
+ for (drive_num = 0; drive_num < 2; drive_num++) {
+ struct v3_chkpt_ctx * drive_ctx = NULL;
+ struct ide_drive * drive = &(ch->drives[drive_num]);
+
+ snprintf(buf, 128, "drive-%d-%d", ch_num, drive_num);
+ drive_ctx = v3_chkpt_open_ctx(ctx->chkpt, ch_ctx, buf);
+
+ v3_chkpt_save_8(drive_ctx, "DRIVE_TYPE", &(drive->drive_type));
+ v3_chkpt_save_8(drive_ctx, "SECTOR_COUNT", &(drive->sector_count));
+ v3_chkpt_save_8(drive_ctx, "SECTOR_NUM", &(drive->sector_num));
+ v3_chkpt_save_16(drive_ctx, "CYLINDER", &(drive->cylinder));
+
+ v3_chkpt_save_64(drive_ctx, "CURRENT_LBA", &(drive->current_lba));
+ v3_chkpt_save_32(drive_ctx, "TRANSFER_LENGTH", &(drive->transfer_length));
+ v3_chkpt_save_32(drive_ctx, "TRANSFER_INDEX", &(drive->transfer_index));
+
+ v3_chkpt_save(drive_ctx, "DATA_BUF", DATA_BUFFER_SIZE, drive->data_buf);
+
+
+ /* For now we'll just pack the type specific data at the end... */
+ /* We should probably add a new context here in the future... */
+ if (drive->drive_type == BLOCK_CDROM) {
+ v3_chkpt_save(drive_ctx, "ATAPI_SENSE_DATA", 18, drive->cd_state.sense.buf);
+ v3_chkpt_save_8(drive_ctx, "ATAPI_CMD", &(drive->cd_state.atapi_cmd));
+ v3_chkpt_save(drive_ctx, "ATAPI_ERR_RECOVERY", 12, drive->cd_state.err_recovery.buf);
+ } else if (drive->drive_type == BLOCK_DISK) {
+ v3_chkpt_save_32(drive_ctx, "ACCESSED", &(drive->hd_state.accessed));
+ v3_chkpt_save_32(drive_ctx, "MULT_SECT_NUM", &(drive->hd_state.mult_sector_num));
+ v3_chkpt_save_32(drive_ctx, "CUR_SECT_NUM", &(drive->hd_state.cur_sector_num));
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+
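+/* Restore mirrors ide_save, walking the same per-channel and per-drive
+ * contexts and tags in the same order. */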
+static int ide_load(struct v3_chkpt_ctx * ctx, void * private_data) {
+ struct ide_internal * ide = (struct ide_internal *)private_data;
+ int ch_num = 0;
+ int drive_num = 0;
+ char buf[128];
+
+
+ for (ch_num = 0; ch_num < 2; ch_num++) {
+ struct v3_chkpt_ctx * ch_ctx = NULL;
+ struct ide_channel * ch = &(ide->channels[ch_num]);
+
+ snprintf(buf, 128, "channel-%d", ch_num);
+ ch_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
+
+ v3_chkpt_load_8(ch_ctx, "ERROR", &(ch->error_reg.val));
+ v3_chkpt_load_8(ch_ctx, "FEATURES", &(ch->features.val));
+ v3_chkpt_load_8(ch_ctx, "DRIVE_HEAD", &(ch->drive_head.val));
+ v3_chkpt_load_8(ch_ctx, "STATUS", &(ch->status.val));
+ v3_chkpt_load_8(ch_ctx, "CMD_REG", &(ch->cmd_reg));
+ v3_chkpt_load_8(ch_ctx, "CTRL_REG", &(ch->ctrl_reg.val));
+ v3_chkpt_load_8(ch_ctx, "DMA_CMD", &(ch->dma_cmd.val));
+ v3_chkpt_load_8(ch_ctx, "DMA_STATUS", &(ch->dma_status.val));
+ v3_chkpt_load_32(ch_ctx, "PRD_ADDR", &(ch->dma_prd_addr));
+ v3_chkpt_load_32(ch_ctx, "DMA_TBL_IDX", &(ch->dma_tbl_index));
+
+
+ for (drive_num = 0; drive_num < 2; drive_num++) {
+ struct v3_chkpt_ctx * drive_ctx = NULL;
+ struct ide_drive * drive = &(ch->drives[drive_num]);
+
+ snprintf(buf, 128, "drive-%d-%d", ch_num, drive_num);
+ drive_ctx = v3_chkpt_open_ctx(ctx->chkpt, ch_ctx, buf);
+
+ v3_chkpt_load_8(drive_ctx, "DRIVE_TYPE", &(drive->drive_type));
+ v3_chkpt_load_8(drive_ctx, "SECTOR_COUNT", &(drive->sector_count));
+ v3_chkpt_load_8(drive_ctx, "SECTOR_NUM", &(drive->sector_num));
+ v3_chkpt_load_16(drive_ctx, "CYLINDER", &(drive->cylinder));
+
+ v3_chkpt_load_64(drive_ctx, "CURRENT_LBA", &(drive->current_lba));
+ v3_chkpt_load_32(drive_ctx, "TRANSFER_LENGTH", &(drive->transfer_length));
+ v3_chkpt_load_32(drive_ctx, "TRANSFER_INDEX", &(drive->transfer_index));
+
+ v3_chkpt_load(drive_ctx, "DATA_BUF", DATA_BUFFER_SIZE, drive->data_buf);
+
+
+ /* For now we'll just pack the type specific data at the end... */
+ /* We should probably add a new context here in the future... */
+ if (drive->drive_type == BLOCK_CDROM) {
+ v3_chkpt_load(drive_ctx, "ATAPI_SENSE_DATA", 18, drive->cd_state.sense.buf);
+ v3_chkpt_load_8(drive_ctx, "ATAPI_CMD", &(drive->cd_state.atapi_cmd));
+ v3_chkpt_load(drive_ctx, "ATAPI_ERR_RECOVERY", 12, drive->cd_state.err_recovery.buf);
+ } else if (drive->drive_type == BLOCK_DISK) {
+ v3_chkpt_load_32(drive_ctx, "ACCESSED", &(drive->hd_state.accessed));
+ v3_chkpt_load_32(drive_ctx, "MULT_SECT_NUM", &(drive->hd_state.mult_sector_num));
+ v3_chkpt_load_32(drive_ctx, "CUR_SECT_NUM", &(drive->hd_state.cur_sector_num));
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+
+#endif
+
static struct v3_device_ops dev_ops = {
.free = (int (*)(void *))ide_free,
+#ifdef V3_CONFIG_CHECKPOINT
+ .save = ide_save,
+ .load = ide_load
+#endif
};
// Data for system
uint8_t wrap;
- int mouse_enabled;
- int scancode_set;
+ uint8_t mouse_enabled;
+ uint8_t scancode_set;
struct queue kbd_queue;
struct queue mouse_queue;
#ifdef V3_CONFIG_CHECKPOINT
static int keyboard_save(struct v3_chkpt_ctx * ctx, void * private_data) {
+ struct keyboard_internal * kbd = (struct keyboard_internal *)private_data;
+
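+    /* Each piece of 8042 state is stored as a tagged single byte: the command
+     * and status registers, the controller and mouse state-machine states,
+     * the data latches, and the scancode-set / mouse-enable settings. */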
+ v3_chkpt_save_8(ctx, "CMD_REG", &(kbd->cmd.val));
+ v3_chkpt_save_8(ctx, "STATUS_REG", &(kbd->status.val));
+ v3_chkpt_save_8(ctx, "STATE", &(kbd->state));
+ v3_chkpt_save_8(ctx, "MOUSE_STATE", &(kbd->mouse_state));
+ v3_chkpt_save_8(ctx, "OUTPUT", &(kbd->output_byte));
+ v3_chkpt_save_8(ctx, "INPUT", &(kbd->input_byte));
+ v3_chkpt_save_8(ctx, "SCANCODE_SET", &(kbd->scancode_set));
+ v3_chkpt_save_8(ctx, "MOUSE_ENABLED", &(kbd->mouse_enabled));
+
+
return 0;
}
static int keyboard_load(struct v3_chkpt_ctx * ctx, void * private_data) {
struct keyboard_internal * kbd = (struct keyboard_internal *)private_data;
keyboard_reset_device(kbd);
+
+ v3_chkpt_load_8(ctx, "CMD_REG", &(kbd->cmd.val));
+ v3_chkpt_load_8(ctx, "STATUS_REG", &(kbd->status.val));
+ v3_chkpt_load_8(ctx, "STATE", &(kbd->state));
+ v3_chkpt_load_8(ctx, "MOUSE_STATE", &(kbd->mouse_state));
+ v3_chkpt_load_8(ctx, "OUTPUT", &(kbd->output_byte));
+ v3_chkpt_load_8(ctx, "INPUT", &(kbd->input_byte));
+ v3_chkpt_load_8(ctx, "SCANCODE_SET", &(kbd->scancode_set));
+ v3_chkpt_load_8(ctx, "MOUSE_ENABLED", &(kbd->mouse_enabled));
+
+
return 0;
}
return 0;
}
+#ifdef V3_CONFIG_CHECKPOINT
+
+#include <palacios/vmm_sprintf.h>
+
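+/* The checkpoint mirrors the PCI topology: one child context per bus, and
+ * one per device (keyed by bus, device, and function number) holding the raw
+ * 256-byte config space plus the six BAR shadow values. */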
+static int pci_save(struct v3_chkpt_ctx * ctx, void * private_data) {
+ struct pci_internal * pci = (struct pci_internal *)private_data;
+ char buf[128];
+ int i = 0;
+
+ v3_chkpt_save_32(ctx, "ADDR_REG", &(pci->addr_reg.val));
+ v3_chkpt_save_16(ctx, "IO_BASE", &(pci->dev_io_base));
+
+ for (i = 0; i < PCI_BUS_COUNT; i++) {
+ struct pci_bus * bus = &(pci->bus_list[i]);
+ struct rb_node * node = v3_rb_first(&(bus->devices));
+ struct pci_device * dev = NULL;
+ struct v3_chkpt_ctx * bus_ctx = NULL;
+
+    snprintf(buf, 128, "pci-%d", i);
+
+ bus_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
+
+ while (node) {
+ struct v3_chkpt_ctx * dev_ctx = NULL;
+ int bar_idx = 0;
+ dev = rb_entry(node, struct pci_device, dev_tree_node);
+
+ snprintf(buf, 128, "pci-%d.%d-%d", i, dev->dev_num, dev->fn_num);
+ dev_ctx = v3_chkpt_open_ctx(bus_ctx->chkpt, bus_ctx, buf);
+
+ v3_chkpt_save(dev_ctx, "CONFIG_SPACE", 256, dev->config_space);
+
+ for (bar_idx = 0; bar_idx < 6; bar_idx++) {
+ snprintf(buf, 128, "BAR-%d", bar_idx);
+ v3_chkpt_save_32(dev_ctx, buf, &(dev->bar[bar_idx].val));
+ }
+
+ node = v3_rb_next(node);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static int pci_load(struct v3_chkpt_ctx * ctx, void * private_data) {
+ struct pci_internal * pci = (struct pci_internal *)private_data;
+ char buf[128];
+ int i = 0;
+
+ v3_chkpt_load_32(ctx, "ADDR_REG", &(pci->addr_reg.val));
+ v3_chkpt_load_16(ctx, "IO_BASE", &(pci->dev_io_base));
+
+ for (i = 0; i < PCI_BUS_COUNT; i++) {
+ struct pci_bus * bus = &(pci->bus_list[i]);
+ struct rb_node * node = v3_rb_first(&(bus->devices));
+ struct pci_device * dev = NULL;
+ struct v3_chkpt_ctx * bus_ctx = NULL;
+
+    snprintf(buf, 128, "pci-%d", i);
+
+ bus_ctx = v3_chkpt_open_ctx(ctx->chkpt, ctx, buf);
+
+ while (node) {
+ struct v3_chkpt_ctx * dev_ctx = NULL;
+ int bar_idx = 0;
+ dev = rb_entry(node, struct pci_device, dev_tree_node);
+
+ snprintf(buf, 128, "pci-%d.%d-%d", i, dev->dev_num, dev->fn_num);
+ dev_ctx = v3_chkpt_open_ctx(bus_ctx->chkpt, bus_ctx, buf);
+
+ v3_chkpt_load(dev_ctx, "CONFIG_SPACE", 256, dev->config_space);
+
+ for (bar_idx = 0; bar_idx < 6; bar_idx++) {
+ snprintf(buf, 128, "BAR-%d", bar_idx);
+ v3_chkpt_load_32(dev_ctx, buf, &(dev->bar[bar_idx].val));
+ }
+
+ node = v3_rb_next(node);
+ }
+ }
+
+
+ return 0;
+}
+
+
+#endif
+
+
+
static struct v3_device_ops dev_ops = {
.free = (int (*)(void *))pci_free,
-
+#ifdef V3_CONFIG_CHECKPOINT
+ .save = pci_save,
+ .load = pci_load
+#endif
};
#ifdef V3_CONFIG_CHECKPOINT
int v3_svm_save_core(struct guest_info * core, void * ctx){
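+    /* The core's CPL is tracked outside the VMCB image, so it is saved
+     * explicitly alongside the raw VMCB page. */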
+ v3_chkpt_save_8(ctx, "cpl", &(core->cpl));
v3_chkpt_save(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data);
return 0;
}
-int v3_svm_load_core(struct guest_info * core, void * chkpt_ctx){
- struct cr0_32 * shadow_cr0;
- vmcb_saved_state_t * guest_state;
- vmcb_ctrl_t * guest_ctrl;
-
-
+int v3_svm_load_core(struct guest_info * core, void * ctx){
+
+ v3_chkpt_load_8(ctx, "cpl", &(core->cpl));
- if (v3_chkpt_load(chkpt_ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1){
+ if (v3_chkpt_load(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
return -1;
}
- guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t *)(core->vmm_data));
- guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t *)(core->vmm_data));
-
-
- core->rip = guest_state->rip;
- core->vm_regs.rsp = guest_state->rsp;
- core->vm_regs.rax = guest_state->rax;
-
- core->cpl = guest_state->cpl;
-
- core->ctrl_regs.cr0 = guest_state->cr0;
- core->ctrl_regs.cr2 = guest_state->cr2;
- core->ctrl_regs.cr4 = guest_state->cr4;
- core->dbg_regs.dr6 = guest_state->dr6;
- core->dbg_regs.dr7 = guest_state->dr7;
- core->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
- core->ctrl_regs.rflags = guest_state->rflags;
- core->ctrl_regs.efer = guest_state->efer;
-
-
- shadow_cr0 = (struct cr0_32 *)&(core->ctrl_regs.cr0);
-
-
- if (core->shdw_pg_mode == SHADOW_PAGING) {
- if (v3_get_vm_mem_mode(core) == VIRTUAL_MEM) {
- if (v3_activate_shadow_pt(core) == -1) {
- PrintError("Failed to activate shadow page tables\n");
- return -1;
- }
- } else {
- if (v3_activate_passthrough_pt(core) == -1) {
- PrintError("Failed to activate passthrough page tables\n");
- return -1;
- }
- }
- }
-
-
- v3_get_vmcb_segments((vmcb_t *)(core->vmm_data), &(core->segments));
return 0;
}
#endif
V3_Print("NumExits: %u\n", (uint32_t)info->num_exits);
+ V3_Print("IRQ STATE: started=%d, pending=%d\n",
+ info->intr_core_state.irq_started,
+ info->intr_core_state.irq_pending);
+ V3_Print("EXCP STATE: err_code_valid=%d, err_code=%x\n",
+ info->excp_state.excp_error_code_valid,
+ info->excp_state.excp_error_code);
+
+
v3_print_segments(&(info->segments));
v3_print_ctrl_regs(info);
#include <palacios/vmx.h>
#include <palacios/vmm_checkpoint.h>
#include <palacios/vmm_hashtable.h>
+#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_dev_mgr.h>
ctx = v3_chkpt_open_ctx(chkpt, NULL, key_name);
+ v3_chkpt_load_64(ctx, "RIP", &(info->rip));
+
V3_CHKPT_STD_LOAD(ctx, info->vm_regs);
V3_CHKPT_STD_LOAD(ctx, info->ctrl_regs.cr0);
info->cpu_mode = v3_get_vm_cpu_mode(info);
info->mem_mode = v3_get_vm_mem_mode(info);
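+    /* With the restored control registers in place, the shadow or
+     * passthrough page tables must be re-activated so the mappings are
+     * rebuilt against the checkpointed guest state. */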
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
+ if (v3_activate_shadow_pt(info) == -1) {
+ PrintError("Failed to activate shadow page tables\n");
+ return -1;
+ }
+ } else {
+ if (v3_activate_passthrough_pt(info) == -1) {
+ PrintError("Failed to activate passthrough page tables\n");
+ return -1;
+ }
+ }
+ }
+
switch (cpu_type) {
case V3_SVM_CPU:
return -1;
}
+ v3_print_guest_state(info);
+
return 0;
}
memset(key_name, 0, 16);
+ v3_print_guest_state(info);
+
snprintf(key_name, 16, "guest_info%d", info->vcpu_id);
ctx = v3_chkpt_open_ctx(chkpt, NULL, key_name);
+ v3_chkpt_save_64(ctx, "RIP", &(info->rip));
+
V3_CHKPT_STD_SAVE(ctx, info->vm_regs);
V3_CHKPT_STD_SAVE(ctx, info->ctrl_regs.cr0);
if (dev->ops->save) {
struct v3_chkpt_ctx * dev_ctx = NULL;
-
+
+ V3_Print("Saving state for device (%s)\n", dev->name);
dev_ctx = v3_chkpt_open_ctx(chkpt, dev_mgr_ctx, dev->name);