struct palacios_console * cons = priv_data;
int cons_fd = 0;
unsigned long flags;
+ int acquired = 0;
if (cons->open == 0) {
printk("Attempted to connect to unopened console\n");
}
spin_lock_irqsave(&(cons->lock), flags);
+ if (cons->connected == 0) {
+ cons->connected = 1;
+ acquired = 1;
+ }
+ spin_unlock_irqrestore(&(cons->lock), flags);
+
+ if (acquired == 0) {
+ printk("Console already connected\n");
+ return -1;
+ }
cons_fd = anon_inode_getfd("v3-cons", &cons_fops, cons, 0);
return cons_fd;
}
- cons->connected = 1;
-
v3_deliver_console_event(guest->v3_ctx, NULL);
- spin_unlock_irqrestore(&(cons->lock), flags);
+
printk("Console connected\n");
return thread;
}
+
+/**
+ * Rebind a kernel thread to the specified CPU.
+ * A NULL thread_ptr means "move the calling thread".
+ * The thread will be running on target CPU on return
+ * non-zero return means failure
+ */
+static int
+palacios_move_thread_to_cpu(int new_cpu_id, 
+			    void * thread_ptr) {
+    struct task_struct * thread = (struct task_struct *)thread_ptr;
+
+    if (thread == NULL) {
+	thread = current;
+    }
+
+    /* Log after the NULL substitution so the printed pointer is the
+     * thread that is actually being moved, not a possible NULL. */
+    printk("Moving thread (%p) to cpu %d\n", thread, new_cpu_id);
+
+    /*
+     * Bind to the specified CPU. When this call returns,
+     * the thread should be running on the target CPU.
+     */
+    return set_cpus_allowed_ptr(thread, cpumask_of(new_cpu_id));
+}
+
+
/**
* Returns the CPU ID that the caller is running on.
*/
.interrupt_cpu = palacios_interrupt_cpu,
.call_on_cpu = palacios_xcall,
.start_thread_on_cpu = palacios_start_thread_on_cpu,
+ .move_thread_to_cpu = palacios_move_thread_to_cpu,
};
#define V3_VM_PAUSE 23
#define V3_VM_CONTINUE 24
-
#define V3_VM_INSPECT 30
+#define V3_VM_MOVE_CORE 33
+
#define V3_VM_FB_INPUT (256+1)
#define V3_VM_FB_QUERY (256+2)
unsigned long long num_pages;
};
+/* Payload for the V3_VM_MOVE_CORE ioctl: selects which virtual core
+ * to migrate and the physical CPU it should land on. Layout must match
+ * the userspace definition in v3_core_move.c. */
+struct v3_core_move_cmd{
+    uint16_t vcore_id;    /* guest virtual core to move */
+    uint16_t pcore_id;    /* target physical CPU id */
+};
+
void * trace_malloc(size_t size, gfp_t flags);
void trace_free(const void * objp);
v3_continue_vm(guest->v3_ctx);
break;
}
+ case V3_VM_MOVE_CORE: {
+ struct v3_core_move_cmd cmd;
+ void __user * argp = (void __user *)arg;
+
+ memset(&cmd, 0, sizeof(struct v3_core_move_cmd));
+
+ if (copy_from_user(&cmd, argp, sizeof(struct v3_core_move_cmd))) {
+ printk("copy from user error getting migrate command...\n");
+ return -EFAULT;
+ }
+
+ printk("moving guest %s vcore %d to CPU %d\n", guest->name, cmd.vcore_id, cmd.pcore_id);
+
+ v3_move_vm_core(guest->v3_ctx, cmd.vcore_id, cmd.pcore_id);
+ }
+ break;
default: {
struct vm_ctrl * ctrl = get_ctrl(guest, ioctl);
-all: v3_ctrl v3_stop v3_cons v3_cons_sc v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file
+all: v3_ctrl v3_stop v3_cons v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file v3_core_move
+
v3_ctrl : v3_ctrl.c v3_ctrl.h
gcc -static v3_mem.c -o v3_mem
-v3_cons : v3_cons.c v3_ctrl.h
- gcc -static v3_cons.c -o v3_cons -lcurses
+v3_cons : v3_cons.c v3_cons_sc.c v3_ctrl.h
+ gcc v3_cons.c -o v3_cons -lcurses
+ gcc v3_cons_sc.c -o v3_cons_sc -lcurses
v3_stream : v3_stream.c v3_ctrl.h
gcc -static v3_stream.c -o v3_stream
-v3_cons_sc : v3_cons_sc.c v3_ctrl.h
- gcc -static v3_cons_sc.c -o v3_cons_sc -lcurses
-
-
v3_monitor : v3_cons.c v3_ctrl.h
gcc -static v3_monitor.c -o v3_monitor
v3_user_keyed_stream_file: v3_user_keyed_stream_file.c v3_user_keyed_stream.h v3_user_keyed_stream.c
gcc -static -I../linux_module v3_user_keyed_stream_file.c v3_user_keyed_stream.c -o v3_user_keyed_stream_file
+v3_core_move : v3_core_move.c v3_ctrl.h
+ gcc -static v3_core_move.c -o v3_core_move
+
v3_inject_ecc_scrubber_mce: v3_inject_ecc_scrubber_mce.c
gcc -static -I../linux_module v3_inject_ecc_scrubber_mce.c -o v3_inject_ecc_scrubber_mce
clean:
-	rm -f v3_ctrl v3_cons v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file
+	rm -f v3_ctrl v3_cons v3_cons_sc v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file v3_core_move
--- /dev/null
+/*
+ * V3 Virtual Core Migrate Control
+ * (c) Lei Xia, 2011
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+#include <assert.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "v3_ctrl.h"
+
+/* Userspace mirror of the kernel's v3_core_move_cmd; field widths
+ * (two 16-bit values) must stay in sync with the module definition. */
+struct v3_core_move_cmd {
+    unsigned short vcore_id;    /* guest virtual core to move */
+    unsigned short pcore_id;    /* target physical CPU id */
+};
+
+
+/*
+ * Parse <vm_device> <vcore id> <target physical CPU id> and issue the
+ * V3_VM_MOVE_CORE ioctl against the VM device.
+ * Returns 0 on success, -1 on any error.
+ */
+int main(int argc, char* argv[]) {
+    int vm_fd;
+    int err;
+    char * vm_dev = NULL;
+    struct v3_core_move_cmd cmd;
+
+    if (argc < 4) {
+	/* Use argv[0] so the usage line matches whatever name the
+	 * binary was actually installed under (Makefile builds it as
+	 * v3_core_move, not v3_core_migrate). */
+	printf("Usage: %s <vm_device> <vcore id> <target physical CPU id>\n", argv[0]);
+	return -1;
+    }
+
+    vm_dev = argv[1];
+    /* NOTE(review): atoi() reports no errors; out-of-range or negative
+     * input silently wraps in the unsigned 16-bit command fields. */
+    cmd.vcore_id = atoi(argv[2]);
+    cmd.pcore_id = atoi(argv[3]);
+
+    printf("Migrate vcore %d to physical CPU %d\n", cmd.vcore_id, cmd.pcore_id);
+
+    vm_fd = open(vm_dev, O_RDONLY);
+
+    if (vm_fd == -1) {
+	printf("Error opening VM device: %s\n", vm_dev);
+	return -1;
+    }
+
+    err = ioctl(vm_fd, V3_VM_MOVE_CORE, &cmd);
+
+    if (err < 0) {
+	printf("Error write core migrating command to vm\n");
+	close(vm_fd);    /* don't leak the device fd on the error path */
+	return -1;
+    }
+
+    close(vm_fd);
+
+    return 0;
+}
+
+
#define V3_VM_SERIAL_CONNECT 21
#define V3_VM_STOP 22
+#define V3_VM_MOVE_CORE 33
+
static const char * v3_dev = "/dev/v3vee";
struct v3_guest_img {
v3_core_operating_mode_t core_run_state;
+ void * core_thread; /* thread struct for virtual core */
+
/* the logical cpu on which this core runs */
uint32_t pcpu_id;
thread; \
})
+/* Dispatch to the host OS hook that migrates a kernel thread to the
+ * given physical CPU. Evaluates to the hook's return value, or -1 if
+ * the hook table or the move_thread_to_cpu hook is not installed. */
+#define V3_MOVE_THREAD_TO_CPU(pcpu, thread) ({ \
+ int ret = -1; \
+ extern struct v3_os_hooks * os_hooks; \
+ if((os_hooks) && (os_hooks)->move_thread_to_cpu) { \
+ ret = (os_hooks)->move_thread_to_cpu(pcpu, thread); \
+ } \
+ ret; \
+ })
+
#endif
/* ** */
void (*interrupt_cpu)(struct v3_vm_info * vm, int logical_cpu, int vector);
void (*call_on_cpu)(int logical_cpu, void (*fn)(void * arg), void * arg);
void * (*start_thread_on_cpu)(int cpu_id, int (*fn)(void * arg), void * arg, char * thread_name);
-
+ int (*move_thread_to_cpu)(int cpu_id, void * thread);
};
int v3_pause_vm(struct v3_vm_info * vm);
int v3_continue_vm(struct v3_vm_info * vm);
+int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu);
int v3_free_vm(struct v3_vm_info * vm);
drive->irq_flags.c_d = 0;
channel->status.busy = 0;
- channel->status.data_req = 1;
channel->status.error = 0;
+ if (drive->transfer_length > 0) {
+ channel->status.data_req = 1;
+ }
+
ide_raise_irq(ide, channel);
}
static int atapi_read_chunk(struct ide_internal * ide, struct ide_channel * channel) {
struct ide_drive * drive = get_selected_drive(channel);
- int ret = drive->ops->read(drive->data_buf, drive->current_lba * ATAPI_BLOCK_SIZE, ATAPI_BLOCK_SIZE,
-drive->private_data);
+ int ret = drive->ops->read(drive->data_buf,
+ drive->current_lba * ATAPI_BLOCK_SIZE,
+ ATAPI_BLOCK_SIZE, drive->private_data);
if (ret == -1) {
PrintError("IDE: Error reading CD block (LBA=%p)\n", (void *)(addr_t)(drive->current_lba));
return 0;
}
- if (lba + xfer_len > drive->ops->get_capacity(drive->private_data)) {
+ if ((lba + xfer_len) > (drive->ops->get_capacity(drive->private_data) / ATAPI_BLOCK_SIZE)) {
PrintError("IDE: xfer len exceeded capacity (lba=%d) (xfer_len=%d) (ReadEnd=%d) (capacity=%d)\n",
lba, xfer_len, lba + xfer_len,
(uint32_t)drive->ops->get_capacity(drive->private_data));
return 0;
}
- // PrintDebug("Reading %d blocks from LBA 0x%x\n", xfer_len, lba);
-
+ // PrintDebug("Reading %d blocks from LBA 0x%x\n", xfer_len, lba);
drive->current_lba = lba;
// Update the request length value in the cylinder registers
struct atapi_rd_capacity_resp * resp = (struct atapi_rd_capacity_resp *)(drive->data_buf);
uint32_t capacity = drive->ops->get_capacity(drive->private_data);
- resp->lba = le_to_be_32(capacity);
+ resp->lba = le_to_be_32((capacity / ATAPI_BLOCK_SIZE) - 1);
resp->block_len = le_to_be_32(ATAPI_BLOCK_SIZE);
atapi_setup_cmd_resp(ide, channel, sizeof(struct atapi_rd_capacity_resp));
xfer_len = alloc_len;
}
+ V3_Print("ATAPI Get config: xfer_len=%d\b", xfer_len);
+
atapi_setup_cmd_resp(ide, channel, xfer_len);
return 0;
PrintDebug("Reading %d bytes from %p to %p\n", (uint32_t)num_bytes, (uint8_t *)(disk->disk_image + lba), buf);
+ if (lba + num_bytes > disk->capacity) {
+ PrintError("Out of bounds read: lba=%llu, num_bytes=%llu, capacity=%llu\n",
+ lba, num_bytes, disk->capacity);
+ return -1;
+ }
+
return read_all(disk->fd, buf, lba, num_bytes);
}
PrintDebug("Writing %d bytes from %p to %p\n", (uint32_t)num_bytes, buf, (uint8_t *)(disk->disk_image + lba));
+ if (lba + num_bytes > disk->capacity) {
+ PrintError("Out of bounds read: lba=%llu, num_bytes=%llu, capacity=%llu\n",
+ lba, num_bytes, disk->capacity);
+ return -1;
+ }
+
+
return write_all(disk->fd, buf, lba, num_bytes);
}
static uint64_t get_capacity(void * private_data) {
struct disk_state * disk = (struct disk_state *)private_data;
- PrintDebug("Querying RAMDISK capacity %d\n",
+ PrintDebug("Querying FILEDISK capacity %d\n",
(uint32_t)(disk->capacity));
return disk->capacity;
PrintDebug("Attempting to select a non-present drive\n");
channel->error_reg.abort = 1;
channel->status.error = 1;
+ } else {
+ channel->status.busy = 0;
+ channel->status.ready = 1;
+ channel->status.data_req = 0;
+ channel->status.error = 0;
+ channel->status.seek_complete = 1;
+
+ channel->dma_status.active = 0;
+ channel->dma_status.err = 0;
}
break;
return -1;
}
- strncpy(drive->model, model_str, sizeof(drive->model) - 1);
-
+ if (model_str != NULL) {
+ strncpy(drive->model, model_str, sizeof(drive->model) - 1);
+ }
+
if (strcasecmp(type_str, "cdrom") == 0) {
drive->drive_type = BLOCK_CDROM;
PrintDebug("Reading %d bytes from %p to %p\n", (uint32_t)num_bytes, (uint8_t *)(disk->disk_image + lba), buf);
+ if (lba + num_bytes > disk->capacity) {
+ PrintError("read out of bounds: lba=%llu (%p), num_bytes=%llu, capacity=%d (%p)\n",
+ lba, (void *)(addr_t)lba, num_bytes, disk->capacity, (void *)(addr_t)disk->capacity);
+ return -1;
+ }
+
memcpy(buf, (uint8_t *)(disk->disk_image + lba), num_bytes);
return 0;
PrintDebug("Writing %d bytes from %p to %p\n", (uint32_t)num_bytes, buf, (uint8_t *)(disk->disk_image + lba));
+ if (lba + num_bytes > disk->capacity) {
+ PrintError("write out of bounds: lba=%llu (%p), num_bytes=%llu, capacity=%d (%p)\n",
+ lba, (void *)(addr_t)lba, num_bytes, disk->capacity, (void *)(addr_t)disk->capacity);
+ return -1;
+ }
+
+
memcpy((uint8_t *)(disk->disk_image + lba), buf, num_bytes);
return 0;
struct dlm_register {
uint8_t data;
};
-#define SERIAL_BUF_LEN 16
+#define SERIAL_BUF_LEN 128
struct serial_buffer {
int head; // most recent data
v3_time_enter_vm(info);
guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
+
//V3_Print("Calling v3_svm_launch\n");
v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) {
int major = 0;
int minor = 0;
- void * core_thread = NULL;
struct guest_info * core = &(vm->cores[vcore_id]);
char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
uint32_t core_idx = 0;
// TODO: actually manage these threads instead of just launching them
core->pcpu_id = core_idx;
- core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
+ core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
- if (core_thread == NULL) {
+ if (core->core_thread == NULL) {
PrintError("Thread launch failed\n");
v3_stop_vm(vm);
return -1;
+/* move a virtual core to different physical core */
+/*
+ * Migrate virtual core vcore_id of vm to physical CPU target_cpu.
+ * Returns 0 on success (including the no-op case where the core is
+ * already on target_cpu), -1 on invalid vcore_id, missing thread
+ * context, or host-level thread-move failure.
+ */
+int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
+    struct guest_info * core = NULL;
+
+    /* vcore_id is signed; the negative check must come before the
+     * comparison against num_cores. */
+    if ((vcore_id < 0) || (vcore_id >= vm->num_cores)) {
+	PrintError("Attempted to migrate invalid virtual core (%d)\n", vcore_id);
+	return -1;
+    }
+
+    core = &(vm->cores[vcore_id]);
+
+    if (target_cpu == core->pcpu_id) {
+	PrintError("Attempted to migrate to local core (%d)\n", target_cpu);
+	// well that was pointless
+	return 0;
+    }
+
+    if (core->core_thread == NULL) {
+	PrintError("Attempted to migrate a core without a valid thread context\n");
+	return -1;
+    }
+
+    /* Quiesce all guest cores before touching placement state; spin
+     * until the barrier is acquired. */
+    while (v3_raise_barrier(vm, NULL) == -1);
+
+    V3_Print("Performing Migration from %d to %d\n", core->pcpu_id, target_cpu);
+
+    // Double check that we weren't preemptively migrated
+    if (target_cpu != core->pcpu_id) {    
+
+	V3_Print("Moving Core\n");
+
+	/* Delegate the actual thread rebinding to the host OS hook. */
+	if (V3_MOVE_THREAD_TO_CPU(target_cpu, core->core_thread) != 0) {
+	    PrintError("Failed to move Vcore %d to CPU %d\n", 
+		       core->vcpu_id, target_cpu);
+	    v3_lower_barrier(vm);
+	    return -1;
+	} 
+	
+	/* There will be a benign race window here:
+	   core->pcpu_id will be set to the target core before its fully "migrated"
+	   However the core will NEVER run on the old core again, its just in flight to the new core
+	*/
+	core->pcpu_id = target_cpu;
+
+	V3_Print("core now at %d\n", core->pcpu_id);
+	
+    }
+
+
+
+
+    v3_lower_barrier(vm);
+
+    return 0;
+}
+
int v3_stop_vm(struct v3_vm_info * vm) {
/* if no interrupt, then we do halt */
/* asm("hlt"); */
}
+
}
/* V3_Print("palacios: done with halt\n"); */
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
+#include <palacios/vmm_decoder.h>
#include <palacios/vmx_ept.h>
#include <palacios/vmx_assist.h>
v3_update_timers(info);
if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
+ vmcs_clear(vmx_info->vmcs_ptr_phys);
vmcs_load(vmx_info->vmcs_ptr_phys);
+ vmx_info->state = VMX_UNLAUNCHED;
}
v3_vmx_restore_vmcs(info);
check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+
if (v3_update_vmcs_host_state(info)) {
v3_enable_ints();
PrintError("Could not write host state\n");
if (vmx_info->state == VMX_UNLAUNCHED) {
vmx_info->state = VMX_LAUNCHED;
+
info->vm_info->run_state = VM_RUNNING;
ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
} else {
v3_yield_cond(info);
if (v3_handle_vmx_exit(info, &exit_info) == -1) {
- PrintError("Error in VMX exit handler\n");
+ PrintError("Error in VMX exit handler (Exit reason=%x)\n", exit_info.exit_reason);
return -1;
}
}
if (v3_vmx_enter(info) == -1) {
+
+ addr_t host_addr;
+ addr_t linear_addr = 0;
+
+ info->vm_info->run_state = VM_ERROR;
+
+ V3_Print("VMX core %u: VMX ERROR!!\n", info->vcpu_id);
+
+ v3_print_guest_state(info);
+
+ V3_Print("VMX core %u\n", info->vcpu_id);
+
+
+
+ linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
+
+ if (info->mem_mode == PHYSICAL_MEM) {
+ v3_gpa_to_hva(info, linear_addr, &host_addr);
+ } else if (info->mem_mode == VIRTUAL_MEM) {
+ v3_gva_to_hva(info, linear_addr, &host_addr);
+ }
+
+ V3_Print("VMX core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);
+
+ V3_Print("VMX core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
+ v3_dump_mem((uint8_t *)host_addr, 15);
+
+ v3_print_stack(info);
+
+
v3_print_vmcs();
print_exit_log(info);
return -1;