return thread;
}
+
+/**
+ * Rebind a kernel thread to the specified CPU.
+ * On return, the thread will be running on the target CPU.
+ * A non-zero return value indicates failure.
+ */
+static int
+palacios_move_thread_to_cpu(int new_cpu_id,
+ void * thread_ptr) {
+ struct task_struct * thread = (struct task_struct *)thread_ptr;
+
+ if(thread == NULL){
+ thread = current;
+ }
+
+ /*
+ * Bind to the specified CPU. When this call returns,
+ * the thread should be running on the target CPU.
+ */
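+    /* Note: newer kernels drop set_cpus_allowed()/cpumask_of_cpu(); there
+     * the equivalent call would be
+     * set_cpus_allowed_ptr(thread, cpumask_of(new_cpu_id)).
+     */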
+ return set_cpus_allowed(thread, cpumask_of_cpu(new_cpu_id));
+}
+
+
/**
* Returns the CPU ID that the caller is running on.
*/
.interrupt_cpu = palacios_interrupt_cpu,
.call_on_cpu = palacios_xcall,
.start_thread_on_cpu = palacios_start_thread_on_cpu,
+ .move_thread_to_cpu = palacios_move_thread_to_cpu,
};
#define V3_VM_PAUSE 23
#define V3_VM_CONTINUE 24
-
#define V3_VM_INSPECT 30
+#define V3_VM_MOVE_CORE 33
+
#define V3_VM_FB_INPUT (256+1)
#define V3_VM_FB_QUERY (256+2)
unsigned long long num_pages;
};
+struct v3_core_move_cmd {
+ uint16_t vcore_id;
+ uint16_t pcore_id;
+};
+
void * trace_malloc(size_t size, gfp_t flags);
void trace_free(const void * objp);
v3_continue_vm(guest->v3_ctx);
break;
}
+ case V3_VM_MOVE_CORE: {
+ struct v3_core_move_cmd cmd;
+ void __user * argp = (void __user *)arg;
+
+ memset(&cmd, 0, sizeof(struct v3_core_move_cmd));
+
+ if (copy_from_user(&cmd, argp, sizeof(struct v3_core_move_cmd))) {
+            printk("Error copying core move command from userspace\n");
+ return -EFAULT;
+ }
+
+        printk("Moving guest %s vcore %d to CPU %d\n", guest->name, cmd.vcore_id, cmd.pcore_id);
+
+        if (v3_move_vm_core(guest->v3_ctx, cmd.vcore_id, cmd.pcore_id) < 0) {
+            printk("Error moving vcore %d to CPU %d\n", cmd.vcore_id, cmd.pcore_id);
+            return -EFAULT;
+        }
+    }
+ break;
default: {
struct vm_ctrl * ctrl = get_ctrl(guest, ioctl);
-all: v3_ctrl v3_stop v3_cons v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file
+all: v3_ctrl v3_stop v3_cons v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file v3_core_move
v3_user_keyed_stream_file: v3_user_keyed_stream_file.c v3_user_keyed_stream.h v3_user_keyed_stream.c
gcc -static -I../linux_module v3_user_keyed_stream_file.c v3_user_keyed_stream.c -o v3_user_keyed_stream_file
-
+v3_core_move: v3_core_move.c v3_ctrl.h
+ gcc -static v3_core_move.c -o v3_core_move
clean:
- rm -f v3_ctrl v3_cons v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file
+	rm -f v3_ctrl v3_cons v3_mem v3_monitor v3_stream v3_user_host_dev_example v3_os_debug v3_user_keyed_stream_example v3_user_keyed_stream_file v3_core_move
--- /dev/null
+/*
+ * V3 Virtual Core Migrate Control
+ * (c) Lei Xia, 2011
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+#include <assert.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "v3_ctrl.h"
+
+struct v3_core_move_cmd {
+ unsigned short vcore_id;
+ unsigned short pcore_id;
+};
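+/* NOTE: this must stay in sync with the struct v3_core_move_cmd used by the
+ * kernel module's V3_VM_MOVE_CORE ioctl handler (uint16_t vcore_id/pcore_id).
+ */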
+
+
+int main(int argc, char* argv[]) {
+ int vm_fd;
+ char * vm_dev = NULL;
+ struct v3_core_move_cmd cmd;
+
+ if (argc < 4) {
+        printf("Usage: v3_core_move <vm_device> <vcore id> <target physical CPU id>\n");
+ return -1;
+ }
+
+ vm_dev = argv[1];
+ cmd.vcore_id = atoi(argv[2]);
+ cmd.pcore_id = atoi(argv[3]);
+
+ printf("Migrate vcore %d to physical CPU %d\n", cmd.vcore_id, cmd.pcore_id);
+
+ vm_fd = open(vm_dev, O_RDONLY);
+
+ if (vm_fd == -1) {
+ printf("Error opening VM device: %s\n", vm_dev);
+ return -1;
+ }
+
+ int err = ioctl(vm_fd, V3_VM_MOVE_CORE, &cmd);
+
+    if (err < 0) {
+        printf("Error sending core move command to the VM\n");
+        close(vm_fd);
+        return -1;
+    }
+
+ close(vm_fd);
+
+ return 0;
+}
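+/* Example (assuming the per-VM device node is /dev/v3-vm0):
+ *   ./v3_core_move /dev/v3-vm0 1 3
+ * moves vcore 1 of that VM onto physical CPU 3.
+ */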
+
+
#define V3_VM_SERIAL_CONNECT 21
#define V3_VM_STOP 22
+#define V3_VM_MOVE_CORE 33
+
static const char * v3_dev = "/dev/v3vee";
struct v3_guest_img {
v3_core_operating_mode_t core_run_state;
+    v3_core_moving_state_t core_move_state;  /* CORE_MOVE_PENDING means a move to target_pcpu_id has been requested */
+    uint32_t target_pcpu_id;                 /* target physical CPU id for the pending core move */
+
+    void * core_thread;                      /* host OS thread that runs this virtual core */
+
/* the logical cpu on which this core runs */
uint32_t pcpu_id;
#define V3_CREATE_THREAD_ON_CPU(cpu, fn, arg, name) ({ \
void * thread = NULL; \
- extern struct v3_os_hooks * os_hooks; \
- if ((os_hooks) && (os_hooks)->start_thread_on_cpu) { \
- thread = (os_hooks)->start_thread_on_cpu(cpu, fn, arg, name); \
- } \
- thread; \
- })
+ extern struct v3_os_hooks * os_hooks; \
+ if ((os_hooks) && (os_hooks)->start_thread_on_cpu) { \
+ thread = (os_hooks)->start_thread_on_cpu(cpu, fn, arg, name); \
+ } \
+ thread; \
+ })
+#define V3_MOVE_THREAD_TO_CPU(pcpu, thread) ({ \
+ int ret = -1; \
+ extern struct v3_os_hooks * os_hooks; \
+ if((os_hooks) && (os_hooks)->move_thread_to_cpu) { \
+ ret = (os_hooks)->move_thread_to_cpu(pcpu, thread); \
+ } \
+ ret; \
+ })
+
#endif
/* ** */
void (*interrupt_cpu)(struct v3_vm_info * vm, int logical_cpu, int vector);
void (*call_on_cpu)(int logical_cpu, void (*fn)(void * arg), void * arg);
void * (*start_thread_on_cpu)(int cpu_id, int (*fn)(void * arg), void * arg, char * thread_name);
-
+ int (*move_thread_to_cpu)(int cpu_id, void * thread);
};
int v3_pause_vm(struct v3_vm_info * vm);
int v3_continue_vm(struct v3_vm_info * vm);
+int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu);
int v3_free_vm(struct v3_vm_info * vm);
typedef enum {SHADOW_PAGING, NESTED_PAGING} v3_paging_mode_t;
typedef enum {VM_RUNNING, VM_STOPPED, VM_PAUSED, VM_ERROR} v3_vm_operating_mode_t;
typedef enum {CORE_RUNNING, CORE_STOPPED} v3_core_operating_mode_t;
+typedef enum {CORE_MOVE_DONE, CORE_MOVE_PENDING} v3_core_moving_state_t;
typedef enum {REAL, /*UNREAL,*/ PROTECTED, PROTECTED_PAE, LONG, LONG_32_COMPAT, LONG_16_COMPAT} v3_cpu_mode_t;
typedef enum {PHYSICAL_MEM, VIRTUAL_MEM} v3_mem_mode_t;
v3_time_enter_vm(info);
guest_ctrl->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
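+
+    /* If v3_move_vm_core() has flagged a pending move, perform it here,
+     * before the next VM entry.  Global interrupts must be re-enabled
+     * (STGI) first, since rebinding the thread goes through the host
+     * scheduler.
+     */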
+ if(info->core_move_state == CORE_MOVE_PENDING) {
+ v3_stgi();
+
+ if(V3_MOVE_THREAD_TO_CPU(info->target_pcpu_id, info->core_thread) != 0){
+ PrintError("Failed to move Vcore %d to CPU %d\n",
+ info->vcpu_id,
+ info->target_pcpu_id);
+ } else {
+ info->pcpu_id = info->target_pcpu_id;
+ V3_Print("Core move done, vcore %d is running on CPU %d now\n",
+ info->vcpu_id,
+ V3_Get_CPU());
+ }
+
+ info->core_move_state = CORE_MOVE_DONE;
+
+	    /* Disable global interrupts again.
+	     * NOTE: we may now be running on a different CPU.
+	     */
+ v3_clgi();
+ }
+
//V3_Print("Calling v3_svm_launch\n");
v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);
for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) {
int major = 0;
int minor = 0;
- void * core_thread = NULL;
struct guest_info * core = &(vm->cores[vcore_id]);
char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
uint32_t core_idx = 0;
// TODO: actually manage these threads instead of just launching them
core->pcpu_id = core_idx;
- core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
+ core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
- if (core_thread == NULL) {
+ if (core->core_thread == NULL) {
PrintError("Thread launch failed\n");
v3_stop_vm(vm);
return -1;
+/* Move a virtual core to a different physical CPU */
+int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
+ struct guest_info * core = NULL;
+
+    if(vcore_id < 0 || vcore_id >= vm->num_cores) {
+ return -1;
+ }
+
+ core = &(vm->cores[vcore_id]);
+
+ if(target_cpu != core->pcpu_id &&
+ core->core_move_state != CORE_MOVE_PENDING){
+ core->core_move_state = CORE_MOVE_PENDING;
+ core->target_pcpu_id = target_cpu;
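+	/* Kick the core's current physical CPU so it exits guest context
+	 * (or its halt loop) and notices the pending move request.
+	 */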
+ v3_interrupt_cpu(vm, core->pcpu_id, 0);
+
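+	/* Busy-wait (yielding) until the target core carries out the move
+	 * before its next VM entry and sets core_move_state to CORE_MOVE_DONE.
+	 */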
+ while(core->core_move_state != CORE_MOVE_DONE){
+ v3_yield(NULL);
+ }
+ }
+
+ return 0;
+}
+
int v3_stop_vm(struct v3_vm_info * vm) {
/* if no interrupt, then we do halt */
/* asm("hlt"); */
}
+
+    /* Check whether a core move request is pending.
+     * If so, stop halting and return, leaving RIP on the halt instruction
+     * so the guest re-executes it after the move is carried out.
+     */
+ if (info->core_move_state == CORE_MOVE_PENDING){
+ return 0;
+ }
}
/* V3_Print("palacios: done with halt\n"); */
check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+
+ /* determine if we need to move to a different physical core */
+ if(info->core_move_state == CORE_MOVE_PENDING) {
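+        /* A VMCS is bound to the physical CPU it was last loaded on, so it
+         * must be cleared before the thread migrates, reloaded on the
+         * destination CPU, and then relaunched (hence VMX_UNLAUNCHED) rather
+         * than resumed.
+         */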
+ vmcs_clear(vmx_info->vmcs_ptr_phys);
+
+ v3_enable_ints();
+
+ if(V3_MOVE_THREAD_TO_CPU(info->target_pcpu_id, info->core_thread) != 0){
+ PrintError("Failed to move vcore %d to CPU %d\n", info->vcpu_id, info->target_pcpu_id);
+ } else {
+ info->pcpu_id = info->target_pcpu_id;
+ PrintDebug("Core move done, vcore %d is running on CPU %d now\n", info->vcpu_id, V3_Get_CPU());
+ }
+
+        /* Disable interrupts again.
+         * NOTE: we may now be running on a different CPU.
+         */
+ v3_disable_ints();
+
+ vmcs_load(vmx_info->vmcs_ptr_phys);
+ vmx_info->state = VMX_UNLAUNCHED;
+        info->core_move_state = CORE_MOVE_DONE;
+    }
+
if (v3_update_vmcs_host_state(info)) {
v3_enable_ints();
PrintError("Could not write host state\n");