addr = kmalloc(size, GFP_KERNEL);
}
mallocs++;
+
return addr;
}
return thread;
}
+
+/**
+ * Rebind a kernel thread to the specified physical CPU.
+ *
+ * new_cpu_id - target physical CPU id (assumed valid and online —
+ *              TODO confirm callers guarantee this; no cpu_online()
+ *              check is done here)
+ * thread_ptr - struct task_struct * of the thread to move; NULL means
+ *              move the calling thread (current)
+ *
+ * The thread will be running on the target CPU on return.
+ * non-zero return means failure
+ */
+static int
+palacios_move_thread_to_cpu(int new_cpu_id,
+			    void * thread_ptr) {
+    struct task_struct * thread = (struct task_struct *)thread_ptr;
+
+    if(thread == NULL){
+	thread = current;
+    }
+
+    /*
+     * Bind to the specified CPU. When this call returns,
+     * the thread should be running on the target CPU.
+     */
+    return set_cpus_allowed(thread, cpumask_of_cpu(new_cpu_id));
+}
+
+
/**
* Returns the CPU ID that the caller is running on.
*/
.interrupt_cpu = palacios_interrupt_cpu,
.call_on_cpu = palacios_xcall,
.start_thread_on_cpu = palacios_start_thread_on_cpu,
+ .move_thread_to_cpu = palacios_move_thread_to_cpu,
};
printk("palacios_init starting - calling init_v3\n");
- Init_V3(&palacios_os_hooks, nr_cpu_ids);
+ Init_V3(&palacios_os_hooks, num_online_cpus());
return 0;
#include <linux/poll.h>
#include <linux/anon_inodes.h>
#include <linux/sched.h>
-
+ #include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
v3_continue_vm(guest->v3_ctx);
break;
}
+ case V3_VM_MOVE_CORE: {
+ struct v3_core_move_cmd cmd;
+ void __user * argp = (void __user *)arg;
+
+ memset(&cmd, 0, sizeof(struct v3_core_move_cmd));
+
+ if (copy_from_user(&cmd, argp, sizeof(struct v3_core_move_cmd))) {
+ printk("copy from user error getting migrate command...\n");
+ return -EFAULT;
+ }
+
+ printk("moving guest %s vcore %d to CPU %d\n", guest->name, cmd.vcore_id, cmd.pcore_id);
+
+ v3_move_vm_core(guest->v3_ctx, cmd.vcore_id, cmd.pcore_id);
+ }
+ break;
default: {
struct vm_ctrl * ctrl = get_ctrl(guest, ioctl);
cdev_del(&(guest->cdev));
- kfree(guest->img);
+ vfree(guest->img);
kfree(guest);
return 0;
if ((os_hooks) && (os_hooks->call_on_cpu)) {
for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
if (v3_cpu_types[i] != V3_INVALID_CPU) {
- deinit_cpu((void *)(addr_t)i);
+ V3_Call_On_CPU(i, deinit_cpu, (void *)(addr_t)i);
+ //deinit_cpu((void *)(addr_t)i);
}
}
}
for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) {
int major = 0;
int minor = 0;
- void * core_thread = NULL;
struct guest_info * core = &(vm->cores[vcore_id]);
char * specified_cpu = v3_cfg_val(core->core_cfg_data, "target_cpu");
uint32_t core_idx = 0;
// TODO: actually manage these threads instead of just launching them
core->pcpu_id = core_idx;
- core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
+ core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
- if (core_thread == NULL) {
+ if (core->core_thread == NULL) {
PrintError("Thread launch failed\n");
v3_stop_vm(vm);
return -1;
+/* Move a virtual core to a different physical core.
+ *
+ * vm         - the VM whose vcore is being migrated
+ * vcore_id   - index into vm->cores (valid range: 0 .. num_cores-1)
+ * target_cpu - destination physical CPU id
+ *
+ * Flags the move as pending and interrupts the vcore's current CPU so
+ * its exit handler performs the actual migration; spins (yielding)
+ * until the vcore reports the move complete.
+ * Returns 0 on success (or no-op), -1 on an invalid vcore id.
+ */
+int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu) {
+    struct guest_info * core = NULL;
+
+    /* was 'vcore_id > vm->num_cores': off-by-one that let
+     * vcore_id == num_cores index one past the end of vm->cores */
+    if(vcore_id < 0 || vcore_id >= vm->num_cores) {
+	return -1;
+    }
+
+    core = &(vm->cores[vcore_id]);
+
+    /* Skip if already on the target CPU or a move is already in flight.
+     * NOTE(review): core_move_state is read/written cross-CPU with no
+     * explicit barriers — relies on the interrupt/yield path for
+     * visibility; confirm this is intended. */
+    if(target_cpu != core->pcpu_id &&
+	core->core_move_state != CORE_MOVE_PENDING){
+	core->core_move_state = CORE_MOVE_PENDING;
+	core->target_pcpu_id = target_cpu;
+	v3_interrupt_cpu(vm, core->pcpu_id, 0);
+
+	/* wait for the vcore's exit handler to finish the migration */
+	while(core->core_move_state != CORE_MOVE_DONE){
+	    v3_yield(NULL);
+	}
+    }
+
+
+    return 0;
+}
+
int v3_stop_vm(struct v3_vm_info * vm) {
struct vmx_data * vmx_state = core->vmm_data;
V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
- V3_FreePages(vmx_state->msr_area, 1);
+ V3_FreePages(V3_PAddr(vmx_state->msr_area), 1);
V3_Free(vmx_state);
check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+
+ /* determine if we need to move to a different physical core */
+ if(info->core_move_state == CORE_MOVE_PENDING) {
+ vmcs_clear(vmx_info->vmcs_ptr_phys);
+
+ v3_enable_ints();
+
+ if(V3_MOVE_THREAD_TO_CPU(info->target_pcpu_id, info->core_thread) != 0){
+ PrintError("Failed to move vcore %d to CPU %d\n", info->vcpu_id, info->target_pcpu_id);
+ } else {
+ info->pcpu_id = info->target_pcpu_id;
+ PrintDebug("Core move done, vcore %d is running on CPU %d now\n", info->vcpu_id, V3_Get_CPU());
+ }
+
+    /* Disable global interrupts again.
+     * NOTE: the thread may now be running on a different CPU.
+     */
+ v3_disable_ints();
+
+ vmcs_load(vmx_info->vmcs_ptr_phys);
+ vmx_info->state = VMX_UNLAUNCHED;
+ info->core_move_state= CORE_MOVE_DONE;
+ }
+
+
if (v3_update_vmcs_host_state(info)) {
v3_enable_ints();
PrintError("Could not write host state\n");
void v3_init_vmx_cpu(int cpu_id) {
+ addr_t vmx_on_region = 0;
if (cpu_id == 0) {
if (v3_init_vmx_hw(&hw_info) == -1) {
// Setup VMXON Region
- host_vmcs_ptrs[cpu_id] = allocate_vmcs();
+ vmx_on_region = allocate_vmcs();
- PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
- if (vmx_on(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
+ if (vmx_on(vmx_on_region) == VMX_SUCCESS) {
V3_Print("VMX Enabled\n");
+ host_vmcs_ptrs[cpu_id] = vmx_on_region;
} else {
- PrintError("VMX initialization failure\n");
- return;
+ V3_Print("VMX already enabled\n");
+ V3_FreePages((void *)vmx_on_region, 1);
}
-
+
+ PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
{
struct vmx_sec_proc_ctrls sec_proc_ctrls;
void v3_deinit_vmx_cpu(int cpu_id) {
extern v3_cpu_arch_t v3_cpu_types[];
v3_cpu_types[cpu_id] = V3_INVALID_CPU;
- V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);
+
+ if (host_vmcs_ptrs[cpu_id] != 0) {
+ V3_Print("Disabling VMX\n");
+
+ if (vmx_off() != VMX_SUCCESS) {
+ PrintError("Error executing VMXOFF\n");
+ }
+
+ V3_FreePages((void *)host_vmcs_ptrs[cpu_id], 1);
+
+ host_vmcs_ptrs[cpu_id] = 0;
+ }
}