From: Jack Lange
Date: Tue, 13 Sep 2011 20:22:05 +0000 (-0400)
Subject: Remove non-multithreaded OS option. Change VM creation to be fully asynchronous....
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=b1128b2a9d269fefc786c60c26878b372d5e39c1;p=palacios.git

Remove non-multithreaded OS option. Change VM creation to be fully asynchronous.
Update host interface and linux module to match
---

diff --git a/Kconfig b/Kconfig
index 9a7f7e7..d484470 100644
--- a/Kconfig
+++ b/Kconfig
@@ -108,13 +108,6 @@ endchoice
 
 menu "Supported host OS features"
 
-config MULTITHREAD_OS
-    bool "Host support for multiple threads"
-    default y
-    help
-      Select this if your OS supports multiple threads of execution. This will enable features in Palacios
-      to require the creation of additional execution threads.
-
 config ALIGNED_PG_ALLOC
     bool "Host support for aligned page allocations"

diff --git a/linux_module/main.c b/linux_module/main.c
index a6787ce..9317635 100644
--- a/linux_module/main.c
+++ b/linux_module/main.c
@@ -67,74 +67,54 @@ static long v3_dev_ioctl(struct file * filp,
         struct v3_guest * guest = kmalloc(sizeof(struct v3_guest), GFP_KERNEL);
 
         if (IS_ERR(guest)) {
-            printk("Error allocating Kernel guest_image\n");
+            printk("Palacios: Error allocating Kernel guest_image\n");
             return -EFAULT;
         }
 
         memset(guest, 0, sizeof(struct v3_guest));
 
-        printk("Starting V3 Guest... (%p)\n", guest);
+        printk("Palacios: Starting V3 Guest...\n");
 
         vm_minor = register_vm(guest);
 
         if (vm_minor == -1) {
-            printk("Too many VMs are currently running\n");
+            printk("Palacios Error: Too many VMs are currently running\n");
             return -EFAULT;
         }
 
         guest->vm_dev = MKDEV(v3_major_num, vm_minor);
 
         if (copy_from_user(&user_image, argp, sizeof(struct v3_guest_img))) {
-            printk("copy from user error getting guest image...\n");
+            printk("Palacios Error: copy from user error getting guest image...\n");
             return -EFAULT;
         }
 
         guest->img_size = user_image.size;
 
-        printk("Allocating kernel memory for guest image (%llu bytes)\n", user_image.size);
+        printk("Palacios: Allocating kernel memory for guest image (%llu bytes)\n", user_image.size);
         guest->img = vmalloc(guest->img_size);
 
         if (IS_ERR(guest->img)) {
-            printk("Error: Could not allocate space for guest image\n");
+            printk("Palacios Error: Could not allocate space for guest image\n");
             return -EFAULT;
         }
 
         if (copy_from_user(guest->img, user_image.guest_data, guest->img_size)) {
-            printk("Error loading guest data\n");
+            printk("Palacios: Error loading guest data\n");
             return -EFAULT;
         }
 
         strncpy(guest->name, user_image.name, 127);
 
-        printk("Launching VM\n");
+        printk("Palacios: Launching VM\n");
 
         INIT_LIST_HEAD(&(guest->exts));
 
-        init_completion(&(guest->start_done));
-        init_completion(&(guest->thread_done));
-
-        {
-            struct task_struct * launch_thread = NULL;
-            // At some point we're going to want to allow the user to specify a CPU mask
-            // But for now, well just launch from the local core, and rely on the global cpu mask
-
-            preempt_disable();
-            launch_thread = kthread_create(start_palacios_vm, guest, guest->name);
-
-            if (IS_ERR(launch_thread)) {
-                preempt_enable();
-                printk("Palacios error creating launch thread for vm (%s)\n", guest->name);
-                return -EFAULT;
-            }
-
-            kthread_bind(launch_thread, smp_processor_id());
-            preempt_enable();
-
-            wake_up_process(launch_thread);
+        if (start_palacios_vm(guest) == -1) {
+            printk("Palacios: Error starting guest\n");
+            return -EFAULT;
         }
 
-        wait_for_completion(&(guest->start_done));
-
         return guest->vm_dev;
         break;
     }
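
For context on the start_done/thread_done handshake being deleted above: it is the stock Linux completion pattern sketched below. This is an illustrative reconstruction, not code from Palacios, and launch_ctx, launch_fn, and launch_and_wait are made-up names. The ioctl thread used to spawn a launch kthread and block on start_done so that the dev_t it handed back to user space was already valid; now that start_palacios_vm() runs directly in the ioctl context there is no second thread to rendezvous with, so the completions become dead weight.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative sketch only -- the generic pattern the old launch path used. */
struct launch_ctx {
    struct completion start_done;   /* signalled once setup has finished */
    int setup_err;                  /* result the waiter reads afterwards */
};

static int launch_fn(void * arg) {
    struct launch_ctx * ctx = arg;

    ctx->setup_err = 0;             /* ... VM creation / device setup ... */
    complete(&(ctx->start_done));   /* wake the thread blocked below */
    return 0;
}

static int launch_and_wait(struct launch_ctx * ctx) {
    struct task_struct * thread;

    init_completion(&(ctx->start_done));

    thread = kthread_run(launch_fn, ctx, "launch");
    if (IS_ERR(thread)) {
        return PTR_ERR(thread);
    }

    wait_for_completion(&(ctx->start_done));   /* block until launch_fn signals */
    return ctx->setup_err;
}
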
diff --git a/linux_module/palacios-stubs.c b/linux_module/palacios-stubs.c
index 26ada0b..8eeeb7d 100644
--- a/linux_module/palacios-stubs.c
+++ b/linux_module/palacios-stubs.c
@@ -180,7 +180,7 @@ static int lnx_thread_target(void * arg) {
 /**
  * Creates a kernel thread.
  */
-static void
+static void *
 palacios_start_kernel_thread(
     int (*fn)      (void * arg),
     void *         arg,
@@ -192,8 +192,7 @@ palacios_start_kernel_thread(
     thread_info->arg = arg;
     thread_info->name = thread_name;
 
-    kthread_run( lnx_thread_target, thread_info, thread_name );
-    return;
+    return kthread_run( lnx_thread_target, thread_info, thread_name );
 }
 
 
@@ -220,7 +219,7 @@ palacios_start_thread_on_cpu(int cpu_id,
         return NULL;
     }
 
-    kthread_bind(thread, cpu_id);
+    set_cpus_allowed_ptr(thread, cpumask_of(cpu_id));
     wake_up_process(thread);
 
     return thread;
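
The palacios-stubs.c hunk above drops kthread_bind() in favor of set_cpus_allowed_ptr() when pinning a freshly created per-VM thread to a physical CPU. kthread_bind() must be applied to a kthread that has not started running yet (i.e., straight out of kthread_create()), while set_cpus_allowed_ptr() simply rewrites the task's CPU affinity mask, which is presumably why the stub switches to it. A minimal sketch of the same create/pin/wake sequence in isolation; start_pinned_thread is an illustrative name, not a Palacios symbol:

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative sketch mirroring palacios_start_thread_on_cpu()'s sequence. */
static struct task_struct * start_pinned_thread(int (*fn)(void *), void * arg,
                                                char * name, int cpu) {
    struct task_struct * thread;

    /* kthread_create() returns a stopped thread that has not run yet. */
    thread = kthread_create(fn, arg, "%s", name);
    if (IS_ERR(thread)) {
        return NULL;
    }

    /* Pin it to the requested CPU, then let it start running there. */
    set_cpus_allowed_ptr(thread, cpumask_of(cpu));
    wake_up_process(thread);

    return thread;
}
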
diff --git a/linux_module/palacios.h b/linux_module/palacios.h
index d96739c..1ac8c03 100644
--- a/linux_module/palacios.h
+++ b/linux_module/palacios.h
@@ -78,9 +78,6 @@ struct v3_guest {
     struct rb_root vm_ctrls;
     struct list_head exts;
 
-    struct completion start_done;
-    struct completion thread_done;
-
     dev_t vm_dev;
     struct cdev cdev;
 };

diff --git a/linux_module/vm.c b/linux_module/vm.c
index 08ae7e0..abb798f 100644
--- a/linux_module/vm.c
+++ b/linux_module/vm.c
@@ -246,22 +246,15 @@ extern u32 pg_frees;
 extern u32 mallocs;
 extern u32 frees;
 
-int start_palacios_vm(void * arg) {
-    struct v3_guest * guest = (struct v3_guest *)arg;
+int start_palacios_vm(struct v3_guest * guest) {
     int err;
-
-    daemonize(guest->name);
-    // allow_signal(SIGKILL);
-
-
     init_vm_extensions(guest);
 
     guest->v3_ctx = v3_create_vm(guest->img, (void *)guest, guest->name);
 
     if (guest->v3_ctx == NULL) {
         printk("palacios: failed to create vm\n");
-        complete(&(guest->start_done));
         return -1;
     }
 
@@ -280,7 +273,6 @@ int start_palacios_vm(void * arg) {
 
     if (err) {
         printk("Fails to add cdev\n");
         v3_free_vm(guest->v3_ctx);
-        complete(&(guest->start_done));
         return -1;
     }
 
@@ -288,12 +280,9 @@ int start_palacios_vm(void * arg) {
         printk("Fails to create device\n");
         cdev_del(&(guest->cdev));
         v3_free_vm(guest->v3_ctx);
-        complete(&(guest->start_done));
         return -1;
     }
 
-    complete(&(guest->start_done));
-
     printk("palacios: launching vm\n");
 
     if (v3_start_vm(guest->v3_ctx, 0xffffffff) < 0) {
@@ -304,8 +293,6 @@ int start_palacios_vm(void * arg) {
         return -1;
     }
 
-    complete(&(guest->thread_done));
-
     printk("palacios: vm completed. returning.\n");
 
     return 0;
@@ -319,7 +306,6 @@ int stop_palacios_vm(struct v3_guest * guest) {
 
     v3_stop_vm(guest->v3_ctx);
 
-    wait_for_completion(&(guest->thread_done));
 
     v3_free_vm(guest->v3_ctx);

diff --git a/linux_module/vm.h b/linux_module/vm.h
index 3423d34..f8df6c7 100644
--- a/linux_module/vm.h
+++ b/linux_module/vm.h
@@ -8,7 +8,7 @@
 
 #include "palacios.h"
 
-int start_palacios_vm(void * arg);
+int start_palacios_vm(struct v3_guest * guest);
 
 int stop_palacios_vm(struct v3_guest * guest);

diff --git a/palacios/include/palacios/vmm.h b/palacios/include/palacios/vmm.h
index 8364325..ad3f952 100644
--- a/palacios/include/palacios/vmm.h
+++ b/palacios/include/palacios/vmm.h
@@ -182,15 +182,16 @@ struct guest_info;
     })
 
 
-#ifdef V3_CONFIG_MULTITHREAD_OS
-#define V3_CREATE_THREAD(fn, arg, name)                                 \
-    do {                                                                \
-        extern struct v3_os_hooks * os_hooks;                           \
-        if ((os_hooks) && (os_hooks)->start_kernel_thread) {            \
-            (os_hooks)->start_kernel_thread(fn, arg, name);             \
-        }                                                               \
-    } while (0)
+
+#define V3_CREATE_THREAD(fn, arg, name) ({                              \
+        void * thread = NULL;                                           \
+        extern struct v3_os_hooks * os_hooks;                           \
+        if ((os_hooks) && (os_hooks)->start_kernel_thread) {            \
+            thread = (os_hooks)->start_kernel_thread(fn, arg, name);    \
+        }                                                               \
+        thread;                                                         \
+    })
 
 
@@ -223,7 +224,6 @@ struct guest_info;
         ret;                            \
     })
 
-#endif
 
 /* ** */
 
@@ -260,10 +260,8 @@ void v3_yield(struct guest_info * info);
 void v3_yield_cond(struct guest_info * info);
 void v3_print_cond(const char * fmt, ...);
 
-
-#ifdef V3_CONFIG_MULTITHREAD_OS
 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector);
-#endif
+
 
 v3_cpu_arch_t v3_get_cpu_type(int cpu_id);
 
@@ -310,7 +308,7 @@
 
 
 
-    void (*start_kernel_thread)(int (*fn)(void * arg), void * arg, char * thread_name);
+    void * (*start_kernel_thread)(int (*fn)(void * arg), void * arg, char * thread_name);
 
     void (*interrupt_cpu)(struct v3_vm_info * vm, int logical_cpu, int vector);
     void (*call_on_cpu)(int logical_cpu, void (*fn)(void * arg), void * arg);
     void * (*start_thread_on_cpu)(int cpu_id, int (*fn)(void * arg), void * arg, char * thread_name);
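
Because the reworked V3_CREATE_THREAD above is a GCC statement expression, it now evaluates to whatever the host's start_kernel_thread() hook returned, so Palacios code can keep and test the handle instead of firing and forgetting. A hypothetical caller is sketched below, assuming palacios/vmm.h is included; worker_fn and spawn_worker are placeholder names, not code from this patch.

static int worker_fn(void * arg) {
    /* placeholder thread body */
    return 0;
}

static int spawn_worker(void * arg) {
    void * thread = V3_CREATE_THREAD(worker_fn, arg, "v3-worker");

    if (thread == NULL) {
        /* Either no start_kernel_thread hook is registered, or creation
         * failed.  Note that the Linux stub hands back kthread_run()'s return
         * value, which is an ERR_PTR() on failure rather than NULL, so a host
         * may want to normalize that before Palacios sees it. */
        PrintError("Could not create worker thread\n");
        return -1;
    }

    return 0;
}
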
diff --git a/palacios/src/palacios/vmm.c b/palacios/src/palacios/vmm.c
index 4b9083b..ca12fc7 100644
--- a/palacios/src/palacios/vmm.c
+++ b/palacios/src/palacios/vmm.c
@@ -129,7 +129,7 @@ void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
 
 
 
-#ifdef V3_CONFIG_MULTITHREAD_OS
+
     if ((hooks) && (hooks->call_on_cpu)) {
 
         for (i = 0; i < num_cpus; i++) {
@@ -138,9 +138,7 @@ void Init_V3(struct v3_os_hooks * hooks, int num_cpus) {
 
             hooks->call_on_cpu(i, &init_cpu, (void *)(addr_t)i);
         }
     }
-#else
-    init_cpu(0);
-#endif
+
 }
 
 
@@ -162,7 +160,6 @@ void Shutdown_V3() {
 #endif
 
 
-#ifdef V3_CONFIG_MULTITHREAD_OS
     if ((os_hooks) && (os_hooks->call_on_cpu)) {
         for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
             if (v3_cpu_types[i] != V3_INVALID_CPU) {
@@ -171,9 +168,6 @@ void Shutdown_V3() {
             }
         }
     }
-#else
-    deinit_cpu(0);
-#endif
 
 }
 
@@ -240,11 +234,7 @@ static int start_core(void * p)
 
 
 // For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
-#ifdef V3_CONFIG_MULTITHREAD_OS
 #define MAX_CORES 32
-#else
-#define MAX_CORES 1
-#endif
 
 
 int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
@@ -276,9 +266,9 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
         return -1;
     }
 
-#ifdef V3_CONFIG_MULTITHREAD_OS
-    // spawn off new threads, for other cores
-    for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) {
+    // Spawn off threads for each core.
+    // We work backwards, so that core 0 is always started last.
+    for (i = 0, vcore_id = vm->num_cores - 1; (i < MAX_CORES) && (vcore_id >= 0); i++) {
         int major = 0;
         int minor = 0;
         struct guest_info * core = &(vm->cores[vcore_id]);
@@ -294,19 +284,12 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
             i--; // We reset the logical core idx. Not strictly necessary I guess...
         } else {
-
-            if (i == V3_Get_CPU()) {
-                // We skip the local CPU because it is reserved for vcore 0
-                continue;
-            }
-
             core_idx = i;
         }
 
         major = core_idx / 8;
         minor = core_idx % 8;
 
-
         if ((core_mask[major] & (0x1 << minor)) == 0) {
             PrintError("Logical CPU %d not available for virtual core %d; not started\n",
                        core_idx, vcore_id);
@@ -328,7 +311,6 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
         PrintDebug("run: core=%u, func=0x%p, arg=0x%p, name=%s\n",
                    core_idx, start_core, core, core->exec_name);
 
-        // TODO: actually manage these threads instead of just launching them
         core->pcpu_id = core_idx;
         core->core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
 
@@ -338,18 +320,7 @@ int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
             return -1;
         }
 
-        vcore_id++;
-    }
-#endif
-
-    sprintf(vm->cores[0].exec_name, "%s", vm->name);
-
-    vm->cores[0].pcpu_id = V3_Get_CPU();
-
-    if (start_core(&(vm->cores[0])) != 0) {
-        PrintError("Error starting VM core 0\n");
-        v3_stop_vm(vm);
-        return -1;
+        vcore_id--;
     }
 
 
@@ -623,7 +594,6 @@ void v3_print_cond(const char * fmt, ...) {
 }
 
 
-#ifdef V3_CONFIG_MULTITHREAD_OS
 
 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
     extern struct v3_os_hooks * os_hooks;
@@ -632,7 +602,6 @@ void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
         (os_hooks)->interrupt_cpu(vm, logical_cpu, vector);
     }
 }
-#endif
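
Taken together, the vmm.c changes turn v3_start_vm() into a pure launcher: it spawns one host thread per virtual core, working backwards so that vcore 0 is started last, and then returns instead of running vcore 0 on the caller's thread until the VM exits. Below is a rough host-side sketch of the resulting call sequence, using only entry points that appear in this patch; the context type struct v3_vm_info *, the hooks table, and host_launch_example itself are assumptions made for illustration.

/* Illustrative sketch of the asynchronous creation flow, not Palacios code. */
static int host_launch_example(struct v3_os_hooks * hooks, void * img, int num_cpus) {
    struct v3_vm_info * vm = NULL;

    Init_V3(hooks, num_cpus);          /* per-CPU init runs via the call_on_cpu hook */

    vm = v3_create_vm(img, NULL /* host private data */, "example-vm");
    if (vm == NULL) {
        return -1;
    }

    /* Every virtual core gets its own host thread via V3_CREATE_THREAD_ON_CPU,
     * with vcore 0 started last; v3_start_vm() returns once they are launched. */
    if (v3_start_vm(vm, 0xffffffff) < 0) {
        v3_free_vm(vm);
        return -1;
    }

    /* ... the VM now runs asynchronously; eventually: ... */
    v3_stop_vm(vm);
    v3_free_vm(vm);

    return 0;
}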