X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=linux_module%2Fpalacios-stubs.c;h=554fb9bf8f84d906f854ff18a6a0fa235359102c;hb=9aee4d463c4401f25142ba43ddbb720e4e129340;hp=36f7625916a2a69a445e43168085911715cce31c;hpb=a489c2ba0f26f4be1fa98d4af2c2bfa113c28dde;p=palacios.git

diff --git a/linux_module/palacios-stubs.c b/linux_module/palacios-stubs.c
index 36f7625..554fb9b 100644
--- a/linux_module/palacios-stubs.c
+++ b/linux_module/palacios-stubs.c
@@ -36,17 +36,52 @@ static struct v3_vm_info * irq_to_guest_map[256];
 
 extern unsigned int cpu_khz;
 
+extern int cpu_list[NR_CPUS];
+extern int cpu_list_len;
+
+
+
 /**
  * Prints a message to the console.
  */
 static void palacios_print(const char * fmt, ...) {
+
+#if V3_PRINTK_OLD_STYLE_OUTPUT
+
   va_list ap;
+
   va_start(ap, fmt);
   vprintk(fmt, ap);
   va_end(ap);
-  
+
+  return;
+
+#else
+
+  va_list ap;
+  char *buf;
+
+  // Allocate space atomically, in case we are called
+  // with a lock held
+  buf = kmalloc(V3_PRINTK_BUF_SIZE, GFP_ATOMIC);
+  if (!buf) {
+    printk("palacios: output skipped - unable to allocate\n");
+    return;
+  }
+
+  va_start(ap, fmt);
+  vsnprintf(buf,V3_PRINTK_BUF_SIZE, fmt, ap);
+  va_end(ap);
+
+  printk(KERN_INFO "palacios: %s",buf);
+
+  kfree(buf);
+  return;
+
+#endif
+
 }
@@ -160,27 +195,32 @@ struct lnx_thread_arg {
 
 static int lnx_thread_target(void * arg) {
     struct lnx_thread_arg * thread_info = (struct lnx_thread_arg *)arg;
-    
+    int ret = 0;
 
     /*
-      printk("Daemonizing new Palacios thread (name=%s)\n", thread_info->name);
+      INFO("Daemonizing new Palacios thread (name=%s)\n", thread_info->name);
       daemonize(thread_info->name);
       allow_signal(SIGKILL);
     */
 
-    thread_info->fn(thread_info->arg);
+    ret = thread_info->fn(thread_info->arg);
+
+
+    INFO("Palacios Thread (%s) EXITING\n", thread_info->name);
 
     kfree(thread_info); // handle cleanup
+
+    do_exit(ret);
-    return 0;
+    return 0; // should not get here.
 }
 
 /**
  * Creates a kernel thread.
  */
-static void
+static void *
 palacios_start_kernel_thread(
     int (*fn) (void * arg),
     void * arg,
@@ -192,8 +232,7 @@ palacios_start_kernel_thread(
     thread_info->arg = arg;
     thread_info->name = thread_name;
 
-    kthread_run( lnx_thread_target, thread_info, thread_name );
-    return;
+    return kthread_run( lnx_thread_target, thread_info, thread_name );
 }
@@ -216,11 +255,15 @@ palacios_start_thread_on_cpu(int cpu_id,
     thread = kthread_create( lnx_thread_target, thread_info, thread_name );
 
     if (IS_ERR(thread)) {
-        printk("Palacios error creating thread: %s\n", thread_name);
+        WARNING("Palacios error creating thread: %s\n", thread_name);
+        return NULL;
+    }
+
+    if (set_cpus_allowed_ptr(thread, cpumask_of(cpu_id)) != 0) {
+        kthread_stop(thread);
         return NULL;
     }
 
-    kthread_bind(thread, cpu_id);
     wake_up_process(thread);
 
     return thread;
@@ -234,10 +277,12 @@ palacios_start_thread_on_cpu(int cpu_id,
  */
 static int
 palacios_move_thread_to_cpu(int new_cpu_id,
-                            void * thread_ptr) {
+                            void * thread_ptr) {
     struct task_struct * thread = (struct task_struct *)thread_ptr;
 
-    if(thread == NULL){
+    INFO("Moving thread (%p) to cpu %d\n", thread, new_cpu_id);
+
+    if (thread == NULL) {
         thread = current;
     }
 
@@ -245,7 +290,7 @@ palacios_move_thread_to_cpu(int new_cpu_id,
      * Bind to the specified CPU.  When this call returns,
     * the thread should be running on the target CPU.
     */
-    return set_cpus_allowed(thread, cpumask_of_cpu(new_cpu_id));
+    return set_cpus_allowed_ptr(thread, cpumask_of(new_cpu_id));
 }
@@ -318,16 +363,16 @@ palacios_dispatch_interrupt( int vector, void * dev, struct pt_regs * regs ) {
 static int
 palacios_hook_interrupt(struct v3_vm_info * vm,
                         unsigned int vector ) {
-    printk("hooking vector %d\n", vector);
+    INFO("hooking vector %d\n", vector);
 
     if (irq_to_guest_map[vector]) {
-        printk(KERN_WARNING
+        WARNING(
                "%s: Interrupt vector %u is already hooked.\n",
                __func__, vector);
         return -1;
     }
 
-    printk(KERN_DEBUG
+    DEBUG(
            "%s: Hooking interrupt vector %u to vm %p.\n",
            __func__, vector, vm);
@@ -350,7 +395,7 @@ palacios_hook_interrupt(struct v3_vm_info * vm,
         int flag = 0;
         int error;
 
-        printk("hooking vector: %d\n", vector);
+        DEBUG("hooking vector: %d\n", vector);
 
         if (vector == 32) {
             flag = IRQF_TIMER;
@@ -365,8 +410,8 @@ palacios_hook_interrupt(struct v3_vm_info * vm,
                             &device_id);
 
         if (error) {
-            printk("error code for request_irq is %d\n", error);
-            panic("request vector %d failed",vector);
+            ERROR("error code for request_irq is %d\n", error);
+            panic("request vector %d failed", vector);
         }
     }
@@ -384,7 +429,7 @@ palacios_ack_interrupt(
 ) {
     ack_APIC_irq();
-    printk("Pretending to ack interrupt, vector=%d\n",vector);
+    DEBUG("Pretending to ack interrupt, vector=%d\n", vector);
     return 0;
 }
@@ -394,10 +439,10 @@ palacios_ack_interrupt(
 static unsigned int
 palacios_get_cpu_khz(void) {
-    printk("cpu_khz is %u\n",cpu_khz);
+    INFO("cpu_khz is %u\n", cpu_khz);
 
     if (cpu_khz == 0) {
-        printk("faking cpu_khz to 1000000\n");
+        INFO("faking cpu_khz to 1000000\n");
         return 1000000;
     } else {
         return cpu_khz;
@@ -492,12 +537,35 @@ static struct v3_os_hooks palacios_os_hooks = {
 
 int palacios_vmm_init( void ) {
+    int num_cpus = num_online_cpus();
+    char * cpu_mask = NULL;
+
+    if (cpu_list_len > 0) {
+        int major = 0;
+        int minor = 0;
+        int i = 0;
+
+        cpu_mask = kmalloc((num_cpus / 8) + 1, GFP_KERNEL);
+        memset(cpu_mask, 0, (num_cpus / 8) + 1);
+
+        for (i = 0; i < cpu_list_len; i++) {
+            if (cpu_list[i] >= num_cpus) {
+                WARNING("CPU (%d) exceeds number of available CPUs. Ignoring...\n", cpu_list[i]);
+                continue;
+            }
+
+            major = cpu_list[i] / 8;
+            minor = cpu_list[i] % 8;
+            *(cpu_mask + major) |= (0x1 << minor);
+        }
+    }
 
     memset(irq_to_guest_map, 0, sizeof(struct v3_vm_info *) * 256);
+
+    INFO("palacios_init starting - calling init_v3\n");
 
-    printk("palacios_init starting - calling init_v3\n");
-
-    Init_V3(&palacios_os_hooks, num_online_cpus());
+    Init_V3(&palacios_os_hooks, cpu_mask, num_cpus);
 
     return 0;
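
For reference, the new palacios_vmm_init() above packs the requested CPUs into a byte array before handing it to Init_V3(): one bit per CPU, eight CPUs per byte (byte index = cpu / 8, bit index = cpu % 8). The standalone sketch below restates only that packing so it can be read in isolation; the helper name build_cpu_mask and its parameters are illustrative assumptions, not code from the Palacios tree.

/* Sketch of the cpu_mask bit layout used by palacios_vmm_init() in the patch above.
 * build_cpu_mask() is a hypothetical helper; only the packing mirrors the patch. */
#include <string.h>

static void build_cpu_mask(unsigned char * mask, int num_cpus,
                           const int * cpu_list, int cpu_list_len) {
    int i;

    /* (num_cpus / 8) + 1 bytes are enough to hold one bit per CPU id */
    memset(mask, 0, (num_cpus / 8) + 1);

    for (i = 0; i < cpu_list_len; i++) {
        if (cpu_list[i] >= num_cpus) {
            continue;   /* same "exceeds available CPUs" check as the patch */
        }
        mask[cpu_list[i] / 8] |= (unsigned char)(0x1 << (cpu_list[i] % 8));
    }
}

With cpu_list = {0, 2, 9} and num_cpus = 16, the mask comes out as 0x05 in byte 0 and 0x02 in byte 1, which is the layout Init_V3() receives from the patch.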