extern unsigned int cpu_khz;
+extern int cpu_list[NR_CPUS];
+extern int cpu_list_len;
+
+
+
/**
 * Prints a message to the console.
 *
 * With V3_PRINTK_OLD_STYLE_OUTPUT the format string is handed straight
 * to vprintk().  Otherwise the message is rendered into a temporary
 * buffer so it can be prefixed with "palacios: " before being printed.
 * May be called with locks held, hence GFP_ATOMIC for the buffer.
 */
static void palacios_print(const char * fmt, ...) {

#if V3_PRINTK_OLD_STYLE_OUTPUT

    va_list ap;

    va_start(ap, fmt);
    vprintk(fmt, ap);
    va_end(ap);

    return;

#else

    va_list ap;
    char *buf;

    // Allocate space atomically, in case we are called
    // with a lock held
    buf = kmalloc(V3_PRINTK_BUF_SIZE, GFP_ATOMIC);
    if (!buf) {
	printk("palacios: output skipped - unable to allocate\n");
	return;
    }

    va_start(ap, fmt);
    // vsnprintf always NUL-terminates (for size > 0), truncating if needed
    vsnprintf(buf, V3_PRINTK_BUF_SIZE, fmt, ap);
    va_end(ap);

    printk(KERN_INFO "palacios: %s", buf);

    kfree(buf);

    return;

#endif

}
/**
 * Entry point for kernel threads started on behalf of Palacios.
 *
 * Runs the caller-supplied function from thread_info, logs the exit,
 * frees the argument wrapper (allocated by the thread-creation path),
 * and terminates the thread with the function's return value.
 */
static int lnx_thread_target(void * arg) {
    struct lnx_thread_arg * thread_info = (struct lnx_thread_arg *)arg;
    int ret = 0;
    /*
      INFO("Daemonizing new Palacios thread (name=%s)\n", thread_info->name);
      daemonize(thread_info->name);
      allow_signal(SIGKILL);
    */

    ret = thread_info->fn(thread_info->arg);

    // Log before kfree below - thread_info->name is still valid here
    INFO("Palacios Thread (%s) EXITING\n", thread_info->name);

    kfree(thread_info);
    // handle cleanup

    // Terminate the thread with the payload's return code;
    // do_exit() does not return.
    do_exit(ret);

    return 0; // should not get here.
}
/**
* Creates a kernel thread.
*/
-static void
+static void *
palacios_start_kernel_thread(
int (*fn) (void * arg),
void * arg,
thread_info->arg = arg;
thread_info->name = thread_name;
- kthread_run( lnx_thread_target, thread_info, thread_name );
- return;
+ return kthread_run( lnx_thread_target, thread_info, thread_name );
}
thread = kthread_create( lnx_thread_target, thread_info, thread_name );
if (IS_ERR(thread)) {
- printk("Palacios error creating thread: %s\n", thread_name);
+ WARNING("Palacios error creating thread: %s\n", thread_name);
+ return NULL;
+ }
+
+ if (set_cpus_allowed_ptr(thread, cpumask_of(cpu_id)) != 0) {
+ kthread_stop(thread);
return NULL;
}
- kthread_bind(thread, cpu_id);
wake_up_process(thread);
return thread;
}
+
+/**
+ * Rebind a kernel thread to the specified CPU
+ * The thread will be running on target CPU on return
+ * non-zero return means failure
+ */
+static int
+palacios_move_thread_to_cpu(int new_cpu_id,
+ void * thread_ptr) {
+ struct task_struct * thread = (struct task_struct *)thread_ptr;
+
+ INFO("Moving thread (%p) to cpu %d\n", thread, new_cpu_id);
+
+ if (thread == NULL) {
+ thread = current;
+ }
+
+ /*
+ * Bind to the specified CPU. When this call returns,
+ * the thread should be running on the target CPU.
+ */
+ return set_cpus_allowed_ptr(thread, cpumask_of(new_cpu_id));
+}
+
+
/**
* Returns the CPU ID that the caller is running on.
*/
static int
palacios_hook_interrupt(struct v3_vm_info * vm,
unsigned int vector ) {
- printk("hooking vector %d\n", vector);
+ INFO("hooking vector %d\n", vector);
if (irq_to_guest_map[vector]) {
- printk(KERN_WARNING
+ WARNING(
"%s: Interrupt vector %u is already hooked.\n",
__func__, vector);
return -1;
}
- printk(KERN_DEBUG
+ DEBUG(
"%s: Hooking interrupt vector %u to vm %p.\n",
__func__, vector, vm);
int flag = 0;
int error;
- printk("hooking vector: %d\n", vector);
+ DEBUG("hooking vector: %d\n", vector);
if (vector == 32) {
flag = IRQF_TIMER;
&device_id);
if (error) {
- printk("error code for request_irq is %d\n", error);
- panic("request vector %d failed",vector);
+ ERROR("error code for request_irq is %d\n", error);
+ panic("request vector %d failed", vector);
}
}
)
{
ack_APIC_irq();
- printk("Pretending to ack interrupt, vector=%d\n",vector);
+ DEBUG("Pretending to ack interrupt, vector=%d\n", vector);
return 0;
}
static unsigned int
palacios_get_cpu_khz(void)
{
- printk("cpu_khz is %u\n",cpu_khz);
+ INFO("cpu_khz is %u\n", cpu_khz);
if (cpu_khz == 0) {
- printk("faking cpu_khz to 1000000\n");
+ INFO("faking cpu_khz to 1000000\n");
return 1000000;
} else {
return cpu_khz;
.interrupt_cpu = palacios_interrupt_cpu,
.call_on_cpu = palacios_xcall,
.start_thread_on_cpu = palacios_start_thread_on_cpu,
+ .move_thread_to_cpu = palacios_move_thread_to_cpu,
};
int palacios_vmm_init( void )
{
+ int num_cpus = num_online_cpus();
+ char * cpu_mask = NULL;
+
+ if (cpu_list_len > 0) {
+ int major = 0;
+ int minor = 0;
+ int i = 0;
+
+ cpu_mask = kmalloc((num_cpus / 8) + 1, GFP_KERNEL);
+ memset(cpu_mask, 0, (num_cpus / 8) + 1);
+
+ for (i = 0; i < cpu_list_len; i++) {
+ if (cpu_list[i] >= num_cpus) {
+ WARNING("CPU (%d) exceeds number of available CPUs. Ignoring...\n", cpu_list[i]);
+ continue;
+ }
+
+ major = cpu_list[i] / 8;
+ minor = cpu_list[i] % 8;
+ *(cpu_mask + major) |= (0x1 << minor);
+ }
+ }
+
memset(irq_to_guest_map, 0, sizeof(struct v3_vm_info *) * 256);
+
+ INFO("palacios_init starting - calling init_v3\n");
- printk("palacios_init starting - calling init_v3\n");
-
- Init_V3(&palacios_os_hooks, num_online_cpus());
+ Init_V3(&palacios_os_hooks, cpu_mask, num_cpus);
return 0;