X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=linux_module%2Fpalacios-stubs.c;h=78759d0886c9e1534174c11e767fa98c75487887;hb=a04338047280b77bdaa3882b748a442d9c9c06a8;hp=554fb9bf8f84d906f854ff18a6a0fa235359102c;hpb=9d3c2538b9deab4c102cc9c4312517e1df752488;p=palacios.git

diff --git a/linux_module/palacios-stubs.c b/linux_module/palacios-stubs.c
index 554fb9b..78759d0 100644
--- a/linux_module/palacios-stubs.c
+++ b/linux_module/palacios-stubs.c
@@ -40,12 +40,48 @@
 extern int cpu_list[NR_CPUS];
 extern int cpu_list_len;
 
+static char *print_buffer[NR_CPUS];
+
+static void deinit_print_buffers(void)
+{
+    int i;
+
+    for (i=0;i<NR_CPUS;i++) {
+	if (print_buffer[i]) {
+	    kfree(print_buffer[i]);
+	    print_buffer[i]=0;
+	}
+    }
+}
+
+static int init_print_buffers(void)
+{
+    int i;
+
+    memset(print_buffer,0,sizeof(char *)*NR_CPUS);
+
+    for (i=0;i<NR_CPUS;i++) {
+	print_buffer[i] = kmalloc(V3_PRINTK_BUF_SIZE,GFP_KERNEL);
+	if (!print_buffer[i]) {
+	    ERROR("Cannot allocate print buffer for cpu %d\n",i);
+	    deinit_print_buffers();
+	    return -1;
+	}
+	memset(print_buffer[i],0,V3_PRINTK_BUF_SIZE);
+    }
+
+    return 0;
+}
+
[...]

@@ ... @@ static int lnx_thread_target(void * arg) {
 	thread_info->fn(thread_info->arg);
 
-	INFO("Palacios Thread (%s) EXITTING\n", thread_info->name);
+	INFO("Palacios Thread (%s) EXITING\n", thread_info->name);
 
 	kfree(thread_info);
 	// handle cleanup 
 
@@ -220,7 +270,7 @@ static int lnx_thread_target(void * arg) {
 /**
  * Creates a kernel thread.
  */
-static void *
+void *
 palacios_start_kernel_thread(
 	int (*fn)		(void * arg),
 	void * arg,
@@ -239,7 +289,7 @@ palacios_start_kernel_thread(
 /**
  * Starts a kernel thread on the specified CPU.
  */
-static void *
+void *
 palacios_start_thread_on_cpu(int cpu_id,
 			     int (*fn)(void * arg),
 			     void * arg,
@@ -275,7 +325,7 @@ palacios_start_thread_on_cpu(int cpu_id,
  * The thread will be running on target CPU on return
  * non-zero return means failure
  */
-static int
+int
 palacios_move_thread_to_cpu(int new_cpu_id,
 			    void * thread_ptr) {
 
     struct task_struct * thread = (struct task_struct *)thread_ptr;
@@ -297,7 +347,7 @@ palacios_move_thread_to_cpu(int new_cpu_id,
 /**
  * Returns the CPU ID that the caller is running on.
  */
-static unsigned int
+unsigned int
 palacios_get_cpu(void)
 {
 
@@ -436,7 +486,7 @@ palacios_ack_interrupt(
 /**
  * Returns the CPU frequency in kilohertz.
  */
-static unsigned int
+unsigned int
 palacios_get_cpu_khz(void)
 {
     INFO("cpu_khz is %u\n", cpu_khz);
@@ -452,21 +502,37 @@ palacios_get_cpu_khz(void)
 
 /**
  * Yield the CPU so other host OS tasks can run.
+ * This will return immediately if there is no other thread that is runnable,
+ * and there is no real bound on how long it will yield.
  */
-static void
+void
 palacios_yield_cpu(void)
 {
     schedule();
     return;
 }
 
+/**
+ * Yield the CPU for at least the given number of microseconds, so other
+ * host OS tasks can run. Unlike palacios_yield_cpu(), this sleeps even if
+ * no other thread is runnable; the delay is rounded up to the kernel timer
+ * tick (jiffy), so there is no real bound on how long it will yield.
+ */
+void palacios_yield_cpu_timed(unsigned int us)
+{
+    unsigned int uspj = 1000000U / HZ;                     /* microseconds per jiffy */
+    unsigned int jiffies = us / uspj + ((us % uspj) != 0); /* round up to whole jiffies */
+
+    set_current_state(TASK_INTERRUPTIBLE);
+
+    schedule_timeout(jiffies);
+}
 
 /**
  * Allocates a mutex.
  * Returns NULL on failure.
  */
-static void *
+void *
 palacios_mutex_alloc(void)
 {
     spinlock_t *lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
@@ -481,7 +547,7 @@ palacios_mutex_alloc(void)
 /**
  * Frees a mutex.
  */
-static void
+void
 palacios_mutex_free(void * mutex) {
     kfree(mutex);
 }
@@ -489,15 +555,30 @@ palacios_mutex_free(void * mutex) {
 /**
  * Locks a mutex.
  */
-static void
+void
 palacios_mutex_lock(void * mutex, int must_spin) {
     spin_lock((spinlock_t *)mutex);
 }
 
+
+/**
+ * Locks a mutex, disabling interrupts on this core.
+ */
+void *
+palacios_mutex_lock_irqsave(void * mutex, int must_spin) {
+
+    unsigned long flags;
+
+    spin_lock_irqsave((spinlock_t *)mutex,flags);
+
+    return (void *)flags;
+}
+
+
 /**
  * Unlocks a mutex.
  */
-static void
+void
 palacios_mutex_unlock(
 	void * mutex
 )
@@ -505,6 +586,17 @@ palacios_mutex_unlock(
     spin_unlock((spinlock_t *)mutex);
 }
 
+
+/**
+ * Unlocks a mutex, restoring the interrupt state saved when it was locked.
+ */
+void
+palacios_mutex_unlock_irqrestore(void *mutex, void *flags)
+{
+    // This is correct, flags is opaque
+    spin_unlock_irqrestore((spinlock_t *)mutex,(unsigned long)flags);
+}
+
 /**
  * Structure used by the Palacios hypervisor to interface with the host kernel.
  */
@@ -521,15 +613,18 @@ static struct v3_os_hooks palacios_os_hooks = {
 	.get_cpu_khz		= palacios_get_cpu_khz,
 	.start_kernel_thread	= palacios_start_kernel_thread,
 	.yield_cpu		= palacios_yield_cpu,
+	.yield_cpu_timed	= palacios_yield_cpu_timed,
 	.mutex_alloc		= palacios_mutex_alloc,
 	.mutex_free		= palacios_mutex_free,
 	.mutex_lock		= palacios_mutex_lock,
 	.mutex_unlock		= palacios_mutex_unlock,
+	.mutex_lock_irqsave	= palacios_mutex_lock_irqsave,
+	.mutex_unlock_irqrestore = palacios_mutex_unlock_irqrestore,
 	.get_cpu		= palacios_get_cpu,
 	.interrupt_cpu		= palacios_interrupt_cpu,
 	.call_on_cpu		= palacios_xcall,
 	.start_thread_on_cpu	= palacios_start_thread_on_cpu,
-	.move_thread_to_cpu     = palacios_move_thread_to_cpu,
+	.move_thread_to_cpu	= palacios_move_thread_to_cpu,
 };
 
 
@@ -546,6 +641,12 @@ int palacios_vmm_init( void )
     int i = 0;
 
     cpu_mask = kmalloc((num_cpus / 8) + 1, GFP_KERNEL);
+
+    if (!cpu_mask) {
+	ERROR("Cannot allocate cpu mask\n");
+	return -1;
+    }
+
     memset(cpu_mask, 0, (num_cpus / 8) + 1);
 
     for (i = 0; i < cpu_list_len; i++) {
@@ -563,8 +664,14 @@ int palacios_vmm_init( void )
 
     memset(irq_to_guest_map, 0, sizeof(struct v3_vm_info *) * 256);
 
+    if (init_print_buffers()) {
+	ERROR("Cannot initialize print buffers\n");
+	kfree(cpu_mask);
+	return -1;
+    }
+
     INFO("palacios_init starting - calling init_v3\n");
-    
+
     Init_V3(&palacios_os_hooks, cpu_mask, num_cpus);
 
     return 0;
@@ -576,5 +683,9 @@ int palacios_vmm_exit( void ) {
 
     Shutdown_V3();
 
+    INFO("palacios shutdown complete\n");
+
+    deinit_print_buffers();
+
     return 0;
 }
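
A note on the microsecond argument to the new yield_cpu_timed hook: schedule_timeout() counts in jiffies, so the microsecond request has to be converted, and anything shorter than one timer tick (1/HZ seconds) still sleeps for a full tick. The arithmetic can be checked in isolation; the sketch below is ordinary user-space C, not part of the commit (the function name us_to_jiffies and the example HZ value are assumptions made here for illustration):

    #include <stdio.h>

    #define HZ 250  /* example kernel tick rate; real kernels use 100-1000 */

    /* Round a microsecond delay up to whole jiffies, mirroring the
     * conversion palacios_yield_cpu_timed() performs before calling
     * schedule_timeout(). */
    static unsigned int us_to_jiffies(unsigned int us)
    {
        unsigned int uspj = 1000000U / HZ;      /* microseconds per jiffy */
        return us / uspj + ((us % uspj) != 0);  /* ceiling division */
    }

    int main(void)
    {
        /* With HZ=250 a jiffy is 4000 us, so a 100 us request still
         * sleeps for one full tick. */
        printf("100 us  -> %u jiffies\n", us_to_jiffies(100));
        printf("8000 us -> %u jiffies\n", us_to_jiffies(8000));
        return 0;
    }

Working through microseconds-per-jiffy rather than computing us * HZ / 1000000 directly also avoids 32-bit overflow for delays longer than a few seconds.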
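
The subtle part of the new lock hooks is the interrupt-state round trip: palacios_mutex_lock_irqsave() returns the flags word saved by spin_lock_irqsave() cast to a void *, and the caller must hand that same opaque value back to palacios_mutex_unlock_irqrestore(). A minimal caller-side sketch, assuming kernel context; the lock variable and the critical-section body are hypothetical, for illustration only:

    /* Assumes a lock previously created with palacios_mutex_alloc(). */
    static void *demo_lock;
    static unsigned long demo_counter;

    static void demo_critical_section(void)
    {
        void *flags;

        /* Disables interrupts on this core and returns the saved
         * interrupt state as an opaque pointer-sized value. */
        flags = palacios_mutex_lock_irqsave(demo_lock, 1);

        demo_counter++;  /* work that must not race with an IRQ handler */

        /* Pass the same opaque value back to restore interrupt state. */
        palacios_mutex_unlock_irqrestore(demo_lock, flags);
    }

Smuggling the unsigned long flags through a void * works because the value is pointer-sized on the platforms Palacios targets, and it keeps Linux-specific types out of the v3_os_hooks interface; that is what the "flags is opaque" comment in the diff is pointing at.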