X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=linux_module%2Fpalacios-stubs.c;h=08021e4a91b496ab8ce6cc66ae18afa1c910a1c9;hb=82071a7f5f0b18fbf1a4adc2a37fed1624572a79;hp=de6dddda777ad12dd94d1a45683f82b7d6b2dadb;hpb=ad00932be31579b80f80c1cc67677bd3c263fd89;p=palacios.git diff --git a/linux_module/palacios-stubs.c b/linux_module/palacios-stubs.c index de6dddd..08021e4 100644 --- a/linux_module/palacios-stubs.c +++ b/linux_module/palacios-stubs.c @@ -14,22 +14,30 @@ #include #include #include +#include #include #include #include "palacios.h" +#include "mm.h" +#include "memcheck.h" +#include "lockcheck.h" - -#include "mm.h" +// The following can be used to track heap bugs +// zero memory after allocation +#define ALLOC_ZERO_MEM 0 +// pad allocations by this many bytes on both ends of block +#define ALLOC_PAD 0 u32 pg_allocs = 0; u32 pg_frees = 0; u32 mallocs = 0; u32 frees = 0; - +u32 vmallocs = 0; +u32 vfrees = 0; static struct v3_vm_info * irq_to_guest_map[256]; @@ -40,12 +48,48 @@ extern int cpu_list[NR_CPUS]; extern int cpu_list_len; +static char *print_buffer[NR_CPUS]; + +static void deinit_print_buffers(void) +{ + int i; + + for (i=0;i=0) { + printk(KERN_INFO "palacios (pcore %u vm %s vcore %u): %s", + cpu, + guest->name, + vcore, + buf); + } else { + printk(KERN_INFO "palacios (pcore %u vm %s): %s", + cpu, + guest->name, + buf); + } + } else { + printk(KERN_INFO "palacios (pcore %u): %s", + cpu, + buf); + } + return; #endif @@ -85,17 +161,29 @@ static void palacios_print(const char * fmt, ...) { } - /* * Allocates a contiguous region of pages of the requested size. * Returns the physical address of the first page in the region. */ -static void * palacios_allocate_pages(int num_pages, unsigned int alignment) { +void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int constraints) { void * pg_addr = NULL; - pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment); + if (num_pages<=0) { + ERROR("ALERT ALERT Attempt to allocate zero or fewer pages\n"); + return NULL; + } + + pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id, constraints); + + if (!pg_addr) { + ERROR("ALERT ALERT Page allocation has FAILED Warning\n"); + return NULL; + } + pg_allocs += num_pages; + MEMCHECK_ALLOC_PAGES(pg_addr,num_pages*4096); + return pg_addr; } @@ -106,48 +194,115 @@ static void * palacios_allocate_pages(int num_pages, unsigned int alignment) { * a single call while palacios_free_page() only frees a single page. */ -static void palacios_free_pages(void * page_paddr, int num_pages) { +void palacios_free_pages(void * page_paddr, int num_pages) { pg_frees += num_pages; free_palacios_pgs((uintptr_t)page_paddr, num_pages); + MEMCHECK_FREE_PAGES(page_paddr,num_pages*4096); + +} + + +void * +palacios_alloc_extended(unsigned int size, unsigned int flags, int node) { + void * addr = NULL; + + if (size==0) { + // note that modern kernels will respond to a zero byte + // kmalloc and return the address 0x10... 
In Palacios, + // we will simply not allow 0 byte allocs at all, of any kind + ERROR("ALERT ALERT attempt to kmalloc zero bytes rejected\n"); + return NULL; + } + + if (node==-1) { + addr = kmalloc(size+2*ALLOC_PAD, flags); + } else { + addr = kmalloc_node(size+2*ALLOC_PAD, flags, node); + } + + if (!addr) { + ERROR("ALERT ALERT kmalloc has FAILED FAILED FAILED\n"); + return NULL; + } + + mallocs++; + +#if ALLOC_ZERO_MEM + memset(addr,0,size+2*ALLOC_PAD); +#endif + + MEMCHECK_KMALLOC(addr,size+2*ALLOC_PAD); + + return addr+ALLOC_PAD; +} + +void * +palacios_valloc(unsigned int size) +{ + void * addr = NULL; + + if (size==0) { + ERROR("ALERT ALERT attempt to vmalloc zero bytes rejected\n"); + return NULL; + } + + addr = vmalloc(size); + + if (!addr) { + ERROR("ALERT ALERT vmalloc has FAILED FAILED FAILED\n"); + return NULL; + } + + vmallocs++; + + MEMCHECK_VMALLOC(addr,size); + + return addr; } +void palacios_vfree(void *p) +{ + vfree(p); + vfrees++; + MEMCHECK_VFREE(p); +} /** * Allocates 'size' bytes of kernel memory. * Returns the kernel virtual address of the memory allocated. */ -static void * +void * palacios_alloc(unsigned int size) { - void * addr = NULL; + // It is very important that this test remains since + // this function is used extensively throughout palacios and the linux + // module, both in places where interrupts are off and where they are on + // a GFP_KERNEL call, when done with interrupts off can lead to DEADLOCK if (irqs_disabled()) { - addr = kmalloc(size, GFP_ATOMIC); + return palacios_alloc_extended(size,GFP_ATOMIC,-1); } else { - addr = kmalloc(size, GFP_KERNEL); + return palacios_alloc_extended(size,GFP_KERNEL,-1); } - mallocs++; - - return addr; } /** * Frees memory that was previously allocated by palacios_alloc(). */ -static void +void palacios_free( void * addr ) { frees++; - kfree(addr); - return; + kfree(addr-ALLOC_PAD); + MEMCHECK_KFREE(addr-ALLOC_PAD); } /** * Converts a kernel virtual address to the corresponding physical address. */ -static void * +void * palacios_vaddr_to_paddr( void * vaddr ) @@ -159,7 +314,7 @@ palacios_vaddr_to_paddr( /** * Converts a physical address to the corresponding kernel virtual address. */ -static void * +void * palacios_paddr_to_vaddr( void * paddr ) @@ -170,8 +325,6 @@ palacios_paddr_to_vaddr( /** * Runs a function on the specified CPU. */ - -// For now, do call only on local CPU static void palacios_xcall( int cpu_id, @@ -187,10 +340,13 @@ palacios_xcall( return; } + +#define MAX_THREAD_NAME 32 + struct lnx_thread_arg { int (*fn)(void * arg); void * arg; - char * name; + char name[MAX_THREAD_NAME]; }; static int lnx_thread_target(void * arg) { @@ -206,11 +362,11 @@ static int lnx_thread_target(void * arg) { ret = thread_info->fn(thread_info->arg); - kfree(thread_info); - // handle cleanup - - printk("Palacios Thread (%s) EXITTING\n", thread_info->name); + INFO("Palacios Thread (%s) EXITING\n", thread_info->name); + + palacios_free(thread_info); + // handle cleanup do_exit(ret); @@ -220,47 +376,61 @@ static int lnx_thread_target(void * arg) { /** * Creates a kernel thread. 
*/ -static void * +void * palacios_start_kernel_thread( int (*fn) (void * arg), void * arg, char * thread_name) { - struct lnx_thread_arg * thread_info = kmalloc(sizeof(struct lnx_thread_arg), GFP_KERNEL); + struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg)); + + if (!thread_info) { + ERROR("ALERT ALERT Unable to allocate thread\n"); + return NULL; + } thread_info->fn = fn; thread_info->arg = arg; - thread_info->name = thread_name; + strncpy(thread_info->name,thread_name,MAX_THREAD_NAME); + thread_info->name[MAX_THREAD_NAME-1] =0; - return kthread_run( lnx_thread_target, thread_info, thread_name ); + return kthread_run( lnx_thread_target, thread_info, thread_info->name ); } /** * Starts a kernel thread on the specified CPU. */ -static void * +void * palacios_start_thread_on_cpu(int cpu_id, int (*fn)(void * arg), void * arg, char * thread_name ) { struct task_struct * thread = NULL; - struct lnx_thread_arg * thread_info = kmalloc(sizeof(struct lnx_thread_arg), GFP_KERNEL); + struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg)); + + if (!thread_info) { + ERROR("ALERT ALERT Unable to allocate thread to start on cpu\n"); + return NULL; + } thread_info->fn = fn; thread_info->arg = arg; - thread_info->name = thread_name; + strncpy(thread_info->name,thread_name,MAX_THREAD_NAME); + thread_info->name[MAX_THREAD_NAME-1] =0; - - thread = kthread_create( lnx_thread_target, thread_info, thread_name ); + thread = kthread_create( lnx_thread_target, thread_info, thread_info->name ); if (IS_ERR(thread)) { - WARNING("Palacios error creating thread: %s\n", thread_name); + WARNING("Palacios error creating thread: %s\n", thread_info->name); + palacios_free(thread_info); return NULL; } if (set_cpus_allowed_ptr(thread, cpumask_of(cpu_id)) != 0) { + WARNING("Attempt to start thread on disallowed CPU\n"); kthread_stop(thread); + palacios_free(thread_info); return NULL; } @@ -275,7 +445,7 @@ palacios_start_thread_on_cpu(int cpu_id, * The thread will be running on target CPU on return * non-zero return means failure */ -static int +int palacios_move_thread_to_cpu(int new_cpu_id, void * thread_ptr) { struct task_struct * thread = (struct task_struct *)thread_ptr; @@ -297,7 +467,7 @@ palacios_move_thread_to_cpu(int new_cpu_id, /** * Returns the CPU ID that the caller is running on. */ -static unsigned int +unsigned int palacios_get_cpu(void) { @@ -388,7 +558,8 @@ palacios_hook_interrupt(struct v3_vm_info * vm, //set_idtvec_handler(vector, palacios_dispatch_interrupt); if (vector < 32) { - panic("unexpected vector for hooking\n"); + ERROR("unexpected vector for hooking\n"); + return -1; } else { int device_id = 0; @@ -411,7 +582,8 @@ palacios_hook_interrupt(struct v3_vm_info * vm, if (error) { ERROR("error code for request_irq is %d\n", error); - panic("request vector %d failed",vector); + ERROR("request vector %d failed", vector); + return -1; } } @@ -429,17 +601,17 @@ palacios_ack_interrupt( ) { ack_APIC_irq(); - DEBUG("Pretending to ack interrupt, vector=%d\n",vector); + DEBUG("Pretending to ack interrupt, vector=%d\n", vector); return 0; } /** * Returns the CPU frequency in kilohertz. */ -static unsigned int +unsigned int palacios_get_cpu_khz(void) { - INFO("cpu_khz is %u\n",cpu_khz); + INFO("cpu_khz is %u\n", cpu_khz); if (cpu_khz == 0) { INFO("faking cpu_khz to 1000000\n"); @@ -452,64 +624,151 @@ palacios_get_cpu_khz(void) /** * Yield the CPU so other host OS tasks can run. 
+ * This will return immediately if there is no other thread that is runnable + * And there is no real bound on how long it will yield */ -static void +void palacios_yield_cpu(void) { schedule(); return; } +/** + * Yield the CPU so other host OS tasks can run. + * Given now immediately if there is no other thread that is runnable + * And there is no real bound on how long it will yield + */ +void palacios_sleep_cpu(unsigned int us) +{ + + set_current_state(TASK_INTERRUPTIBLE); + if (us) { + unsigned int uspj = 1000000U/HZ; + unsigned int jiffies = us/uspj + ((us%uspj) !=0); // ceiling + schedule_timeout(jiffies); + } else { + schedule(); + } + return; +} +void palacios_wakeup_cpu(void *thread) +{ + wake_up_process(thread); + return; +} /** * Allocates a mutex. * Returns NULL on failure. */ -static void * +void * palacios_mutex_alloc(void) { - spinlock_t *lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); + spinlock_t *lock = palacios_alloc(sizeof(spinlock_t)); if (lock) { spin_lock_init(lock); + LOCKCHECK_ALLOC(lock); + } else { + ERROR("ALERT ALERT Unable to allocate lock\n"); + return NULL; } return lock; } +void palacios_mutex_init(void *mutex) +{ + spinlock_t *lock = (spinlock_t*)mutex; + + if (lock) { + spin_lock_init(lock); + LOCKCHECK_ALLOC(lock); + } +} + +void palacios_mutex_deinit(void *mutex) +{ + spinlock_t *lock = (spinlock_t*)mutex; + + if (lock) { + // no actual spin_lock_deinit on linux + // our purpose here is to drive the lock checker + LOCKCHECK_FREE(lock); + } +} + + /** * Frees a mutex. */ -static void +void palacios_mutex_free(void * mutex) { - kfree(mutex); + palacios_free(mutex); + LOCKCHECK_FREE(mutex); } /** * Locks a mutex. */ -static void +void palacios_mutex_lock(void * mutex, int must_spin) { + + LOCKCHECK_LOCK_PRE(mutex); spin_lock((spinlock_t *)mutex); + LOCKCHECK_LOCK_POST(mutex); } + +/** + * Locks a mutex, disabling interrupts on this core + */ +void * +palacios_mutex_lock_irqsave(void * mutex, int must_spin) { + + unsigned long flags; + + LOCKCHECK_LOCK_IRQSAVE_PRE(mutex,flags); + spin_lock_irqsave((spinlock_t *)mutex,flags); + LOCKCHECK_LOCK_IRQSAVE_POST(mutex,flags); + + return (void *)flags; +} + + /** * Unlocks a mutex. */ -static void +void palacios_mutex_unlock( void * mutex ) { + LOCKCHECK_UNLOCK_PRE(mutex); spin_unlock((spinlock_t *)mutex); + LOCKCHECK_UNLOCK_POST(mutex); +} + + +/** + * Unlocks a mutex and restores previous interrupt state on this core + */ +void +palacios_mutex_unlock_irqrestore(void *mutex, void *flags) +{ + LOCKCHECK_UNLOCK_IRQRESTORE_PRE(mutex,(unsigned long)flags); + // This is correct, flags is opaque + spin_unlock_irqrestore((spinlock_t *)mutex,(unsigned long)flags); + LOCKCHECK_UNLOCK_IRQRESTORE_POST(mutex,(unsigned long)flags); } /** * Structure used by the Palacios hypervisor to interface with the host kernel. 
*/ static struct v3_os_hooks palacios_os_hooks = { - .print = palacios_print, + .print = palacios_print_scoped, .allocate_pages = palacios_allocate_pages, .free_pages = palacios_free_pages, .malloc = palacios_alloc, @@ -521,21 +780,25 @@ static struct v3_os_hooks palacios_os_hooks = { .get_cpu_khz = palacios_get_cpu_khz, .start_kernel_thread = palacios_start_kernel_thread, .yield_cpu = palacios_yield_cpu, + .sleep_cpu = palacios_sleep_cpu, + .wakeup_cpu = palacios_wakeup_cpu, .mutex_alloc = palacios_mutex_alloc, .mutex_free = palacios_mutex_free, .mutex_lock = palacios_mutex_lock, .mutex_unlock = palacios_mutex_unlock, + .mutex_lock_irqsave = palacios_mutex_lock_irqsave, + .mutex_unlock_irqrestore= palacios_mutex_unlock_irqrestore, .get_cpu = palacios_get_cpu, .interrupt_cpu = palacios_interrupt_cpu, .call_on_cpu = palacios_xcall, .start_thread_on_cpu = palacios_start_thread_on_cpu, - .move_thread_to_cpu = palacios_move_thread_to_cpu, + .move_thread_to_cpu = palacios_move_thread_to_cpu, }; -int palacios_vmm_init( void ) +int palacios_vmm_init( char *options ) { int num_cpus = num_online_cpus(); char * cpu_mask = NULL; @@ -545,7 +808,13 @@ int palacios_vmm_init( void ) int minor = 0; int i = 0; - cpu_mask = kmalloc((num_cpus / 8) + 1, GFP_KERNEL); + cpu_mask = palacios_alloc((num_cpus / 8) + 1); + + if (!cpu_mask) { + ERROR("Cannot allocate cpu mask\n"); + return -1; + } + memset(cpu_mask, 0, (num_cpus / 8) + 1); for (i = 0; i < cpu_list_len; i++) { @@ -563,9 +832,15 @@ int palacios_vmm_init( void ) memset(irq_to_guest_map, 0, sizeof(struct v3_vm_info *) * 256); + if (init_print_buffers()) { + ERROR("Cannot initialize print buffers\n"); + palacios_free(cpu_mask); + return -1; + } + INFO("palacios_init starting - calling init_v3\n"); - - Init_V3(&palacios_os_hooks, cpu_mask, num_cpus); + + Init_V3(&palacios_os_hooks, cpu_mask, num_cpus, options); return 0; @@ -576,5 +851,9 @@ int palacios_vmm_exit( void ) { Shutdown_V3(); + INFO("palacios shutdown complete\n"); + + deinit_print_buffers(); + return 0; }
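
For context, the hunks above export several new host-interface primitives from this file: palacios_mutex_lock_irqsave() / palacios_mutex_unlock_irqrestore(), palacios_sleep_cpu() / palacios_wakeup_cpu(), and the named-thread form of palacios_start_kernel_thread(). A minimal, hypothetical usage sketch (not part of this patch; it assumes palacios.h declares these functions as defined here, and every example_* identifier is invented purely for illustration) might look like:

/* Hypothetical illustration only; not part of the patch above.
 * Shows how module code might use the new lock, sleep, and wakeup
 * hooks, assuming palacios.h declares them as defined in this file. */
#include "palacios.h"

static void * queue_lock;       /* from palacios_mutex_alloc() */
static void * consumer_thread;  /* from palacios_start_kernel_thread() */

/* Consumer: sleeps up to 1 ms at a time instead of spinning.
 * (A real thread function would also arrange for clean termination.) */
static int example_consume(void * arg) {
    while (1) {
        /* ... dequeue and process work under queue_lock ... */
        palacios_sleep_cpu(1000);   /* microseconds; 0 would just yield */
    }
    return 0;
}

static int example_init(void) {
    queue_lock = palacios_mutex_alloc();
    if (!queue_lock) {
        return -1;
    }
    consumer_thread = palacios_start_kernel_thread(example_consume, NULL,
                                                   "example-consumer");
    if (!consumer_thread) {
        palacios_mutex_free(queue_lock);
        return -1;
    }
    return 0;
}

/* Producer: safe whether interrupts are on or off, because it uses the
 * irqsave variant and hands the opaque flags value back on unlock. */
static void example_produce(void) {
    void * flags = palacios_mutex_lock_irqsave(queue_lock, 1);
    /* ... enqueue work here ... */
    palacios_mutex_unlock_irqrestore(queue_lock, flags);

    /* Kick the consumer if it is sleeping in palacios_sleep_cpu(). */
    palacios_wakeup_cpu(consumer_thread);
}

Two details of the patched code are worth noting when using it this way: the value returned by palacios_mutex_lock_irqsave() is opaque and must be passed back unchanged to palacios_mutex_unlock_irqrestore(), and palacios_sleep_cpu() rounds its microsecond argument up to whole jiffies, so short sleeps can last noticeably longer than requested.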