X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=linux_module%2Fpalacios-stubs.c;h=08021e4a91b496ab8ce6cc66ae18afa1c910a1c9;hb=82071a7f5f0b18fbf1a4adc2a37fed1624572a79;hp=5bf69a1ee54557eff2cccdc4810c86ee95b030ed;hpb=3f777ee2e04c359758b0658b04ff8f076d38f2f9;p=palacios.git

diff --git a/linux_module/palacios-stubs.c b/linux_module/palacios-stubs.c
index 5bf69a1..08021e4 100644
--- a/linux_module/palacios-stubs.c
+++ b/linux_module/palacios-stubs.c
@@ -14,16 +14,17 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
 #include "palacios.h"
 
-
-
-
 #include "mm.h"
 
+#include "memcheck.h"
+#include "lockcheck.h"
+
 // The following can be used to track heap bugs
 // zero memory after allocation
 #define ALLOC_ZERO_MEM 0
@@ -35,7 +36,8 @@
 u32 pg_allocs = 0;
 u32 pg_frees = 0;
 u32 mallocs = 0;
 u32 frees = 0;
-
+u32 vmallocs = 0;
+u32 vfrees = 0;
 
 static struct v3_vm_info * irq_to_guest_map[256];
@@ -87,7 +89,7 @@ static int init_print_buffers(void)
 /**
  * Prints a message to the console.
  */
-void palacios_print(const char *fmt, ...) {
+void palacios_print_scoped(void * vm, int vcore, const char *fmt, ...) {
 
 #if V3_PRINTK_OLD_STYLE_OUTPUT
 
@@ -104,6 +106,7 @@ void palacios_print(const char *fmt, ...) {
   va_list ap;
   char *buf;
   unsigned int cpu = palacios_get_cpu();
+  struct v3_guest *guest = (struct v3_guest *)vm;
 
   buf = print_buffer[cpu];
@@ -132,8 +135,25 @@ void palacios_print(const char *fmt, ...) {
   }
 #endif
 
-  printk(KERN_INFO "palacios (pcore %u): %s",cpu,buf);
-
+  if (guest) {
+    if (vcore>=0) {
+      printk(KERN_INFO "palacios (pcore %u vm %s vcore %u): %s",
+             cpu,
+             guest->name,
+             vcore,
+             buf);
+    } else {
+      printk(KERN_INFO "palacios (pcore %u vm %s): %s",
+             cpu,
+             guest->name,
+             buf);
+    }
+  } else {
+    printk(KERN_INFO "palacios (pcore %u): %s",
+           cpu,
+           buf);
+  }
+
   return;
 
 #endif
@@ -141,15 +161,19 @@ void palacios_print(const char *fmt, ...) {
 }
 
-
 /*
  * Allocates a contiguous region of pages of the requested size.
  * Returns the physical address of the first page in the region.
  */
-void *palacios_allocate_pages(int num_pages, unsigned int alignment) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int constraints) {
 
   void * pg_addr = NULL;
 
-  pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment);
+  if (num_pages<=0) {
+    ERROR("ALERT ALERT Attempt to allocate zero or fewer pages\n");
+    return NULL;
+  }
+
+  pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id, constraints);
 
   if (!pg_addr) {
     ERROR("ALERT ALERT Page allocation has FAILED Warning\n");
@@ -158,6 +182,8 @@ void *palacios_allocate_pages(int num_pages, unsigned int alignment) {
 
   pg_allocs += num_pages;
 
+  MEMCHECK_ALLOC_PAGES(pg_addr,num_pages*4096);
+
   return pg_addr;
 }
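The MEMCHECK_ALLOC_PAGES() and MEMCHECK_FREE_PAGES() calls introduced above come from the new memcheck.h header, which is not shown in this diff. As a hedged illustration only, the sketch below shows one plausible shape for such hooks, compiling to no-ops when checking is disabled; the PALACIOS_MEMCHECK symbol and the palacios_memcheck_* helpers are assumptions, not the actual contents of memcheck.h.

/* Illustrative sketch only -- not the real memcheck.h from this patch. */
#ifdef PALACIOS_MEMCHECK                    /* hypothetical config symbol */
void palacios_memcheck_alloc_pages(void *paddr, unsigned long bytes);  /* hypothetical helper */
void palacios_memcheck_free_pages(void *paddr, unsigned long bytes);   /* hypothetical helper */
#define MEMCHECK_ALLOC_PAGES(paddr, bytes) palacios_memcheck_alloc_pages(paddr, bytes)
#define MEMCHECK_FREE_PAGES(paddr, bytes)  palacios_memcheck_free_pages(paddr, bytes)
#else
/* checker disabled: the hooks in the allocator paths cost nothing */
#define MEMCHECK_ALLOC_PAGES(paddr, bytes) do { } while (0)
#define MEMCHECK_FREE_PAGES(paddr, bytes)  do { } while (0)
#endif

Either way, the allocator code in the hunks above stays unconditional and the cost of checking is confined to the header.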
@@ -171,14 +197,28 @@ void *palacios_allocate_pages(int num_pages, unsigned int alignment) {
 void palacios_free_pages(void * page_paddr, int num_pages) {
   pg_frees += num_pages;
   free_palacios_pgs((uintptr_t)page_paddr, num_pages);
+  MEMCHECK_FREE_PAGES(page_paddr,num_pages*4096);
+
 }
 
 
 void *
-palacios_alloc_extended(unsigned int size, unsigned int flags) {
+palacios_alloc_extended(unsigned int size, unsigned int flags, int node) {
   void * addr = NULL;
 
-  addr = kmalloc(size+2*ALLOC_PAD, flags);
+  if (size==0) {
+    // note that modern kernels will respond to a zero byte
+    // kmalloc and return the address 0x10...  In Palacios,
+    // we will simply not allow 0 byte allocs at all, of any kind
+    ERROR("ALERT ALERT attempt to kmalloc zero bytes rejected\n");
+    return NULL;
+  }
+
+  if (node==-1) {
+    addr = kmalloc(size+2*ALLOC_PAD, flags);
+  } else {
+    addr = kmalloc_node(size+2*ALLOC_PAD, flags, node);
+  }
 
   if (!addr) {
     ERROR("ALERT ALERT kmalloc has FAILED FAILED FAILED\n");
@@ -191,9 +231,41 @@ palacios_alloc_extended(unsigned int size, unsigned int flags) {
   memset(addr,0,size+2*ALLOC_PAD);
 #endif
 
+  MEMCHECK_KMALLOC(addr,size+2*ALLOC_PAD);
+
   return addr+ALLOC_PAD;
 }
 
+void *
+palacios_valloc(unsigned int size)
+{
+  void * addr = NULL;
+
+  if (size==0) {
+    ERROR("ALERT ALERT attempt to vmalloc zero bytes rejected\n");
+    return NULL;
+  }
+
+  addr = vmalloc(size);
+
+  if (!addr) {
+    ERROR("ALERT ALERT vmalloc has FAILED FAILED FAILED\n");
+    return NULL;
+  }
+
+  vmallocs++;
+
+  MEMCHECK_VMALLOC(addr,size);
+
+  return addr;
+}
+
+void palacios_vfree(void *p)
+{
+  vfree(p);
+  vfrees++;
+  MEMCHECK_VFREE(p);
+}
 
 /**
  * Allocates 'size' bytes of kernel memory.
@@ -207,9 +279,9 @@ palacios_alloc(unsigned int size) {
   // module, both in places where interrupts are off and where they are on
   // a GFP_KERNEL call, when done with interrupts off can lead to DEADLOCK
   if (irqs_disabled()) {
-    return palacios_alloc_extended(size,GFP_ATOMIC);
+    return palacios_alloc_extended(size,GFP_ATOMIC,-1);
   } else {
-    return palacios_alloc_extended(size,GFP_KERNEL);
+    return palacios_alloc_extended(size,GFP_KERNEL,-1);
   }
 }
@@ -224,7 +296,7 @@ palacios_free(
 {
   frees++;
   kfree(addr-ALLOC_PAD);
-  return;
+  MEMCHECK_KFREE(addr-ALLOC_PAD);
 }
 
 /**
@@ -598,6 +670,7 @@ palacios_mutex_alloc(void)
 
   if (lock) {
     spin_lock_init(lock);
+    LOCKCHECK_ALLOC(lock);
   } else {
     ERROR("ALERT ALERT Unable to allocate lock\n");
     return NULL;
@@ -606,12 +679,35 @@ palacios_mutex_alloc(void)
   return lock;
 }
 
+void palacios_mutex_init(void *mutex)
+{
+  spinlock_t *lock = (spinlock_t*)mutex;
+
+  if (lock) {
+    spin_lock_init(lock);
+    LOCKCHECK_ALLOC(lock);
+  }
+}
+
+void palacios_mutex_deinit(void *mutex)
+{
+  spinlock_t *lock = (spinlock_t*)mutex;
+
+  if (lock) {
+    // no actual spin_lock_deinit on linux
+    // our purpose here is to drive the lock checker
+    LOCKCHECK_FREE(lock);
+  }
+}
+
+
 /**
  * Frees a mutex.
  */
 void
 palacios_mutex_free(void * mutex) {
   palacios_free(mutex);
+  LOCKCHECK_FREE(mutex);
 }
 
 /**
@@ -619,7 +715,10 @@ palacios_mutex_free(void * mutex) {
  */
 void
 palacios_mutex_lock(void * mutex, int must_spin) {
+
+  LOCKCHECK_LOCK_PRE(mutex);
   spin_lock((spinlock_t *)mutex);
+  LOCKCHECK_LOCK_POST(mutex);
 }
@@ -631,7 +730,9 @@ palacios_mutex_lock_irqsave(void * mutex, int must_spin) {
 
   unsigned long flags;
 
+  LOCKCHECK_LOCK_IRQSAVE_PRE(mutex,flags);
   spin_lock_irqsave((spinlock_t *)mutex,flags);
+  LOCKCHECK_LOCK_IRQSAVE_POST(mutex,flags);
 
   return (void *)flags;
 }
@@ -645,7 +746,9 @@ palacios_mutex_unlock(
   void * mutex
   )
 {
+  LOCKCHECK_UNLOCK_PRE(mutex);
   spin_unlock((spinlock_t *)mutex);
+  LOCKCHECK_UNLOCK_POST(mutex);
 }
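The lock-checker hooks above wrap the existing spinlock calls without changing the calling convention: palacios_mutex_lock_irqsave() still hands the saved interrupt flags back to the caller as an opaque void *, which is later passed to palacios_mutex_unlock_irqrestore() (its instrumented version appears in the next hunk). The fragment below is a usage sketch only; example_critical_section and some_lock are placeholder names, not code from this patch.

/* Usage sketch only -- placeholder names, not part of the patch. */
static void example_critical_section(void *some_lock)
{
    void *flags;

    /* saves and disables interrupts; flags come back as an opaque pointer */
    flags = palacios_mutex_lock_irqsave(some_lock, 1 /* must_spin */);

    /* ... critical section ... */

    /* restores the interrupt state captured above */
    palacios_mutex_unlock_irqrestore(some_lock, flags);
}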
@@ -655,15 +758,17 @@ palacios_mutex_unlock(
 void
 palacios_mutex_unlock_irqrestore(void *mutex, void *flags)
 {
+  LOCKCHECK_UNLOCK_IRQRESTORE_PRE(mutex,(unsigned long)flags);
   // This is correct, flags is opaque
   spin_unlock_irqrestore((spinlock_t *)mutex,(unsigned long)flags);
+  LOCKCHECK_UNLOCK_IRQRESTORE_POST(mutex,(unsigned long)flags);
 }
 
 /**
  * Structure used by the Palacios hypervisor to interface with the host kernel.
  */
 static struct v3_os_hooks palacios_os_hooks = {
-  .print            = palacios_print,
+  .print            = palacios_print_scoped,
   .allocate_pages   = palacios_allocate_pages,
   .free_pages       = palacios_free_pages,
   .malloc           = palacios_alloc,
@@ -693,7 +798,7 @@ static struct v3_os_hooks palacios_os_hooks = {
 
 
 
-int palacios_vmm_init( void )
+int palacios_vmm_init( char *options )
 {
     int num_cpus = num_online_cpus();
     char * cpu_mask = NULL;
@@ -735,7 +840,7 @@ int palacios_vmm_init( void )
 
     INFO("palacios_init starting - calling init_v3\n");
 
-    Init_V3(&palacios_os_hooks, cpu_mask, num_cpus);
+    Init_V3(&palacios_os_hooks, cpu_mask, num_cpus, options);
 
     return 0;
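palacios_vmm_init() now takes an option string that it forwards to Init_V3(). As a hedged illustration, a module parameter is one way such a string could reach it at load time; the parameter name and init function below are hypothetical and do not claim to match how the Palacios Linux module actually wires this up.

/* Hypothetical sketch -- parameter name and init path are assumptions. */
#include <linux/module.h>
#include <linux/moduleparam.h>

extern int palacios_vmm_init(char *options);   /* signature as changed by this patch */

static char *vmm_options = "";
module_param(vmm_options, charp, 0);
MODULE_PARM_DESC(vmm_options, "option string passed through to Init_V3()");

static int __init example_palacios_init(void)
{
    /* hand the load-time option string to the VMM core */
    return palacios_vmm_init(vmm_options);
}
module_init(example_palacios_init);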