X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=linux_module%2Fpalacios-stubs.c;h=78149875f052942c5188af480f070c8f5e387572;hb=6234775894cac514f495b751a046db245ecb124a;hp=decae1f653433f67d8e816eae83bdc86faf0d51f;hpb=9feccf93cd8327d1d30a404a92f19716bf5a1e96;p=palacios.git

diff --git a/linux_module/palacios-stubs.c b/linux_module/palacios-stubs.c
index decae1f..7814987 100644
--- a/linux_module/palacios-stubs.c
+++ b/linux_module/palacios-stubs.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -27,6 +28,8 @@
 
 #include "palacios.h"
 
+#include "util-hashtable.h"
+
 #include "mm.h"
 
 #include "memcheck.h"
@@ -34,10 +37,10 @@
 
 
 
-// The following can be used to track heap bugs
-// zero memory after allocation
-#define ALLOC_ZERO_MEM 0
-// pad allocations by this many bytes on both ends of block
+// The following can be used to track memory bugs
+// zero memory after allocation (now applies to valloc and page alloc as well)
+#define ALLOC_ZERO_MEM 1
+// pad allocations by this many bytes on both ends of block (heap only)
 #define ALLOC_PAD 0
 
 
@@ -57,6 +60,9 @@ extern int cpu_list[NR_CPUS];
 extern int cpu_list_len;
 
+extern struct hashtable *v3_thread_resource_map;
+
+
 static char *print_buffer[NR_CPUS];
 
 static void deinit_print_buffers(void)
@@ -84,7 +90,7 @@ static int init_print_buffers(void)
 	if (!print_buffer[i]) { 
 	    ERROR("Cannot allocate print buffer for cpu %d\n",i);
 	    deinit_print_buffers();
-	    return -1;
+	    return -1; 
 	}
 	memset(print_buffer[i],0,V3_PRINTK_BUF_SIZE);
     }
@@ -174,23 +180,45 @@ void palacios_print_scoped(void * vm, int vcore, const char *fmt, ...) {
  * Allocates a contiguous region of pages of the requested size.
  * Returns the physical address of the first page in the region.
  */
-void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int constraints) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int (*filter_func)(void *paddr, void *filter_state), void *filter_state) {
     void * pg_addr = NULL;
+    v3_resource_control_t *r;
 
     if (num_pages<=0) { 
-	ERROR("ALERT ALERT Attempt to allocate zero or fewer pages (%d pages, alignment %d, node %d, constraints 0x%x)\n",num_pages, alignment, node_id, constraints);
+	ERROR("ALERT ALERT Attempt to allocate zero or fewer pages (%d pages, alignment %d, node %d, filter_func %p, filter_state %p)\n",num_pages, alignment, node_id, filter_func, filter_state);
 	return NULL;
     }
 
-    pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id, constraints);
+    if ((r=(v3_resource_control_t *)palacios_htable_search(v3_thread_resource_map,(addr_t)current))) { 
+	// thread has a registered resource control structure
+	// these override any default values
+	// INFO("Overridden page search: (pre) alignment=%x, node_id=%x, filter_func=%p, filter_state=%p\n",alignment,node_id,filter_func,filter_state);
+	if (alignment==4096) { 
+	    alignment = r->pg_alignment;
+	}
+	if (node_id==-1) { 
+	    node_id = r->pg_node_id;
+	}
+	if (!filter_func) { 
+	    filter_func = r->pg_filter_func;
+	    filter_state = r->pg_filter_state;
+	}
+	//INFO("Overridden page search: (post) alignment=%x, node_id=%x, filter_func=%p, filter_state=%p\n",alignment,node_id,filter_func,filter_state);
+    }
+
+    pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id, filter_func, filter_state);
 
     if (!pg_addr) { 
-	ERROR("ALERT ALERT Page allocation has FAILED Warning (%d pages, alignment %d, node %d, constraints 0x%x)\n",num_pages, alignment, node_id, constraints);
+	ERROR("ALERT ALERT Page allocation has FAILED Warning (%d pages, alignment %d, node %d, filter_func %p, filter_state %p)\n",num_pages, alignment, node_id, filter_func, filter_state);
 	return NULL;
     }
 
     pg_allocs += num_pages;
 
+#if ALLOC_ZERO_MEM
+    memset(__va(pg_addr),0,num_pages*4096);
+#endif
+
     MEMCHECK_ALLOC_PAGES(pg_addr,num_pages*4096);
 
     return pg_addr;
@@ -207,6 +235,7 @@ void palacios_free_pages(void * page_paddr, int num_pages) {
     if (!page_paddr) { 
 	ERROR("Ignoring free pages: 0x%p (0x%lx)for %d pages\n", page_paddr, (uintptr_t)page_paddr, num_pages);
 	dump_stack();
+	return;
     }
     pg_frees += num_pages;
     free_palacios_pgs((uintptr_t)page_paddr, num_pages);
@@ -233,7 +262,7 @@ palacios_alloc_extended(unsigned int size, unsigned int flags, int node) {
 	addr = kmalloc_node(size+2*ALLOC_PAD, flags, node);
     }
 
-    if (!addr) { 
+    if (!addr || IS_ERR(addr)) { 
 	ERROR("ALERT ALERT kmalloc has FAILED FAILED FAILED\n");
 	return NULL;
     }
@@ -261,13 +290,17 @@ palacios_valloc(unsigned int size)
 
     addr = vmalloc(size);
 
-    if (!addr) { 
+    if (!addr || IS_ERR(addr)) { 
 	ERROR("ALERT ALERT vmalloc has FAILED FAILED FAILED\n");
 	return NULL;
     }
 
     vmallocs++;
 
+#if ALLOC_ZERO_MEM
+    memset(addr,0,size);
+#endif
+
     MEMCHECK_VMALLOC(addr,size);
 
     return addr;
@@ -275,6 +308,11 @@ palacios_valloc(unsigned int size)
 
 void palacios_vfree(void *p)
 {
+  if (!p) { 
+      ERROR("Ignoring vfree: 0x%p\n",p);
+      dump_stack();
+      return;
+  }
   vfree(p);
   vfrees++;
   MEMCHECK_VFREE(p);
@@ -291,7 +329,7 @@ palacios_alloc(unsigned int size) {
     // this function is used extensively throughout palacios and the linux
     // module, both in places where interrupts are off and where they are on
     // a GFP_KERNEL call, when done with interrupts off can lead to DEADLOCK
-    if (irqs_disabled()) { 
+    if (irqs_disabled() || in_atomic()) { 
 	return palacios_alloc_extended(size,GFP_ATOMIC,-1);
     } else {
 	return palacios_alloc_extended(size,GFP_KERNEL,-1);
@@ -310,6 +348,7 @@ palacios_free(
     if (!addr) {
 	ERROR("Ignoring free : 0x%p\n", addr);
 	dump_stack();
+	return;
     }
     frees++;
     kfree(addr-ALLOC_PAD);
@@ -342,7 +381,7 @@ palacios_paddr_to_vaddr(
 /**
  * Runs a function on the specified CPU.
  */
-static void 
+void 
 palacios_xcall(
 	int cpu_id, 
 	void (*fn)(void *arg),
@@ -363,6 +402,7 @@ palacios_xcall(
 struct lnx_thread_arg {
     int (*fn)(void * arg);
     void * arg;
+    v3_resource_control_t *resource_control;
     char name[MAX_THREAD_NAME];
 };
 
@@ -383,10 +423,14 @@ static int lnx_thread_target(void * arg) {
     fpu_alloc(&(current->thread.fpu));
 #endif
 
+    palacios_htable_insert(v3_thread_resource_map,(addr_t)current,(addr_t)thread_info->resource_control);
+
     ret = thread_info->fn(thread_info->arg);
 
     INFO("Palacios Thread (%s) EXITING\n", thread_info->name);
 
+    palacios_htable_remove(v3_thread_resource_map,(addr_t)current,0);
+
     palacios_free(thread_info);
     // handle cleanup 
 
@@ -402,10 +446,11 @@ static int lnx_thread_target(void * arg) {
  * Creates a kernel thread.
  */
 void *
-palacios_start_kernel_thread(
+palacios_create_and_start_kernel_thread(
 	int (*fn)		(void * arg),
 	void *			arg,
-	char *			thread_name) {
+	char *			thread_name,
+	v3_resource_control_t *resource_control) {
 
     struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg));
 
@@ -418,6 +463,7 @@ palacios_start_kernel_thread(
     thread_info->arg = arg;
     strncpy(thread_info->name,thread_name,MAX_THREAD_NAME);
     thread_info->name[MAX_THREAD_NAME-1] =0;
+    thread_info->resource_control = resource_control;
 
     return kthread_run( lnx_thread_target, thread_info, thread_info->name );
 }
 
@@ -427,10 +473,11 @@ palacios_start_kernel_thread(
  * Starts a kernel thread on the specified CPU. 
*/ void * -palacios_start_thread_on_cpu(int cpu_id, - int (*fn)(void * arg), - void * arg, - char * thread_name ) { +palacios_create_thread_on_cpu(int cpu_id, + int (*fn)(void * arg), + void * arg, + char * thread_name, + v3_resource_control_t *resource_control) { struct task_struct * thread = NULL; struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg)); @@ -443,10 +490,11 @@ palacios_start_thread_on_cpu(int cpu_id, thread_info->arg = arg; strncpy(thread_info->name,thread_name,MAX_THREAD_NAME); thread_info->name[MAX_THREAD_NAME-1] =0; + thread_info->resource_control=resource_control; thread = kthread_create( lnx_thread_target, thread_info, thread_info->name ); - if (IS_ERR(thread)) { + if (!thread || IS_ERR(thread)) { WARNING("Palacios error creating thread: %s\n", thread_info->name); palacios_free(thread_info); return NULL; @@ -459,11 +507,37 @@ palacios_start_thread_on_cpu(int cpu_id, return NULL; } - wake_up_process(thread); - return thread; } +void +palacios_start_thread(void * th){ + + struct task_struct * thread = (struct task_struct *)th; + wake_up_process(thread); + +} + +/* + Convenience wrapper +*/ +void * +palacios_create_and_start_thread_on_cpu(int cpu_id, + int (*fn)(void * arg), + void * arg, + char * thread_name, + v3_resource_control_t *resource_control) { + + void *t = palacios_create_thread_on_cpu(cpu_id, fn, arg, thread_name, resource_control); + + if (t) { + palacios_start_thread(t); + } + + return t; +} + + /** * Rebind a kernel thread to the specified CPU @@ -791,10 +865,14 @@ palacios_mutex_unlock_irqrestore(void *mutex, void *flags) void palacios_used_fpu(void) { - struct thread_info *cur = current_thread_info(); - // We assume we are not preemptible here... - cur->status |= TS_USEDFPU; +#ifndef TS_USEDFPU + struct task_struct *tsk = current; + tsk->thread.fpu.has_fpu = 1; +#else + struct thread_info *cur = current_thread_info(); + cur->status |= TS_USEDFPU; +#endif clts(); // After this, FP Save should be handled by Linux if it // switches to a different task and that task uses FPU @@ -823,6 +901,8 @@ static struct v3_os_hooks palacios_os_hooks = { .print = palacios_print_scoped, .allocate_pages = palacios_allocate_pages, .free_pages = palacios_free_pages, + .vmalloc = palacios_valloc, + .vfree = palacios_vfree, .malloc = palacios_alloc, .free = palacios_free, .vaddr_to_paddr = palacios_vaddr_to_paddr, @@ -830,7 +910,7 @@ static struct v3_os_hooks palacios_os_hooks = { .hook_interrupt = palacios_hook_interrupt, .ack_irq = palacios_ack_interrupt, .get_cpu_khz = palacios_get_cpu_khz, - .start_kernel_thread = palacios_start_kernel_thread, + .start_kernel_thread = palacios_create_and_start_kernel_thread, .yield_cpu = palacios_yield_cpu, .sleep_cpu = palacios_sleep_cpu, .wakeup_cpu = palacios_wakeup_cpu, @@ -843,7 +923,8 @@ static struct v3_os_hooks palacios_os_hooks = { .get_cpu = palacios_get_cpu, .interrupt_cpu = palacios_interrupt_cpu, .call_on_cpu = palacios_xcall, - .start_thread_on_cpu = palacios_start_thread_on_cpu, + .create_thread_on_cpu = palacios_create_thread_on_cpu, + .start_thread = palacios_start_thread, .move_thread_to_cpu = palacios_move_thread_to_cpu, };
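
---

Editor's sketch of the new interface introduced by this commit. The diff replaces the old opaque `constraints` word on `palacios_allocate_pages` with a `filter_func`/`filter_state` callback pair, lets a per-thread `v3_resource_control_t` registered in `v3_thread_resource_map` override a caller's default alignment, NUMA node, and filter, and splits thread startup into separate create and start steps. The following is a minimal, hypothetical usage sketch, not code from the repository: only the function names, the filter-callback signature, and the `pg_*` field names are taken from the hunks above; the filter policy, the worker, and all constants are illustrative assumptions. It presumes the module's usual headers (palacios.h and friends) are in scope.

/* Filter callback matching the int (*)(void *paddr, void *filter_state)
 * signature introduced above. Hypothetical policy: accept only physical
 * pages below the address passed in filter_state. */
static int below_limit_filter(void *paddr, void *filter_state)
{
    return (uintptr_t)paddr < (uintptr_t)filter_state;
}

static int example_worker(void *arg)
{
    /* alignment==4096, node_id==-1, and filter_func==NULL are exactly the
     * "default" values that palacios_allocate_pages() now allows the
     * calling thread's registered resource control to override. */
    void *paddr = palacios_allocate_pages(4, 4096, -1, NULL, NULL);

    if (paddr) {
	palacios_free_pages(paddr, 4);
    }
    return 0;
}

static void example(void)
{
    /* Field names follow the r->pg_* accesses in the override logic
     * above; the values here are arbitrary. */
    v3_resource_control_t rctl = {
	.pg_alignment    = 2 * 1024 * 1024,      /* 2 MB-aligned regions  */
	.pg_node_id      = 0,                    /* allocate on NUMA node 0 */
	.pg_filter_func  = below_limit_filter,
	.pg_filter_state = (void *)(1ULL << 32), /* pages below 4 GB only */
    };

    /* Creation and wakeup are now distinct steps... */
    void *t = palacios_create_thread_on_cpu(0, example_worker, NULL,
					    "v3-example", &rctl);
    if (t) {
	palacios_start_thread(t);
    }

    /* ...or equivalently, via the convenience wrapper that does both:
     * palacios_create_and_start_thread_on_cpu(0, example_worker, NULL,
     *                                         "v3-example", &rctl);    */
}

Note that lnx_thread_target() inserts resource_control into v3_thread_resource_map keyed by current before invoking fn and removes it on exit, so the overrides apply only to page allocations made from within the Palacios thread itself.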