#include <linux/kthread.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
+#include <linux/vmalloc.h>
#include <palacios/vmm.h>
#include <palacios/vmm_host_events.h>
#include "palacios.h"
-
-
-
#include "mm.h"
+#include "memcheck.h"
+#include "lockcheck.h"
+
// The following can be used to track heap bugs
// zero memory after allocation
#define ALLOC_ZERO_MEM 0
u32 pg_frees = 0;
u32 mallocs = 0;
u32 frees = 0;
-
+u32 vmallocs = 0;
+u32 vfrees = 0;
static struct v3_vm_info * irq_to_guest_map[256];
/**
* Prints a message to the console.
*/
-void palacios_print(const char *fmt, ...) {
+void palacios_print_scoped(void * vm, int vcore, const char *fmt, ...) {
#if V3_PRINTK_OLD_STYLE_OUTPUT
va_list ap;
char *buf;
unsigned int cpu = palacios_get_cpu();
+ struct v3_guest *guest = (struct v3_guest *)vm;
buf = print_buffer[cpu];
}
#endif
- printk(KERN_INFO "palacios (pcore %u): %s",cpu,buf);
-
+ if (guest) {
+ if (vcore>=0) {
+ printk(KERN_INFO "palacios (pcore %u vm %s vcore %u): %s",
+ cpu,
+ guest->name,
+ vcore,
+ buf);
+ } else {
+ printk(KERN_INFO "palacios (pcore %u vm %s): %s",
+ cpu,
+ guest->name,
+ buf);
+ }
+ } else {
+ printk(KERN_INFO "palacios (pcore %u): %s",
+ cpu,
+ buf);
+ }
+
return;
#endif
}
-
/*
* Allocates a contiguous region of pages of the requested size.
* Returns the physical address of the first page in the region.
pg_allocs += num_pages;
+ MEMCHECK_ALLOC_PAGES(pg_addr,num_pages*4096);
+
return pg_addr;
}
void palacios_free_pages(void * page_paddr, int num_pages) {
pg_frees += num_pages;
free_palacios_pgs((uintptr_t)page_paddr, num_pages);
+ MEMCHECK_FREE_PAGES(page_paddr,num_pages*4096);
+
}
memset(addr,0,size+2*ALLOC_PAD);
#endif
+ MEMCHECK_KMALLOC(addr,size+2*ALLOC_PAD);
+
return addr+ALLOC_PAD;
}
+// Allocate 'size' bytes of virtually-contiguous (not necessarily
+// physically-contiguous) kernel memory via vmalloc().  Returns NULL on
+// failure.  Must be paired with palacios_vfree(), not palacios_free().
+void *
+palacios_valloc(unsigned int size)
+{
+ void * addr = NULL;
+
+ addr = vmalloc(size);
+
+ if (!addr) {
+ ERROR("ALERT ALERT vmalloc has FAILED FAILED FAILED\n");
+ return NULL;
+ }
+
+ // statistics counter; NOTE(review): updated without locking — assumed
+ // to be best-effort debug accounting, confirm racing callers are OK
+ vmallocs++;
+
+ MEMCHECK_VMALLOC(addr,size);
+
+ return addr;
+}
+
+// Release memory obtained from palacios_valloc().  The pointer value is
+// only passed to the MEMCHECK_VFREE tracking macro after vfree(); it is
+// never dereferenced after the free.
+void palacios_vfree(void *p)
+{
+ vfree(p);
+ vfrees++;
+ MEMCHECK_VFREE(p);
+}
/**
* Allocates 'size' bytes of kernel memory.
{
frees++;
kfree(addr-ALLOC_PAD);
- return;
+ MEMCHECK_KFREE(addr-ALLOC_PAD);
}
/**
return;
}
+
+#define MAX_THREAD_NAME 32
+
struct lnx_thread_arg {
int (*fn)(void * arg);
void * arg;
- char * name;
+ char name[MAX_THREAD_NAME];
};
static int lnx_thread_target(void * arg) {
thread_info->fn = fn;
thread_info->arg = arg;
- thread_info->name = thread_name;
+ strncpy(thread_info->name,thread_name,MAX_THREAD_NAME);
+ thread_info->name[MAX_THREAD_NAME-1] =0;
- return kthread_run( lnx_thread_target, thread_info, thread_name );
+ // kthread_run()'s third argument is a printf-style namefmt: pass the
+ // caller-supplied name through "%s" so a '%' embedded in the name can
+ // never be interpreted as a conversion specifier.
+ return kthread_run( lnx_thread_target, thread_info, "%s", thread_info->name );
}
thread_info->fn = fn;
thread_info->arg = arg;
- thread_info->name = thread_name;
-
+ strncpy(thread_info->name,thread_name,MAX_THREAD_NAME);
+ thread_info->name[MAX_THREAD_NAME-1] =0;
- thread = kthread_create( lnx_thread_target, thread_info, thread_name );
+ // kthread_create()'s namefmt is printf-style: route the caller-supplied
+ // name through "%s" so '%' characters in it are taken literally rather
+ // than as conversion specifiers.
+ thread = kthread_create( lnx_thread_target, thread_info, "%s", thread_info->name );
if (IS_ERR(thread)) {
- WARNING("Palacios error creating thread: %s\n", thread_name);
+ WARNING("Palacios error creating thread: %s\n", thread_info->name);
palacios_free(thread_info);
return NULL;
}
* Given now immediately if there is no other thread that is runnable
* And there is no real bound on how long it will yield
*/
-void palacios_yield_cpu_timed(unsigned int us)
+// Sleep the calling thread for at least 'us' microseconds, rounded up to
+// whole jiffies; us==0 simply yields via schedule().  The task state is
+// set to TASK_INTERRUPTIBLE *before* scheduling so a concurrent
+// palacios_wakeup_cpu() (wake_up_process) cannot be lost.
+void palacios_sleep_cpu(unsigned int us)
{
- unsigned int uspj = 1000000U/HZ;
-
- unsigned int jiffies = us/uspj + ((us%uspj) !=0); // ceiling
-
set_current_state(TASK_INTERRUPTIBLE);
-
- schedule_timeout(jiffies);
-
+ if (us) {
+ // microseconds-per-jiffy; convert with a ceiling so we never sleep short
+ unsigned int uspj = 1000000U/HZ;
+ unsigned int jiffies = us/uspj + ((us%uspj) !=0); // ceiling
+ schedule_timeout(jiffies);
+ } else {
+ // no duration requested: give up the CPU until woken or rescheduled
+ schedule();
+ }
+ return;
}
+// Wake a thread previously put to sleep by palacios_sleep_cpu().
+// 'thread' is an opaque handle; assumed to be a struct task_struct *
+// as required by wake_up_process() — TODO confirm against callers.
+void palacios_wakeup_cpu(void *thread)
+{
+ wake_up_process(thread);
+ return;
+}
/**
* Allocates a mutex.
if (lock) {
spin_lock_init(lock);
+ LOCKCHECK_ALLOC(lock);
} else {
ERROR("ALERT ALERT Unable to allocate lock\n");
return NULL;
return lock;
}
+// Initialize a caller-provided spinlock in place (contrast with
+// palacios_mutex_alloc, which also allocates the storage) and register
+// it with the lock checker.  NOTE(review): a NULL argument is silently
+// ignored here, with no ERROR report — presumably intentional, verify.
+void palacios_mutex_init(void *mutex)
+{
+ spinlock_t *lock = (spinlock_t*)mutex;
+
+ if (lock) {
+ spin_lock_init(lock);
+ LOCKCHECK_ALLOC(lock);
+ }
+}
+
+
/**
* Frees a mutex.
*/
void
palacios_mutex_free(void * mutex) {
palacios_free(mutex);
+ LOCKCHECK_FREE(mutex);
}
/**
void
palacios_mutex_lock(void * mutex, int must_spin) {
spin_lock((spinlock_t *)mutex);
+ LOCKCHECK_LOCK(mutex);
}
unsigned long flags;
spin_lock_irqsave((spinlock_t *)mutex,flags);
+ LOCKCHECK_LOCK_IRQSAVE(mutex,flags);
return (void *)flags;
}
)
{
spin_unlock((spinlock_t *)mutex);
+ LOCKCHECK_UNLOCK(mutex);
}
{
// This is correct, flags is opaque
spin_unlock_irqrestore((spinlock_t *)mutex,(unsigned long)flags);
+ LOCKCHECK_UNLOCK_IRQRESTORE(mutex,(unsigned long)flags);
}
/**
* Structure used by the Palacios hypervisor to interface with the host kernel.
*/
static struct v3_os_hooks palacios_os_hooks = {
- .print = palacios_print,
+ .print = palacios_print_scoped,
.allocate_pages = palacios_allocate_pages,
.free_pages = palacios_free_pages,
.malloc = palacios_alloc,
.get_cpu_khz = palacios_get_cpu_khz,
.start_kernel_thread = palacios_start_kernel_thread,
.yield_cpu = palacios_yield_cpu,
- .yield_cpu_timed = palacios_yield_cpu_timed,
+ .sleep_cpu = palacios_sleep_cpu,
+ .wakeup_cpu = palacios_wakeup_cpu,
.mutex_alloc = palacios_mutex_alloc,
.mutex_free = palacios_mutex_free,
.mutex_lock = palacios_mutex_lock,
-int palacios_vmm_init( void )
+int palacios_vmm_init( char *options )
{
int num_cpus = num_online_cpus();
char * cpu_mask = NULL;
INFO("palacios_init starting - calling init_v3\n");
- Init_V3(&palacios_os_hooks, cpu_mask, num_cpus);
+ Init_V3(&palacios_os_hooks, cpu_mask, num_cpus, options);
return 0;