#include "mm.h"
+// The following can be used to track heap bugs
+// zero memory after allocation
+#define ALLOC_ZERO_MEM 0
+// pad allocations by this many bytes on both ends of block
+#define ALLOC_PAD 0
+
u32 pg_allocs = 0;
u32 pg_frees = 0;
extern int cpu_list_len;
+static char *print_buffer[NR_CPUS];
+
+// Free every per-CPU print buffer previously allocated by
+// init_print_buffers(), resetting each slot to NULL so a later
+// re-initialization starts from a clean state.  Safe to call even if
+// only some (or none) of the buffers were allocated, since each slot
+// is checked before freeing.
+static void deinit_print_buffers(void)
+{
+ int i;
+
+ for (i=0;i<NR_CPUS;i++) {
+ if (print_buffer[i]) {
+ palacios_free(print_buffer[i]);
+ print_buffer[i]=0;
+ }
+ }
+}
+
+// Allocate one V3_PRINTK_BUF_SIZE print buffer per possible CPU.
+// Returns 0 on success, -1 on allocation failure (in which case any
+// buffers already allocated are released via deinit_print_buffers()).
+// NOTE(review): buffers are only allocated when
+// !V3_PRINTK_OLD_STYLE_OUTPUT, yet the consumer at palacios_print_scoped
+// reads print_buffer[] under the opposite guard in this patch view --
+// elided context lines may resolve this; confirm against the full file.
+static int init_print_buffers(void)
+{
+ int i;
+
+ memset(print_buffer,0,sizeof(char*)*NR_CPUS);
+
+#if !V3_PRINTK_OLD_STYLE_OUTPUT
+ for (i=0;i<NR_CPUS;i++) {
+ print_buffer[i] = palacios_alloc(V3_PRINTK_BUF_SIZE);
+ if (!print_buffer[i]) {
+ ERROR("Cannot allocate print buffer for cpu %d\n",i);
+ deinit_print_buffers();
+ return -1;
+ }
+ memset(print_buffer[i],0,V3_PRINTK_BUF_SIZE);
+ }
+#endif
+
+ return 0;
+
+}
+
/**
* Prints a message to the console.
*/
-static void palacios_print(const char * fmt, ...) {
+void palacios_print_scoped(void * vm, int vcore, const char *fmt, ...) {
#if V3_PRINTK_OLD_STYLE_OUTPUT
va_list ap;
char *buf;
+ unsigned int cpu = palacios_get_cpu();
+ struct v3_guest *guest = (struct v3_guest *)vm;
+
+ buf = print_buffer[cpu];
- // Allocate space atomically, in case we are called
- // with a lock held
- buf = kmalloc(V3_PRINTK_BUF_SIZE, GFP_ATOMIC);
if (!buf) {
- printk("palacios: output skipped - unable to allocate\n");
+ printk(KERN_INFO "palacios (pcore %u): output skipped - no allocated buffer\n",cpu);
return;
}
vsnprintf(buf,V3_PRINTK_BUF_SIZE, fmt, ap);
va_end(ap);
- printk(KERN_INFO "palacios: %s",buf);
-
- kfree(buf);
+#if V3_PRINTK_CHECK_7BIT
+ {
+ char c=0;
+ int i;
+ for (i=0;i<strlen(buf);i++) {
+ if (buf[i] < 0) {
+ c=buf[i];
+ break;
+ }
+ }
+ if (c!=0) {
+ printk(KERN_INFO "palacios (pcore %u): ALERT ALERT 8 BIT CHAR (c=%d) DETECTED\n", cpu,c);
+ }
+ }
+#endif
+ if (guest) {
+ if (vcore>=0) {
+ printk(KERN_INFO "palacios (pcore %u vm %s vcore %u): %s",
+ cpu,
+ guest->name,
+ vcore,
+ buf);
+ } else {
+ printk(KERN_INFO "palacios (pcore %u vm %s): %s",
+ cpu,
+ guest->name,
+ buf);
+ }
+ } else {
+ printk(KERN_INFO "palacios (pcore %u): %s",
+ cpu,
+ buf);
+ }
+
return;
#endif
}
-
/*
* Allocates a contiguous region of pages of the requested size.
* Returns the physical address of the first page in the region.
*/
-static void * palacios_allocate_pages(int num_pages, unsigned int alignment) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment) {
void * pg_addr = NULL;
pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment);
+
+ if (!pg_addr) {
+ ERROR("ALERT ALERT Page allocation has FAILED Warning\n");
+ return NULL;
+ }
+
pg_allocs += num_pages;
return pg_addr;
* a single call while palacios_free_page() only frees a single page.
*/
-static void palacios_free_pages(void * page_paddr, int num_pages) {
+void palacios_free_pages(void * page_paddr, int num_pages) {
pg_frees += num_pages;
free_palacios_pgs((uintptr_t)page_paddr, num_pages);
}
+/**
+ * Allocates 'size' bytes of kernel memory with caller-specified GFP
+ * flags (e.g. GFP_ATOMIC vs GFP_KERNEL), plus ALLOC_PAD guard bytes on
+ * each end of the block for heap-debugging.  Returns a pointer to the
+ * usable region (ALLOC_PAD bytes past the true allocation start), or
+ * NULL on failure.  Must be paired with palacios_free(), which undoes
+ * the ALLOC_PAD offset before calling kfree().
+ */
+void *
+palacios_alloc_extended(unsigned int size, unsigned int flags) {
+ void * addr = NULL;
+
+ addr = kmalloc(size+2*ALLOC_PAD, flags);
+
+ if (!addr) {
+ ERROR("ALERT ALERT kmalloc has FAILED FAILED FAILED\n");
+ return NULL;
+ }
+
+ mallocs++;
+
+#if ALLOC_ZERO_MEM
+ // zero the whole block (pads included) to expose use-before-init bugs
+ memset(addr,0,size+2*ALLOC_PAD);
+#endif
+
+ // skip over the leading pad; palacios_free() subtracts it back off
+ return addr+ALLOC_PAD;
+}
+
+
/**
* Allocates 'size' bytes of kernel memory.
* Returns the kernel virtual address of the memory allocated.
*/
-static void *
+void *
palacios_alloc(unsigned int size) {
- void * addr = NULL;
+ // It is very important that this test remains since
+ // this function is used extensively throughout palacios and the linux
+ // module, both in places where interrupts are off and where they are on
+ // a GFP_KERNEL call, when done with interrupts off can lead to DEADLOCK
if (irqs_disabled()) {
- addr = kmalloc(size, GFP_ATOMIC);
+ return palacios_alloc_extended(size,GFP_ATOMIC);
} else {
- addr = kmalloc(size, GFP_KERNEL);
+ return palacios_alloc_extended(size,GFP_KERNEL);
}
- mallocs++;
-
- return addr;
}
/**
* Frees memory that was previously allocated by palacios_alloc().
*/
-static void
+void
palacios_free(
void * addr
)
{
frees++;
- kfree(addr);
+ kfree(addr-ALLOC_PAD);
return;
}
/**
* Converts a kernel virtual address to the corresponding physical address.
*/
-static void *
+void *
palacios_vaddr_to_paddr(
void * vaddr
)
/**
* Converts a physical address to the corresponding kernel virtual address.
*/
-static void *
+void *
palacios_paddr_to_vaddr(
void * paddr
)
/**
* Runs a function on the specified CPU.
*/
-
-// For now, do call only on local CPU
static void
palacios_xcall(
int cpu_id,
return;
}
+
+#define MAX_THREAD_NAME 32
+
struct lnx_thread_arg {
int (*fn)(void * arg);
void * arg;
- char * name;
+ char name[MAX_THREAD_NAME];
};
static int lnx_thread_target(void * arg) {
ret = thread_info->fn(thread_info->arg);
- kfree(thread_info);
- // handle cleanup
-
- printk("Palacios Thread (%s) EXITTING\n", thread_info->name);
+ INFO("Palacios Thread (%s) EXITING\n", thread_info->name);
+
+ palacios_free(thread_info);
+ // handle cleanup
do_exit(ret);
/**
* Creates a kernel thread.
*/
-static void *
+void *
palacios_start_kernel_thread(
int (*fn) (void * arg),
void * arg,
char * thread_name) {
- struct lnx_thread_arg * thread_info = kmalloc(sizeof(struct lnx_thread_arg), GFP_KERNEL);
+ struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg));
+
+ if (!thread_info) {
+ ERROR("ALERT ALERT Unable to allocate thread\n");
+ return NULL;
+ }
thread_info->fn = fn;
thread_info->arg = arg;
- thread_info->name = thread_name;
+ strncpy(thread_info->name,thread_name,MAX_THREAD_NAME);
+ thread_info->name[MAX_THREAD_NAME-1] =0;
- return kthread_run( lnx_thread_target, thread_info, thread_name );
+ return kthread_run( lnx_thread_target, thread_info, thread_info->name );
}
/**
* Starts a kernel thread on the specified CPU.
*/
-static void *
+void *
palacios_start_thread_on_cpu(int cpu_id,
int (*fn)(void * arg),
void * arg,
char * thread_name ) {
struct task_struct * thread = NULL;
- struct lnx_thread_arg * thread_info = kmalloc(sizeof(struct lnx_thread_arg), GFP_KERNEL);
+ struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg));
+
+ if (!thread_info) {
+ ERROR("ALERT ALERT Unable to allocate thread to start on cpu\n");
+ return NULL;
+ }
thread_info->fn = fn;
thread_info->arg = arg;
- thread_info->name = thread_name;
-
+ strncpy(thread_info->name,thread_name,MAX_THREAD_NAME);
+ thread_info->name[MAX_THREAD_NAME-1] =0;
- thread = kthread_create( lnx_thread_target, thread_info, thread_name );
+ thread = kthread_create( lnx_thread_target, thread_info, thread_info->name );
if (IS_ERR(thread)) {
- WARNING("Palacios error creating thread: %s\n", thread_name);
+ WARNING("Palacios error creating thread: %s\n", thread_info->name);
+ palacios_free(thread_info);
return NULL;
}
if (set_cpus_allowed_ptr(thread, cpumask_of(cpu_id)) != 0) {
+ WARNING("Attempt to start thread on disallowed CPU\n");
kthread_stop(thread);
+ palacios_free(thread_info);
return NULL;
}
* The thread will be running on target CPU on return
* non-zero return means failure
*/
-static int
+int
palacios_move_thread_to_cpu(int new_cpu_id,
void * thread_ptr) {
struct task_struct * thread = (struct task_struct *)thread_ptr;
/**
* Returns the CPU ID that the caller is running on.
*/
-static unsigned int
+unsigned int
palacios_get_cpu(void)
{
//set_idtvec_handler(vector, palacios_dispatch_interrupt);
if (vector < 32) {
- panic("unexpected vector for hooking\n");
+ ERROR("unexpected vector for hooking\n");
+ return -1;
} else {
int device_id = 0;
if (error) {
ERROR("error code for request_irq is %d\n", error);
- panic("request vector %d failed",vector);
+ ERROR("request vector %d failed", vector);
+ return -1;
}
}
)
{
ack_APIC_irq();
- DEBUG("Pretending to ack interrupt, vector=%d\n",vector);
+ DEBUG("Pretending to ack interrupt, vector=%d\n", vector);
return 0;
}
/**
* Returns the CPU frequency in kilohertz.
*/
-static unsigned int
+unsigned int
palacios_get_cpu_khz(void)
{
- INFO("cpu_khz is %u\n",cpu_khz);
+ INFO("cpu_khz is %u\n", cpu_khz);
if (cpu_khz == 0) {
INFO("faking cpu_khz to 1000000\n");
/**
* Yield the CPU so other host OS tasks can run.
+ * This will return immediately if there is no other thread that is runnable,
+ * and there is no real bound on how long it will yield.
*/
-static void
+void
palacios_yield_cpu(void)
{
schedule();
return;
}
+/**
+ * Sleep the calling thread for approximately 'us' microseconds so other
+ * host OS tasks can run.  A zero argument degenerates to a plain yield,
+ * which returns immediately if no other thread is runnable.  There is
+ * no hard bound on how long the sleep/yield actually lasts; the sleep
+ * may also end early if the thread is woken (TASK_INTERRUPTIBLE).
+ */
+void palacios_sleep_cpu(unsigned int us)
+{
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (us) {
+ unsigned int uspj = 1000000U/HZ; // microseconds per jiffy
+ // renamed from 'jiffies' to avoid shadowing the kernel's global tick counter
+ unsigned int sleep_jiffies = us/uspj + ((us%uspj) !=0); // ceiling, so we sleep at least 'us'
+ schedule_timeout(sleep_jiffies);
+ } else {
+ schedule();
+ }
+ return;
+}
+// Wake a thread that was put to sleep via palacios_sleep_cpu().
+// 'thread' is an opaque handle; it is passed straight to
+// wake_up_process(), so it must actually be a struct task_struct *.
+void palacios_wakeup_cpu(void *thread)
+{
+ wake_up_process(thread);
+ return;
+}
/**
* Allocates a mutex.
* Returns NULL on failure.
*/
-static void *
+void *
palacios_mutex_alloc(void)
{
- spinlock_t *lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+ spinlock_t *lock = palacios_alloc(sizeof(spinlock_t));
if (lock) {
spin_lock_init(lock);
+ } else {
+ ERROR("ALERT ALERT Unable to allocate lock\n");
+ return NULL;
}
return lock;
/**
* Frees a mutex.
*/
-static void
+void
palacios_mutex_free(void * mutex) {
- kfree(mutex);
+ palacios_free(mutex);
}
/**
* Locks a mutex.
*/
-static void
+void
palacios_mutex_lock(void * mutex, int must_spin) {
spin_lock((spinlock_t *)mutex);
}
+
+/**
+ * Locks a mutex, disabling interrupts on this core.
+ * Returns the saved interrupt state as an opaque handle that MUST be
+ * passed back unchanged to palacios_mutex_unlock_irqrestore().
+ * 'must_spin' is currently ignored (a spinlock always spins).
+ */
+void *
+palacios_mutex_lock_irqsave(void * mutex, int must_spin) {
+
+ unsigned long flags;
+
+ spin_lock_irqsave((spinlock_t *)mutex,flags);
+
+ // smuggle the flags word through the opaque void* return
+ return (void *)flags;
+}
+
+
/**
* Unlocks a mutex.
*/
-static void
+void
palacios_mutex_unlock(
void * mutex
)
spin_unlock((spinlock_t *)mutex);
}
+
+/**
+ * Unlocks a mutex and restores the previous interrupt state on this
+ * core.  'flags' must be the opaque value returned by the matching
+ * palacios_mutex_lock_irqsave() call.
+ */
+void
+palacios_mutex_unlock_irqrestore(void *mutex, void *flags)
+{
+ // This is correct, flags is opaque: it round-trips the unsigned long
+ // saved by spin_lock_irqsave through a void*
+ spin_unlock_irqrestore((spinlock_t *)mutex,(unsigned long)flags);
+}
+
/**
* Structure used by the Palacios hypervisor to interface with the host kernel.
*/
static struct v3_os_hooks palacios_os_hooks = {
- .print = palacios_print,
+ .print = palacios_print_scoped,
.allocate_pages = palacios_allocate_pages,
.free_pages = palacios_free_pages,
.malloc = palacios_alloc,
.get_cpu_khz = palacios_get_cpu_khz,
.start_kernel_thread = palacios_start_kernel_thread,
.yield_cpu = palacios_yield_cpu,
+ .sleep_cpu = palacios_sleep_cpu,
+ .wakeup_cpu = palacios_wakeup_cpu,
.mutex_alloc = palacios_mutex_alloc,
.mutex_free = palacios_mutex_free,
.mutex_lock = palacios_mutex_lock,
.mutex_unlock = palacios_mutex_unlock,
+ .mutex_lock_irqsave = palacios_mutex_lock_irqsave,
+ .mutex_unlock_irqrestore= palacios_mutex_unlock_irqrestore,
.get_cpu = palacios_get_cpu,
.interrupt_cpu = palacios_interrupt_cpu,
.call_on_cpu = palacios_xcall,
.start_thread_on_cpu = palacios_start_thread_on_cpu,
- .move_thread_to_cpu = palacios_move_thread_to_cpu,
+ .move_thread_to_cpu = palacios_move_thread_to_cpu,
};
-int palacios_vmm_init( void )
+int palacios_vmm_init( char *options )
{
int num_cpus = num_online_cpus();
char * cpu_mask = NULL;
int minor = 0;
int i = 0;
- cpu_mask = kmalloc((num_cpus / 8) + 1, GFP_KERNEL);
+ cpu_mask = palacios_alloc((num_cpus / 8) + 1);
+
+ if (!cpu_mask) {
+ ERROR("Cannot allocate cpu mask\n");
+ return -1;
+ }
+
memset(cpu_mask, 0, (num_cpus / 8) + 1);
for (i = 0; i < cpu_list_len; i++) {
memset(irq_to_guest_map, 0, sizeof(struct v3_vm_info *) * 256);
+ if (init_print_buffers()) {
+ ERROR("Cannot initialize print buffers\n");
+ palacios_free(cpu_mask);
+ return -1;
+ }
+
INFO("palacios_init starting - calling init_v3\n");
-
- Init_V3(&palacios_os_hooks, cpu_mask, num_cpus);
+
+ Init_V3(&palacios_os_hooks, cpu_mask, num_cpus, options);
return 0;
Shutdown_V3();
+ INFO("palacios shutdown complete\n");
+
+ deinit_print_buffers();
+
return 0;
}