#include "mm.h"
+// The following can be used to track heap bugs
+// zero memory after allocation
+#define ALLOC_ZERO_MEM 0
+// pad allocations by this many bytes on both ends of block
+#define ALLOC_PAD 0
+
u32 pg_allocs = 0;
u32 pg_frees = 0;
+// Free every per-cpu print buffer and clear its slot so a later
+// re-initialization starts from a clean state. The buffers come from
+// palacios_alloc() (see init_print_buffers), so they must be released
+// with palacios_free() - kfree() would bypass the wrapper's ALLOC_PAD
+// offset and its frees counter.
static void deinit_print_buffers(void)
{
int i;
+
for (i=0;i<NR_CPUS;i++) {
if (print_buffer[i]) {
- kfree(print_buffer[i]);
+ palacios_free(print_buffer[i]);
print_buffer[i]=0;
}
}
}
+// Allocate one print buffer per possible cpu. The diff switches the loop
+// bound from a runtime cpu count to the compile-time NR_CPUS so it matches
+// the print_buffer[] array size, and uses palacios_alloc() so these buffers
+// pair with the palacios_free() in deinit_print_buffers().
-static int init_print_buffers(int num_cpus)
+static int init_print_buffers(void)
{
int i;
#if !V3_PRINTK_OLD_STYLE_OUTPUT
- for (i=0;i<num_cpus;i++) {
- print_buffer[i] = kmalloc(V3_PRINTK_BUF_SIZE,GFP_KERNEL);
+ for (i=0;i<NR_CPUS;i++) {
+ print_buffer[i] = palacios_alloc(V3_PRINTK_BUF_SIZE);
if (!print_buffer[i]) {
ERROR("Cannot allocate print buffer for cpu %d\n",i);
deinit_print_buffers();
+ // NOTE(review): no `return -1;` is visible after the cleanup above -
+ // presumably an elided context line of this diff hunk; confirm the
+ // failure actually propagates to the caller.
}
}
+// NOTE(review): the `c != 0` check below appears to belong to a different
+// function (the per-character print path) whose surrounding context was
+// elided from this diff - two hunks are fused here.
if (c!=0) {
- printk(KERN_INFO "palacios (pcore %u): ALERT - 8 BIT CHAR (c=%d) DETECTED\n", cpu,c);
+ printk(KERN_INFO "palacios (pcore %u): ALERT ALERT 8 BIT CHAR (c=%d) DETECTED\n", cpu,c);
}
}
#endif
* Allocates a contiguous region of pages of the requested size.
* Returns the physical address of the first page in the region.
+* Returns NULL (and logs an error) if the underlying allocator fails.
*/
-static void * palacios_allocate_pages(int num_pages, unsigned int alignment) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment) {
void * pg_addr = NULL;
pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment);
+
+ if (!pg_addr) {
+ ERROR("ALERT ALERT Page allocation has FAILED Warning\n");
+ return NULL;
+ }
+
+ // Only count pages that were actually handed out.
pg_allocs += num_pages;
return pg_addr;
* a single call while palacios_free_page() only frees a single page.
*/
+// Counterpart to palacios_allocate_pages(): page_paddr is the physical
+// address that function returned.
-static void palacios_free_pages(void * page_paddr, int num_pages) {
+void palacios_free_pages(void * page_paddr, int num_pages) {
pg_frees += num_pages;
free_palacios_pgs((uintptr_t)page_paddr, num_pages);
}
+/**
+ * Allocates `size` bytes of kernel memory with caller-supplied GFP flags.
+ * Returns a kernel virtual address, or NULL on failure (after logging).
+ * The caller must release the memory with palacios_free(), which undoes
+ * the ALLOC_PAD offset applied below.
+ */
+void *
+palacios_alloc_extended(unsigned int size, unsigned int flags) {
+ void * addr = NULL;
+
+ // Over-allocate by ALLOC_PAD bytes on each side of the block so heap
+ // overrun bugs can be tracked (see the #defines at the top of the file).
+ addr = kmalloc(size+2*ALLOC_PAD, flags);
+
+ if (!addr) {
+ ERROR("ALERT ALERT kmalloc has FAILED FAILED FAILED\n");
+ return NULL;
+ }
+
+ mallocs++;
+
+#if ALLOC_ZERO_MEM
+ memset(addr,0,size+2*ALLOC_PAD);
+#endif
+
+ // Hand back the address just past the leading pad region.
+ return addr+ALLOC_PAD;
+}
+
+
/**
* Allocates 'size' bytes of kernel memory.
* Returns the kernel virtual address of the memory allocated.
+* Returns NULL on failure. Safe to call with interrupts disabled
+* (GFP_ATOMIC is selected in that case).
*/
-static void *
+void *
palacios_alloc(unsigned int size) {
- void * addr = NULL;
+ // It is very important that this test remains since
+ // this function is used extensively throughout palacios and the linux
+ // module, both in places where interrupts are off and where they are on
+ // a GFP_KERNEL call, when done with interrupts off can lead to DEADLOCK
if (irqs_disabled()) {
- addr = kmalloc(size, GFP_ATOMIC);
+ return palacios_alloc_extended(size,GFP_ATOMIC);
} else {
- addr = kmalloc(size, GFP_KERNEL);
+ return palacios_alloc_extended(size,GFP_KERNEL);
}
- mallocs++;
-
- return addr;
}
/**
* Frees memory that was previously allocated by palacios_alloc().
+* Passing NULL is a no-op. Because palacios_alloc_extended() hands out
+* addr+ALLOC_PAD, the pointer must be stepped back before kfree().
*/
-static void
+void
palacios_free(
void * addr
)
{
+ if (!addr) {
+ // Guard: with ALLOC_PAD > 0, kfree(NULL-ALLOC_PAD) would be an
+ // invalid free; the guard also keeps the frees counter accurate.
+ return;
+ }
frees++;
- kfree(addr);
+ kfree(addr-ALLOC_PAD);
return;
}
/**
* Converts a kernel virtual address to the corresponding physical address.
*/
-static void *
+void *
palacios_vaddr_to_paddr(
void * vaddr
)
/**
* Converts a physical address to the corresponding kernel virtual address.
*/
-static void *
+void *
palacios_paddr_to_vaddr(
void * paddr
)
/**
* Runs a function on the specified CPU.
*/
-
-// For now, do call only on local CPU
static void
palacios_xcall(
int cpu_id,
ret = thread_info->fn(thread_info->arg);
- INFO("Palacios Thread (%s) EXITTING\n", thread_info->name);
+ INFO("Palacios Thread (%s) EXITING\n", thread_info->name);
- kfree(thread_info);
+ palacios_free(thread_info);
// handle cleanup
do_exit(ret);
/**
* Creates a kernel thread.
+* Returns an opaque thread handle, or NULL if the argument block cannot
+* be allocated. Ownership of thread_info passes to the new thread, which
+* frees it on exit (see the palacios_free(thread_info) in the thread stub
+* earlier in this diff).
*/
-static void *
+void *
palacios_start_kernel_thread(
int (*fn) (void * arg),
void * arg,
char * thread_name) {
- struct lnx_thread_arg * thread_info = kmalloc(sizeof(struct lnx_thread_arg), GFP_KERNEL);
+ struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg));
+
+ if (!thread_info) {
+ ERROR("ALERT ALERT Unable to allocate thread\n");
+ return NULL;
+ }
thread_info->fn = fn;
thread_info->arg = arg;
/**
* Starts a kernel thread on the specified CPU.
+* Returns NULL on any failure. On the error paths below thread_info is
+* freed here, because the thread never ran and so cannot free it itself.
+* NOTE(review): the kthread-creation call between the two error checks is
+* an elided context line of this diff.
*/
-static void *
+void *
palacios_start_thread_on_cpu(int cpu_id,
int (*fn)(void * arg),
void * arg,
char * thread_name ) {
struct task_struct * thread = NULL;
- struct lnx_thread_arg * thread_info = kmalloc(sizeof(struct lnx_thread_arg), GFP_KERNEL);
+ struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg));
+
+ if (!thread_info) {
+ ERROR("ALERT ALERT Unable to allocate thread to start on cpu\n");
+ return NULL;
+ }
thread_info->fn = fn;
thread_info->arg = arg;
if (IS_ERR(thread)) {
WARNING("Palacios error creating thread: %s\n", thread_name);
+ palacios_free(thread_info);
return NULL;
}
if (set_cpus_allowed_ptr(thread, cpumask_of(cpu_id)) != 0) {
+ WARNING("Attempt to start thread on disallowed CPU\n");
kthread_stop(thread);
+ palacios_free(thread_info);
return NULL;
}
* The thread will be running on target CPU on return
* non-zero return means failure
*/
-static int
+int
palacios_move_thread_to_cpu(int new_cpu_id,
void * thread_ptr) {
struct task_struct * thread = (struct task_struct *)thread_ptr;
//set_idtvec_handler(vector, palacios_dispatch_interrupt);
if (vector < 32) {
- panic("unexpected vector for hooking\n");
+ ERROR("unexpected vector for hooking\n");
+ return -1;
} else {
int device_id = 0;
if (error) {
ERROR("error code for request_irq is %d\n", error);
- panic("request vector %d failed", vector);
+ ERROR("request vector %d failed", vector);
+ return -1;
}
}
/**
* Returns the CPU frequency in kilohertz.
*/
-static unsigned int
+unsigned int
palacios_get_cpu_khz(void)
{
INFO("cpu_khz is %u\n", cpu_khz);
/**
* Yield the CPU so other host OS tasks can run.
+ * This returns immediately if there is no other runnable thread,
+ * and there is no real bound on how long the yield may last.
*/
-static void
+void
palacios_yield_cpu(void)
{
schedule();
return;
}
+/**
+ * Yield the CPU for at least the given number of microseconds.
+ * This returns immediately if there is no other runnable thread,
+ * and there is no hard bound on how long the yield may last.
+ */
+void palacios_yield_cpu_timed(unsigned int us)
+{
+    // microseconds per jiffy (integer division, rounds down)
+    unsigned int uspj = 1000000U/HZ;
+
+    // Number of jiffies to sleep, rounded up so we wait at least `us`.
+    // Deliberately NOT named "jiffies": that would shadow the kernel's
+    // global jiffies tick counter and invite subtle bugs.
+    unsigned int sleep_jiffies = us/uspj + ((us%uspj) !=0); // ceiling
+
+    set_current_state(TASK_INTERRUPTIBLE);
+
+    schedule_timeout(sleep_jiffies);
+
+}
/**
* Allocates a mutex.
* Returns NULL on failure.
+* The "mutex" is actually a spinlock_t; callers treat it as opaque and
+* must release it with palacios_mutex_free().
*/
-static void *
+void *
palacios_mutex_alloc(void)
{
- spinlock_t *lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+ spinlock_t *lock = palacios_alloc(sizeof(spinlock_t));
if (lock) {
spin_lock_init(lock);
+ } else {
+ ERROR("ALERT ALERT Unable to allocate lock\n");
+ return NULL;
+ }
}
return lock;
/**
* Frees a mutex.
+* Must be given a lock from palacios_mutex_alloc(): palacios_free()
+* pairs with the palacios_alloc() used there.
*/
-static void
+void
palacios_mutex_free(void * mutex) {
- kfree(mutex);
+ palacios_free(mutex);
}
/**
* Locks a mutex.
+* must_spin is ignored here: the underlying primitive is a spinlock,
+* so the caller always spins.
*/
-static void
+void
palacios_mutex_lock(void * mutex, int must_spin) {
spin_lock((spinlock_t *)mutex);
}
+
+/**
+ * Locks a mutex, disabling interrupts on this core.
+ * Returns the saved interrupt state as an opaque void *; the caller
+ * must pass it back unchanged to palacios_mutex_unlock_irqrestore().
+ * (must_spin is ignored - a spinlock is always used.)
+ */
+void *
+palacios_mutex_lock_irqsave(void * mutex, int must_spin) {
+
+ unsigned long flags;
+
+ spin_lock_irqsave((spinlock_t *)mutex,flags);
+
+ // flags fits in a pointer; decoded by the irqrestore counterpart
+ return (void *)flags;
+}
+
+
/**
* Unlocks a mutex.
+* Plain unlock; interrupt state is untouched (cf. the irqrestore variant).
*/
-static void
+void
palacios_mutex_unlock(
void * mutex
)
spin_unlock((spinlock_t *)mutex);
}
+
+/**
+ * Unlocks a mutex and restores the previous interrupt state on this core.
+ * `flags` must be the opaque value returned by palacios_mutex_lock_irqsave().
+ */
+void
+palacios_mutex_unlock_irqrestore(void *mutex, void *flags)
+{
+ // This is correct, flags is opaque: it round-trips through void *
+ // unchanged from the matching lock_irqsave call.
+ spin_unlock_irqrestore((spinlock_t *)mutex,(unsigned long)flags);
+}
+
/**
* Structure used by the Palacios hypervisor to interface with the host kernel.
*/
.get_cpu_khz = palacios_get_cpu_khz,
.start_kernel_thread = palacios_start_kernel_thread,
.yield_cpu = palacios_yield_cpu,
+ .yield_cpu_timed = palacios_yield_cpu_timed,
.mutex_alloc = palacios_mutex_alloc,
.mutex_free = palacios_mutex_free,
.mutex_lock = palacios_mutex_lock,
.mutex_unlock = palacios_mutex_unlock,
+ .mutex_lock_irqsave = palacios_mutex_lock_irqsave,
+ .mutex_unlock_irqrestore= palacios_mutex_unlock_irqrestore,
.get_cpu = palacios_get_cpu,
.interrupt_cpu = palacios_interrupt_cpu,
.call_on_cpu = palacios_xcall,
int minor = 0;
int i = 0;
- cpu_mask = kmalloc((num_cpus / 8) + 1, GFP_KERNEL);
+ cpu_mask = palacios_alloc((num_cpus / 8) + 1);
if (!cpu_mask) {
ERROR("Cannot allocate cpu mask\n");
memset(irq_to_guest_map, 0, sizeof(struct v3_vm_info *) * 256);
- if (init_print_buffers(num_cpus)) {
+ if (init_print_buffers()) {
ERROR("Cannot initialize print buffers\n");
- kfree(cpu_mask);
+ palacios_free(cpu_mask);
return -1;
}