#include <linux/kthread.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
+#include <linux/vmalloc.h>
#include <palacios/vmm.h>
#include <palacios/vmm_host_events.h>
#include "palacios.h"
-
-
-
#include "mm.h"
+#include "memcheck.h"
+#include "lockcheck.h"
+
// The following can be used to track heap bugs
// zero memory after allocation
#define ALLOC_ZERO_MEM 0
u32 pg_frees = 0;
u32 mallocs = 0;
u32 frees = 0;
-
+u32 vmallocs = 0;
+u32 vfrees = 0;
static struct v3_vm_info * irq_to_guest_map[256];
* Allocates a contiguous region of pages of the requested size.
* Returns the physical address of the first page in the region.
*/
-void *palacios_allocate_pages(int num_pages, unsigned int alignment) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id) {
void * pg_addr = NULL;
- pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment);
+ if (num_pages<=0) {
+ ERROR("ALERT ALERT Attempt to allocate zero or fewer pages\n");
+ return NULL;
+ }
+
+ pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id);
if (!pg_addr) {
ERROR("ALERT ALERT Page allocation has FAILED Warning\n");
pg_allocs += num_pages;
+ MEMCHECK_ALLOC_PAGES(pg_addr,num_pages*4096);
+
return pg_addr;
}
void palacios_free_pages(void * page_paddr, int num_pages) {
pg_frees += num_pages;
free_palacios_pgs((uintptr_t)page_paddr, num_pages);
+ MEMCHECK_FREE_PAGES(page_paddr,num_pages*4096);
+
}
palacios_alloc_extended(unsigned int size, unsigned int flags) {
void * addr = NULL;
+ if (size==0) {
+ // note that modern kernels will respond to a zero byte
+ // kmalloc and return the address 0x10... In Palacios,
+ // we will simply not allow 0 byte allocs at all, of any kind
+ ERROR("ALERT ALERT attempt to kmalloc zero bytes rejected\n");
+ return NULL;
+ }
+
addr = kmalloc(size+2*ALLOC_PAD, flags);
if (!addr) {
memset(addr,0,size+2*ALLOC_PAD);
#endif
+ MEMCHECK_KMALLOC(addr,size+2*ALLOC_PAD);
+
return addr+ALLOC_PAD;
}
+void *
+palacios_valloc(unsigned int size)
+{
+ void * addr = NULL;
+
+ if (size==0) {
+ ERROR("ALERT ALERT attempt to vmalloc zero bytes rejected\n");
+ return NULL;
+ }
+
+ addr = vmalloc(size);
+
+ if (!addr) {
+ ERROR("ALERT ALERT vmalloc has FAILED FAILED FAILED\n");
+ return NULL;
+ }
+
+ vmallocs++;
+
+ MEMCHECK_VMALLOC(addr,size);
+
+ return addr;
+}
+
+void palacios_vfree(void *p)
+{
+ vfree(p);
+ vfrees++;
+ MEMCHECK_VFREE(p);
+}
/**
* Allocates 'size' bytes of kernel memory.
{
frees++;
kfree(addr-ALLOC_PAD);
- return;
+ MEMCHECK_KFREE(addr-ALLOC_PAD);
}
/**
if (lock) {
spin_lock_init(lock);
+ LOCKCHECK_ALLOC(lock);
} else {
ERROR("ALERT ALERT Unable to allocate lock\n");
return NULL;
return lock;
}
+void palacios_mutex_init(void *mutex)
+{
+ spinlock_t *lock = (spinlock_t*)mutex;
+
+ if (lock) {
+ spin_lock_init(lock);
+ LOCKCHECK_ALLOC(lock);
+ }
+}
+
+void palacios_mutex_deinit(void *mutex)
+{
+ spinlock_t *lock = (spinlock_t*)mutex;
+
+ if (lock) {
+ // no actual spin_lock_deinit on linux
+ // our purpose here is to drive the lock checker
+ LOCKCHECK_FREE(lock);
+ }
+}
+
+
/**
* Frees a mutex.
*/
/*
 * Frees a mutex allocated by palacios_mutex_alloc().
 *
 * The lock checker is notified *before* the underlying storage is
 * released; the previous ordering handed the checker a pointer whose
 * memory had already been freed, which is unsafe if the checker ever
 * inspects the lock's contents.
 */
void
palacios_mutex_free(void * mutex) {
    LOCKCHECK_FREE(mutex);
    palacios_free(mutex);
}
/**
*/
void
palacios_mutex_lock(void * mutex, int must_spin) {
+
+ LOCKCHECK_LOCK_PRE(mutex);
spin_lock((spinlock_t *)mutex);
+ LOCKCHECK_LOCK_POST(mutex);
}
unsigned long flags;
+ LOCKCHECK_LOCK_IRQSAVE_PRE(mutex,flags);
spin_lock_irqsave((spinlock_t *)mutex,flags);
+ LOCKCHECK_LOCK_IRQSAVE_POST(mutex,flags);
return (void *)flags;
}
void * mutex
)
{
+ LOCKCHECK_UNLOCK_PRE(mutex);
spin_unlock((spinlock_t *)mutex);
+ LOCKCHECK_UNLOCK_POST(mutex);
}
/*
 * Releases a mutex taken with the irqsave variant and restores the
 * saved interrupt state. 'flags' carries the opaque value the irqsave
 * lock call returned; casting it back through unsigned long is correct
 * since that is exactly how it was packed.
 */
void
palacios_mutex_unlock_irqrestore(void *mutex, void *flags)
{
    unsigned long saved = (unsigned long)flags;

    LOCKCHECK_UNLOCK_IRQRESTORE_PRE(mutex, saved);
    spin_unlock_irqrestore((spinlock_t *)mutex, saved);
    LOCKCHECK_UNLOCK_IRQRESTORE_POST(mutex, saved);
}
/**