* Allocates a contiguous region of pages of the requested size.
* Returns the physical address of the first page in the region.
*/
-void *palacios_allocate_pages(int num_pages, unsigned int alignment) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id) {
void * pg_addr = NULL;
- pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment);
+ if (num_pages<=0) {
+ ERROR("ALERT ALERT Attempt to allocate zero or fewer pages\n");
+ return NULL;
+ }
+
+ pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id);
if (!pg_addr) {
ERROR("ALERT ALERT Page allocation has FAILED Warning\n");
palacios_alloc_extended(unsigned int size, unsigned int flags) {
void * addr = NULL;
+ if (size==0) {
+ // note that modern kernels will respond to a zero byte
+ // kmalloc and return the address 0x10... In Palacios,
+ // we will simply not allow 0 byte allocs at all, of any kind
+ ERROR("ALERT ALERT attempt to kmalloc zero bytes rejected\n");
+ return NULL;
+ }
+
addr = kmalloc(size+2*ALLOC_PAD, flags);
if (!addr) {
memset(addr,0,size+2*ALLOC_PAD);
#endif
- MEMCHECK_KMALLOC(addr+ALLOC_PAD,size+2*ALLOC_PAD);
+ MEMCHECK_KMALLOC(addr,size+2*ALLOC_PAD);
return addr+ALLOC_PAD;
}
{
void * addr = NULL;
+ if (size==0) {
+ ERROR("ALERT ALERT attempt to vmalloc zero bytes rejected\n");
+ return NULL;
+ }
+
addr = vmalloc(size);
if (!addr) {
}
}
+/**
+ * Tears down a mutex previously created by the palacios mutex interface.
+ * Linux has no spinlock deinit call, so the only real work is notifying
+ * the lock checker that the lock is going away. A NULL mutex is a no-op.
+ */
+void palacios_mutex_deinit(void *mutex)
+{
+    spinlock_t *lock = (spinlock_t*)mutex;
+
+    if (lock) {
+	// no actual spin_lock_deinit on linux
+	// our purpose here is to drive the lock checker
+	LOCKCHECK_FREE(lock);
+    }
+}
+
/**
* Frees a mutex.
*/
+// Lock-checker instrumentation is split into PRE/POST hooks bracketing the
+// actual acquisition. NOTE(review): presumably so the checker can record the
+// attempt even while spin_lock is still spinning — confirm against the
+// LOCKCHECK macro definitions.
void
palacios_mutex_lock(void * mutex, int must_spin) {
+
+    LOCKCHECK_LOCK_PRE(mutex);
    spin_lock((spinlock_t *)mutex);
-    LOCKCHECK_LOCK(mutex);
+    LOCKCHECK_LOCK_POST(mutex);
}
unsigned long flags;
+ LOCKCHECK_LOCK_IRQSAVE_PRE(mutex,flags);
spin_lock_irqsave((spinlock_t *)mutex,flags);
- LOCKCHECK_LOCK_IRQSAVE(mutex,flags);
+ LOCKCHECK_LOCK_IRQSAVE_POST(mutex,flags);
return (void *)flags;
}
void * mutex
)
{
+ LOCKCHECK_UNLOCK_PRE(mutex);
spin_unlock((spinlock_t *)mutex);
- LOCKCHECK_UNLOCK(mutex);
+ LOCKCHECK_UNLOCK_POST(mutex);
}
+// Releases the lock and restores the interrupt state saved at lock time.
+// 'flags' is the opaque (void *) value handed back by the irqsave lock
+// path, cast back to the unsigned long the kernel API expects.
void
palacios_mutex_unlock_irqrestore(void *mutex, void *flags)
{
+    LOCKCHECK_UNLOCK_IRQRESTORE_PRE(mutex,(unsigned long)flags);
    // This is correct, flags is opaque
    spin_unlock_irqrestore((spinlock_t *)mutex,(unsigned long)flags);
-    LOCKCHECK_UNLOCK_IRQRESTORE(mutex,(unsigned long)flags);
+    LOCKCHECK_UNLOCK_IRQRESTORE_POST(mutex,(unsigned long)flags);
}
/**