allow_devmem.o \
util-queue.o \
util-hashtable.o \
- linux-exts.o
+ linux-exts.o \
+ lockcheck.o
v3vee-$(V3_CONFIG_CONSOLE) += iface-console.o
v3vee-$(V3_CONFIG_FILE) += iface-file.o
#include <linux/elf.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include <linux/module.h>
palacios_free(msg);
- spin_lock_irqsave(&(cons->queue->lock), flags);
+ palacios_spinlock_lock_irqsave(&(cons->queue->lock), flags);
entries = cons->queue->num_entries;
- spin_unlock_irqrestore(&(cons->queue->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(cons->queue->lock), flags);
if (entries > 0) {
wake_up_interruptible(&(cons->intr_queue));
poll_wait(filp, &(cons->intr_queue), poll_tb);
- spin_lock_irqsave(&(cons->queue->lock), flags);
+ palacios_spinlock_lock_irqsave(&(cons->queue->lock), flags);
entries = cons->queue->num_entries;
- spin_unlock_irqrestore(&(cons->queue->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(cons->queue->lock), flags);
if (entries > 0) {
// DEBUG("Returning from POLL\n");
DEBUG("Releasing the Console File desc\n");
- spin_lock_irqsave(&(cons->queue->lock), flags);
+ palacios_spinlock_lock_irqsave(&(cons->queue->lock), flags);
cons->connected = 0;
- spin_unlock_irqrestore(&(cons->queue->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(cons->queue->lock), flags);
while ((msg = dequeue(cons->queue))) {
palacios_free(msg);
return -1;
}
- spin_lock_irqsave(&(cons->lock), flags);
+ palacios_spinlock_lock_irqsave(&(cons->lock), flags);
if (cons->connected == 0) {
cons->connected = 1;
acquired = 1;
}
- spin_unlock_irqrestore(&(cons->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(cons->lock), flags);
if (acquired == 0) {
ERROR("Console already connected\n");
cons->queue = create_queue(CONSOLE_QUEUE_LEN);
- spin_lock_init(&(cons->lock));
+ palacios_spinlock_init(&(cons->lock));
init_waitqueue_head(&(cons->intr_queue));
cons->guest = guest;
remove_guest_ctrl(cons->guest, V3_VM_CONSOLE_CONNECT);
deinit_queue(cons->queue);
- kfree(cons);
+ palacios_free(cons);
}
*/
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include <gears/env_inject.h>
strncpy(pfile->path, path, strlen(path));
pfile->guest = guest;
- spin_lock_init(&(pfile->lock));
+ palacios_spinlock_init(&(pfile->lock));
if (guest == NULL) {
list_add(&(pfile->file_node), &(global_files));
#include "linux-exts.h"
#include "vm.h"
-#include <linux/vmalloc.h>
/*
DEBUG("palacios: allocating %u bytes for %u by %u by %u buffer\n",
mem, desired_spec->width, desired_spec->height, desired_spec->bytes_per_pixel);
- gc->data = vmalloc(mem);
+ gc->data = palacios_valloc(mem);
if (!(gc->data)) {
ERROR("palacios: unable to allocate memory for frame buffer\n");
return;
}
if (gc->data) {
- vfree(gc->data);
+ palacios_vfree(gc->data);
gc->data=0;
}
}
list_del(&(gc->gcons_node));
if (gc->data)
- vfree(gc->data);
+ palacios_vfree(gc->data);
palacios_free(gc);
}
list_del(&(graphics_cons->gcons_node));
if (graphics_cons->data) {
- vfree(graphics_cons->data);
+ palacios_vfree(graphics_cons->data);
}
palacios_free(graphics_cons);
*/
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
return -1;
}
- kfree(gm->name);
- kfree(gm->content_hash);
+ palacios_free(gm->name);
+ palacios_free(gm->content_hash);
for (i = 0; i < gm->num_entries; i++) {
- kfree(gm->entry_points[i].name);
+ palacios_free(gm->entry_points[i].name);
}
- kfree(gm->entry_points);
- kfree(gm);
+ palacios_free(gm->entry_points);
+ palacios_free(gm);
return 0;
}
return -EFAULT;
}
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (dev->waiting) {
// Yes, we have a request if you want it!
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: hostdev: poll done immediate\n");
return POLLIN | POLLRDNORM;
}
// register ourselves on the user wait queue
poll_wait(filp, &(dev->user_wait_queue), poll_tb);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: hostdev: poll delayed\n");
// We will get called again when that queue is woken up
INFO("palacios: user side is closing host device \"%s\"\n",dev->url);
- spin_lock_irqsave(&(dev->lock), f);
+ palacios_spinlock_lock_irqsave(&(dev->lock), f);
dev->connected = 0;
- spin_unlock_irqrestore(&(dev->lock), f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock), f);
// it is the palacios->host interface's responsibility to ignore
// reads/writes until connected is true
DEEP_DEBUG_PRINT("palacios: hostdev: request size of request\n");
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (!(dev->waiting)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: hostdev: no request available\n");
schedule(); // avoid livelock for a polling user space process - SUSPICIOUS
return 0; // no request available now
}
if (copy_to_user(argp,&(dev->req->data_len),sizeof(uint64_t))) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: unable to copy to user for host device \"%s\"\n",dev->url);
return -EFAULT; // failed to copy!
}
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: hostdev: have request\n");
unsigned long f;
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: hostdev: pull request\n");
if (!(dev->waiting) || !(dev->req)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: hostdev: no request to pull\n");
return 0; // no request available now
}
if (copy_to_user(argp,dev->req,dev->req->data_len)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: unable to copy to user for host device \"%s\"\n",dev->url);
return -EFAULT; // failed to copy!
}
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: hostdev: request pulled\n");
uint64_t user_datalen;
uint64_t old_len;
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: hostdev: push response\n");
if (!(dev->waiting)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: hostdev: no matching request for pushed response\n");
return 0; // no request outstanding, so we do not need a response!
}
if (copy_from_user(&user_datalen,argp,sizeof(uint64_t))) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: unable to copy from user for host device \"%s\"\n",dev->url);
return -EFAULT; // failed to copy!
}
if (user_datalen<sizeof(struct palacios_host_dev_host_request_response)) {
// bad user
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: user has response that is too small on host device \"%s\"\n",dev->url);
return -EFAULT;
}
// we drop the lock, turn on interrupts, resize, and then retry
DEEP_DEBUG_PRINT("palacios: response not big enough, dropping lock to resize on device \"%s\"\n",dev->url);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
if (palacios_resize_reqresp(&(dev->resp),user_datalen-sizeof(struct palacios_host_dev_host_request_response),0)) {
ERROR("palacios: unable to resize to accept response of size %llu from user for host device \"%s\"\n",user_datalen,dev->url);
// There shouldn't be a race here, since there should
// be exactly one user space thread giving us a response for this device
// and it is blocked waiting for us to finish
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: reacuired lock on device \"%s\"\n",dev->url);
}
}
old_len = dev->resp->len;
if (copy_from_user(dev->resp, argp, user_datalen)) {
dev->resp->len=old_len;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: unable to copy from user for host device \"%s\"\n",dev->url);
return -EFAULT; // failed to copy!
}
// now have valid response!
dev->waiting=0;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
// wake the palacios side up so that it sees it
cycle_response_request(dev);
// URL. If we don't find it after a while, we give up
for (i=0;i<RENDEZVOUS_WAIT_SECS/RENDEZVOUS_RETRY_SECS;i++) {
- spin_lock_irqsave(&(host_dev->lock),f1);
+ palacios_spinlock_lock_irqsave(&(host_dev->lock),f1);
list_for_each_entry(dev,&(host_dev->devs), node) {
if (!strncasecmp(url,dev->url,MAX_URL)) {
// found it
- spin_lock_irqsave(&(dev->lock),f2);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f2);
if (dev->connected) {
ERROR("palacios: device for \"%s\" is already connected!\n",url);
- spin_unlock_irqrestore(&(dev->lock),f2);
- spin_unlock_irqrestore(&(host_dev->lock),f1);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f2);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->lock),f1);
return -1;
} else {
dev->fd = anon_inode_getfd("v3-hostdev", &host_dev_fops, dev, 0);
if (dev->fd<0) {
ERROR("palacios: cannot create fd for device \"%s\"\n",url);
- spin_unlock_irqrestore(&(dev->lock),f2);
- spin_unlock_irqrestore(&(host_dev->lock),f1);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f2);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->lock),f1);
return -1;
}
dev->connected=1;
dev->resp=0;
}
INFO("palacios: connected fd for device \"%s\"\n",url);
- spin_unlock_irqrestore(&(dev->lock),f2);
- spin_unlock_irqrestore(&(host_dev->lock),f1);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f2);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->lock),f1);
return dev->fd;
}
- spin_unlock_irqrestore(&(dev->lock),f2);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f2);
}
}
- spin_unlock_irqrestore(&(host_dev->lock),f1);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->lock),f1);
ssleep(RENDEZVOUS_RETRY_SECS);
}
// Now wait until we are noticed!
for (i=0;i<RENDEZVOUS_WAIT_SECS/RENDEZVOUS_RETRY_SECS;i++) {
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (dev->connected) {
INFO("palacios: connection with user side established for host device \"%s\" fd=%d\n",dev->url,dev->fd);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
return 0;
}
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ssleep(RENDEZVOUS_RETRY_SECS);
}
}
// Check to see if a device of this url already exists, which would be ugly
- spin_lock_irqsave(&(host_dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(host_dev->lock),f);
list_for_each_entry(dev,&(host_dev->devs), node) {
if (!strncasecmp(url,dev->url,MAX_URL)) {
// found it
- spin_unlock_irqrestore(&(host_dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->lock),f);
ERROR("palacios: a host device with url \"%s\" already exists in the guest!\n",url);
return NULL;
}
}
- spin_unlock_irqrestore(&(host_dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->lock),f);
INFO("palacios: creating host device \"%s\"\n",url);
dev->guest = guest;
- spin_lock_init(&(dev->lock));
+ palacios_spinlock_init(&(dev->lock));
init_waitqueue_head(&(dev->user_wait_queue));
init_waitqueue_head(&(dev->host_wait_queue));
// Insert ourselves into the list
- spin_lock_irqsave(&(host_dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(host_dev->lock),f);
list_add(&(dev->node),&(host_dev->devs));
- spin_unlock_irqrestore(&(host_dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->lock),f);
INFO("palacios: host device \"%s\" created with deferred rendezvous\n",url);
return -1;
}
- spin_lock_irqsave(&(host_dev->lock),f1);
+ palacios_spinlock_lock_irqsave(&(host_dev->lock),f1);
- spin_lock_irqsave(&(dev->lock),f2);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f2);
if (dev->connected) {
dev->connected=0;
list_del(&(dev->node));
- spin_unlock_irqrestore(&(dev->lock),f2);
- spin_unlock_irqrestore(&(host_dev->lock),f1);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f2);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->lock),f1);
palacios_host_dev_user_free(dev);
DEEP_DEBUG_PRINT("palacios: hostdev: read io port 0x%x\n",port);
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (palacios_host_dev_rendezvous(dev)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: ignoring request as user side is not connected (and did not rendezvous) for host device \"%s\"\n",dev->url);
return 0;
}
if (dev->waiting) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: guest issued i/o read request with host device \"%s\" in wrong state (waiting=%d, connected=%d)\n",dev->url,dev->waiting,dev->connected);
return 0;
}
// we drop the lock, turn on interrupts, resize, and then retry
DEEP_DEBUG_PRINT("palacios: request not big enough, dropping lock to resize on device \"%s\"\n",dev->url);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
if (palacios_resize_reqresp(&(dev->req),0,0)) {
ERROR("palacios: cannot resize for request on device \"%s\"\n",dev->url);
// reacquire the lock
// There shouldn't be a race here since there should not be another
// request from palacios until this one finishes
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: reacquired lock on device \"%s\"\n",dev->url);
}
}
dev->waiting=1;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
// hand over to the user space and wait for it to respond
cycle_request_response(dev);
// We're back! So now we'll hand the response back to Palacios
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
op_len = dev->resp->op_len < len ? dev->resp->op_len : len ;
memcpy(dest,dev->resp->data, op_len);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
return op_len;
}
DEEP_DEBUG_PRINT("palacios: hostdev: read mem 0x%p\n",gpa);
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (palacios_host_dev_rendezvous(dev)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: ignoring request as user side is not connected (and did not rendezvous) for host device \"%s\"\n",dev->url);
return 0;
}
if (dev->waiting) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: guest issued memory read request with host device \"%s\" in wrong state (waiting=%d, connected=%d)\n",dev->url,dev->waiting,dev->connected);
return 0;
}
// we drop the lock, turn on interrupts, resize, and then retry
DEEP_DEBUG_PRINT("palacios: request not big enough, dropping lock to resize on device \"%s\"\n",dev->url);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
if (palacios_resize_reqresp(&(dev->req),0,0)) {
ERROR("palacios: cannot resize for request on device \"%s\"\n",dev->url);
// reacquire the lock
// There shouldn't be a race here since there should not be another
// request from palacios until this one finishes
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: reacquired lock on device \"%s\"\n",dev->url);
}
}
dev->waiting=1;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
// hand over to the user space and wait for it to respond
cycle_request_response(dev);
// We're back! So now we'll hand the response back to Palacios
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
op_len = dev->resp->op_len < len ? dev->resp->op_len : len ;
memcpy(dest,dev->resp->data, op_len);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
return op_len;
}
DEEP_DEBUG_PRINT("palacios: hostdev: read conf 0x%p\n",(void*)offset);
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (palacios_host_dev_rendezvous(dev)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: ignoring request as user side is not connected (and did not rendezvous) for host device \"%s\"\n",dev->url);
return 0;
}
if (dev->waiting) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: guest issued config read request with host device \"%s\" in wrong state (waiting=%d, connected=%d)\n",dev->url,dev->waiting,dev->connected);
return 0;
}
// we drop the lock, turn on interrupts, resize, and then retry
DEEP_DEBUG_PRINT("palacios: request not big enough, dropping lock to resize on device \"%s\"\n",dev->url);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
if (palacios_resize_reqresp(&(dev->req),0,0)) {
ERROR("palacios: cannot resize for request on device \"%s\"\n",dev->url);
// reacquire the lock
// There shouldn't be a race here since there should not be another
// request from palacios until this one finishes
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: reacquired lock on device \"%s\"\n",dev->url);
}
}
dev->waiting=1;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
// hand over to the user space and wait for it to respond
cycle_request_response(dev);
// We're back! So now we'll hand the response back to Palacios
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
op_len = dev->resp->op_len < len ? dev->resp->op_len : len ;
memcpy(dest,dev->resp->data, op_len);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
return op_len;
}
DEEP_DEBUG_PRINT("palacios: hostdev: write io port 0x%x \n",port);
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (palacios_host_dev_rendezvous(dev)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: ignoring request as user side is not connected (and did not rendezvous) for host device \"%s\"\n",dev->url);
return 0;
}
if (dev->waiting) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: guest issued i/o write request with host device \"%s\" in wrong state (waiting=%d, connected=%d)\n",dev->url,dev->waiting,dev->connected);
return 0;
}
// we drop the lock, turn on interrupts, resize, and then retry
DEEP_DEBUG_PRINT("palacios: request not big enough, dropping lock to resize on device \"%s\"\n",dev->url);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
if (palacios_resize_reqresp(&(dev->req),len,0)) {
ERROR("palacios: cannot resize for request on device \"%s\"\n",dev->url);
// reacquire the lock
// There shouldn't be a race here since there should not be another
// request from palacios until this one finishes
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: reacquired lock on device \"%s\"\n",dev->url);
}
}
dev->waiting=1;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
// hand over to the user space and wait for it to respond
cycle_request_response(dev);
// We're back! So now we'll hand the response back to Palacios
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
op_len = dev->resp->op_len < len ? dev->resp->op_len : len ;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
return op_len;
}
DEEP_DEBUG_PRINT("palacios: hostdev: write mem 0x%p\n",gpa);
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (palacios_host_dev_rendezvous(dev)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: ignoring request as user side is not connected (and did not rendezvous) for host device \"%s\"\n",dev->url);
return 0;
}
if (dev->waiting) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: guest issued memory write request with host device \"%s\" in wrong state (waiting=%d, connected=%d)\n",dev->url,dev->waiting,dev->connected);
return 0;
}
// we drop the lock, turn on interrupts, resize, and then retry
DEEP_DEBUG_PRINT("palacios: request not big enough, dropping lock to resize on device \"%s\"\n",dev->url);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
if (palacios_resize_reqresp(&(dev->req),len,0)) {
ERROR("palacios: cannot resize for request on device \"%s\"\n",dev->url);
// reacquire the lock
// There shouldn't be a race here since there should not be another
// request from palacios until this one finishes
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: reacquired lock on device \"%s\"\n",dev->url);
}
}
dev->waiting=1;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
// hand over to the user space and wait for it to respond
cycle_request_response(dev);
// We're back! So now we'll hand the response back to Palacios
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
op_len= dev->resp->op_len < len ? dev->resp->op_len : len ;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
return op_len;
}
DEEP_DEBUG_PRINT("palacios: hostdev: write conf 0x%p\n",(void*)offset);
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
if (palacios_host_dev_rendezvous(dev)) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: ignoring request as user side is not connected (and did not rendezvous) for host device \"%s\"\n",dev->url);
return 0;
}
if (dev->waiting) {
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
ERROR("palacios: guest issued config write request with host device \"%s\" in wrong state (waiting=%d, connected=%d)\n",dev->url,dev->waiting,dev->connected);
return 0;
}
// we drop the lock, turn on interrupts, resize, and then retry
DEEP_DEBUG_PRINT("palacios: request not big enough, dropping lock to resize on device \"%s\"\n",dev->url);
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
if (palacios_resize_reqresp(&(dev->req),len,0)) {
ERROR("palacios: cannot resize for request on device \"%s\"\n",dev->url);
// reacquire the lock
// There shouldn't be a race here since there should not be another
// request from palacios until this one finishes
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
DEEP_DEBUG_PRINT("palacios: reacquired lock on device \"%s\"\n",dev->url);
}
}
dev->waiting=1;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
// hand over to the user space and wait for it to respond
cycle_request_response(dev);
// We're back! So now we'll hand the response back to Palacios
- spin_lock_irqsave(&(dev->lock),f);
+ palacios_spinlock_lock_irqsave(&(dev->lock),f);
op_len = dev->resp->op_len < len ? dev->resp->op_len : len ;
- spin_unlock_irqrestore(&(dev->lock),f);
+ palacios_spinlock_unlock_irqrestore(&(dev->lock),f);
return op_len;
}
INIT_LIST_HEAD(&(host_dev->devs));
- spin_lock_init(&(host_dev->lock));
+ palacios_spinlock_init(&(host_dev->lock));
*vm_data = host_dev;
#include <linux/elf.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include <linux/module.h>
host_dev->hw_dev.dev = dev;
host_dev->hw_dev.intx_disabled = 1;
- spin_lock_init(&(host_dev->hw_dev.intx_lock));
+ palacios_spinlock_init(&(host_dev->hw_dev.intx_lock));
if (pci_enable_device(dev)) {
printk("Could not enable Device\n");
// printk("Host PCI IRQ handler (%d)\n", irq);
- spin_lock(&(host_dev->hw_dev.intx_lock));
+ palacios_spinlock_lock(&(host_dev->hw_dev.intx_lock));
disable_irq_nosync(irq);
host_dev->hw_dev.intx_disabled = 1;
- spin_unlock(&(host_dev->hw_dev.intx_lock));
+ palacios_spinlock_unlock(&(host_dev->hw_dev.intx_lock));
V3_host_pci_raise_irq(&(host_dev->v3_dev), 0);
}
host_dev->hw_dev.num_msix_vecs = 0;
- kfree(host_dev->hw_dev.msix_entries);
+ palacios_free(host_dev->hw_dev.msix_entries);
pci_disable_msix(dev);
// printk("Acking IRQ vector %d\n", vector);
- spin_lock_irqsave(&(host_dev->hw_dev.intx_lock), flags);
+ palacios_spinlock_lock_irqsave(&(host_dev->hw_dev.intx_lock), flags);
// printk("Enabling IRQ %d\n", dev->irq);
enable_irq(dev->irq);
host_dev->hw_dev.intx_disabled = 0;
- spin_unlock_irqrestore(&(host_dev->hw_dev.intx_lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(host_dev->hw_dev.intx_lock), flags);
return 0;
}
struct v3_host_pci_dev * v3_dev = &(host_dev->v3_dev);
struct pci_dev * dev = host_dev->hw_dev.dev;
- spin_lock_irqsave(&lock, flags);
+ palacios_spinlock_lock_irqsave(&lock, flags);
if (host_dev->hw_dev.in_use == 0) {
host_dev->hw_dev.in_use = 1;
} else {
ret = -1;
}
- spin_unlock_irqrestore(&lock, flags);
+ palacios_spinlock_unlock_irqrestore(&lock, flags);
if (v3_dev->iface == IOMMU) {
unsigned long flags;
struct host_pci_device * host_dev = NULL;
- spin_lock_irqsave(&lock, flags);
+ palacios_spinlock_lock_irqsave(&lock, flags);
host_dev = find_dev_by_name(url);
- spin_unlock_irqrestore(&lock, flags);
+ palacios_spinlock_unlock_irqrestore(&lock, flags);
if (host_dev == NULL) {
printk("Could not find host device (%s)\n", url);
host_dev->hw_dev.devfn = PCI_DEVFN(hw_dev_arg.dev, hw_dev_arg.func);
- spin_lock_irqsave(&lock, flags);
+ palacios_spinlock_lock_irqsave(&lock, flags);
if (!find_dev_by_name(hw_dev_arg.name)) {
list_add(&(host_dev->dev_node), &device_list);
ret = 1;
}
- spin_unlock_irqrestore(&lock, flags);
+ palacios_spinlock_unlock_irqrestore(&lock, flags);
if (ret == 0) {
// Error device already exists
- kfree(host_dev);
+ palacios_free(host_dev);
return -EFAULT;
}
static int host_pci_init( void ) {
INIT_LIST_HEAD(&(device_list));
- spin_lock_init(&lock);
+ palacios_spinlock_init(&lock);
V3_Init_Host_PCI(&pci_hooks);
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/namei.h>
-#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/anon_inodes.h>
}
- m->data = vmalloc(size);
+ m->data = palacios_valloc(size);
if (!m->data) {
palacios_free(m);
{
if (m) {
if (m->data) {
- vfree(m->data);
+ palacios_vfree(m->data);
}
m->data=0;
palacios_free(m);
static int expand_mem_stream(struct mem_stream *m, uint32_t new_size)
{
- void *data = vmalloc(new_size);
+ void *data = palacios_valloc(new_size);
uint32_t nc;
if (!data) {
memcpy(data,m->data,nc);
- vfree(m->data);
+ palacios_vfree(m->data);
m->data=data;
m->size=new_size;
s->waiting = 1;
// release the stream
- spin_unlock_irqrestore(&(s->lock), *flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), *flags);
// wake up anyone waiting on it
wake_up_interruptible(&(s->user_wait_queue));
while (wait_event_interruptible(s->host_wait_queue, (s->waiting == 0)) != 0) {}
// reacquire the lock for our caller
- spin_lock_irqsave(&(s->lock), *flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), *flags);
return 0;
}
s->waiting = 0;
// release the stream
- spin_unlock_irqrestore(&(s->lock), *flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), *flags);
// wake up anyone waiting on it
wake_up_interruptible(&(s->host_wait_queue));
return POLLERR;
}
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (s->waiting) {
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return POLLIN | POLLRDNORM;
}
poll_wait(filp, &(s->user_wait_queue), wait);
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return 0;
}
// inform request size
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (!(s->waiting)) {
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return 0;
}
size = sizeof(struct palacios_user_keyed_stream_op) + s->op->buf_len;
if (copy_to_user((void * __user) argp, &size, sizeof(uint64_t))) {
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
ERROR("palacios user key size request failed to copy data\n");
return -EFAULT;
}
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return 1;
// pull the request
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (!(s->waiting)) {
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
ERROR("palacios user key pull request when not waiting\n");
return 0;
}
if (copy_to_user((void __user *) argp, s->op, size)) {
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
ERROR("palacios user key pull request failed to copy data\n");
return -EFAULT;
}
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return 1;
// push the response
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (!(s->waiting)) {
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
ERROR("palacios user key push response when not waiting\n");
return 0;
}
if (copy_from_user(&size, (void __user *) argp, sizeof(uint64_t))) {
ERROR("palacios user key push response failed to copy size\n");
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return -EFAULT;
}
if (resize_op(&(s->op),size-sizeof(struct palacios_user_keyed_stream_op))) {
ERROR("unable to resize op in user key push response\n");
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return -EFAULT;
}
if (copy_from_user(s->op, (void __user *) argp, size)) {
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return -EFAULT;
}
struct user_keyed_stream *s = filp->private_data;
unsigned long f1,f2;
- spin_lock_irqsave(&(user_streams->lock),f1);
- spin_lock_irqsave(&(s->lock), f2);
+ palacios_spinlock_lock_irqsave(&(user_streams->lock),f1);
+ palacios_spinlock_lock_irqsave(&(s->lock), f2);
list_del(&(s->node));
- spin_unlock_irqrestore(&(s->lock), f2);
- spin_unlock_irqrestore(&(user_streams->lock), f1);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), f2);
+ palacios_spinlock_unlock_irqrestore(&(user_streams->lock), f1);
palacios_free(s->url);
palacios_free(s);
// Check for duplicate handler
- spin_lock_irqsave(&(user_streams->lock), flags);
+ palacios_spinlock_lock_irqsave(&(user_streams->lock), flags);
list_for_each_entry(s, &(user_streams->streams), node) {
if (!strncasecmp(url, s->url, len)) {
ERROR("user keyed stream connection with url \"%s\" already exists\n", url);
return -1;
}
}
- spin_unlock_irqrestore(&(user_streams->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(user_streams->lock), flags);
// Create connection
s = palacios_alloc(sizeof(struct user_keyed_stream));
init_waitqueue_head(&(s->host_wait_queue));
// Insert connection into list
- spin_lock_irqsave(&(user_streams->lock), flags);
+ palacios_spinlock_lock_irqsave(&(user_streams->lock), flags);
list_add(&(s->node), &(user_streams->streams));
- spin_unlock_irqrestore(&(user_streams->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(user_streams->lock), flags);
return fd;
}
return NULL;
}
- spin_lock_irqsave(&(user_streams->lock), flags);
+ palacios_spinlock_lock_irqsave(&(user_streams->lock), flags);
list_for_each_entry(s, &(user_streams->streams), node) {
if (!strcasecmp(url, s->url)) {
- spin_unlock_irqrestore(&(user_streams->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(user_streams->lock), flags);
return s;
}
}
- spin_unlock_irqrestore(&(user_streams->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(user_streams->lock), flags);
return NULL;
}
return NULL;
}
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (s->waiting) {
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
ERROR("cannot open user stream %s as it is already in waiting state\n",url);
return NULL;
}
s->otype = ot==V3_KS_WR_ONLY_CREATE ? V3_KS_WR_ONLY : ot;
- spin_unlock_irqrestore(&(s->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return s;
uint64_t len = strlen(key)+1;
void *user_key;
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (resize_op(&(s->op),len)) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("cannot resize op in opening key %s on user keyed stream %s\n",key,s->url);
return NULL;
}
// enter with it locked
if (do_request_to_response(s,&flags)) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("request/response handling failed\n");
return NULL;
}
user_key=s->op->user_key;
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
return user_key;
}
uint64_t len = 0;
unsigned long flags;
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (resize_op(&(s->op),len)) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("cannot resize op in closing key 0x%p on user keyed stream %s\n",key,s->url);
return;
}
// enter with it locked
if (do_request_to_response(s,&flags)) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("request/response handling failed\n");
return;
}
// return with it locked
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
return;
}
sint64_t xfer;
unsigned long flags;
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (s->otype != V3_KS_RD_ONLY) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("attempt to read key from stream that is not in read state on %s\n",s->url);
}
if (resize_op(&(s->op),len)) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("cannot resize op in reading key 0x%p on user keyed stream %s\n",key,s->url);
return -1;
}
// enter with it locked
if (do_request_to_response(s,&flags)) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("request/response handling failed\n");
return -1;
}
xfer=s->op->xfer;
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
return xfer;
}
unsigned long flags;
- spin_lock_irqsave(&(s->lock), flags);
+ palacios_spinlock_lock_irqsave(&(s->lock), flags);
if (s->otype != V3_KS_WR_ONLY) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("attempt to write key on stream that is not in write state on %s\n",s->url);
}
if (resize_op(&(s->op),len)) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("cannot resize op in reading key 0x%p on user keyed stream %s\n",key,s->url);
return -1;
}
// enter with it locked
if (do_request_to_response(s,&flags)) {
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
ERROR("request/response handling failed\n");
return -1;
}
xfer=s->op->xfer;
- spin_unlock_irqrestore(&(s->lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(s->lock),flags);
return xfer;
}
INIT_LIST_HEAD(&(user_streams->streams));
- spin_lock_init(&(user_streams->lock));
+ palacios_spinlock_init(&(user_streams->lock));
V3_Init_Keyed_Streams(&hooks);
struct raw_interface * iface;
unsigned long flags;
- spin_lock_irqsave(&(packet_state.lock), flags);
+ palacios_spinlock_lock_irqsave(&(packet_state.lock), flags);
iface = find_interface(host_nic);
- spin_unlock_irqrestore(&(packet_state.lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(packet_state.lock),flags);
if(iface == NULL){
iface = (struct raw_interface *)palacios_alloc(sizeof(struct raw_interface));
palacios_free(iface);
return -1;
}
- spin_lock_irqsave(&(packet_state.lock), flags);
+ palacios_spinlock_lock_irqsave(&(packet_state.lock), flags);
list_add(&(iface->node), &(packet_state.open_interfaces));
- spin_unlock_irqrestore(&(packet_state.lock),flags);
+ palacios_spinlock_unlock_irqrestore(&(packet_state.lock),flags);
}
packet->host_packet_data = iface;
V3_Init_Packet(&palacios_packet_hooks);
memset(&packet_state, 0, sizeof(struct palacios_packet_state));
- spin_lock_init(&(packet_state.lock));
+ palacios_spinlock_init(&(packet_state.lock));
INIT_LIST_HEAD(&(packet_state.open_interfaces));
// REGISTER GLOBAL CONTROL to add interfaces...
int tmp_len = (TMP_BUF_LEN > bytes_left) ? bytes_left : TMP_BUF_LEN;
int tmp_read = 0;
- spin_lock_irqsave(&(stream->lock), flags);
+ palacios_spinlock_lock_irqsave(&(stream->lock), flags);
tmp_read = ringbuf_read(stream->out_ring, tmp_buf, tmp_len);
- spin_unlock_irqrestore(&(stream->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(stream->lock), flags);
if (tmp_read == 0) {
// If userspace reads more than we have
}
- spin_lock_irqsave(&(stream->lock), flags);
+ palacios_spinlock_lock_irqsave(&(stream->lock), flags);
total_bytes_left = ringbuf_data_len(stream->out_ring);
- spin_unlock_irqrestore(&(stream->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(stream->lock), flags);
if (total_bytes_left > 0) {
wake_up_interruptible(&(stream->intr_queue));
poll_wait(filp, &(stream->intr_queue), poll_tb);
- spin_lock_irqsave(&(stream->lock), flags);
+ palacios_spinlock_lock_irqsave(&(stream->lock), flags);
data_avail = ringbuf_data_len(stream->out_ring);
- spin_unlock_irqrestore(&(stream->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(stream->lock), flags);
if (data_avail > 0) {
return mask;
struct stream_state * stream = filp->private_data;
unsigned long flags;
- spin_lock_irqsave(&(stream->lock), flags);
+ palacios_spinlock_lock_irqsave(&(stream->lock), flags);
stream->connected = 0;
- spin_unlock_irqrestore(&(stream->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(stream->lock), flags);
return 0;
strncpy(stream->name, name, STREAM_NAME_LEN - 1);
init_waitqueue_head(&(stream->intr_queue));
- spin_lock_init(&(stream->lock));
+ palacios_spinlock_init(&(stream->lock));
if (guest == NULL) {
list_add(&(stream->stream_node), &(global_streams));
}
while (bytes_written < len) {
- spin_lock_irqsave(&(stream->lock), flags);
+ palacios_spinlock_lock_irqsave(&(stream->lock), flags);
bytes_written += ringbuf_write(stream->out_ring, buf + bytes_written, len - bytes_written);
- spin_unlock_irqrestore(&(stream->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(stream->lock), flags);
wake_up_interruptible(&(stream->intr_queue));
return -EFAULT;
}
- spin_lock_irqsave(&(stream->lock), flags);
+ palacios_spinlock_lock_irqsave(&(stream->lock), flags);
if (stream->connected == 0) {
stream->connected = 1;
ret = 1;
}
- spin_unlock_irqrestore(&(stream->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(stream->lock), flags);
if (ret == -1) {
int add_global_ctrl(unsigned int cmd,
int (*handler)(unsigned int cmd, unsigned long arg)) {
- struct global_ctrl * ctrl = kmalloc(sizeof(struct global_ctrl), GFP_KERNEL);
+ struct global_ctrl * ctrl = palacios_alloc_extended(sizeof(struct global_ctrl), GFP_KERNEL);
if (ctrl == NULL) {
printk("Error: Could not allocate global ctrl %d\n", cmd);
if (__insert_global_ctrl(ctrl) != NULL) {
printk("Could not insert guest ctrl %d\n", cmd);
- kfree(ctrl);
+ palacios_free(ctrl);
return -1;
}
--- /dev/null
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/spinlock.h>
+
+#include "palacios.h"
+
+#include "lockcheck.h"
+
+// How far up the stack to track the caller
+// 0 => palacios_...
+// 1 => v3_lock...
+// 2 => caller of v3_lock..
+// ...
+#define STEP_BACK_DEPTH_FIRST 1
+#define STEP_BACK_DEPTH_LAST 4
+#define STEP_BACK_DEPTH (STEP_BACK_DEPTH_LAST-STEP_BACK_DEPTH_FIRST+1)
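+//
+// For example (caller name hypothetical), with STEP_BACK_DEPTH_FIRST=1
+// and a call chain of
+//   my_driver_fn -> v3_lock_irqsave -> palacios_mutex_lock_irqsave
+// slot 0 of a recorded trace holds v3_lock_irqsave and slot 1 holds
+// my_driver_fn.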
+
+// show when multiple locks are held simultaneously
+// This is the minimum number
+#define WARN_MULTIPLE_THRESHOLD 3
+
+typedef struct {
+ int inuse; // nonzero if this is in use
+ void *lock; // the lock
+ void *allocator[STEP_BACK_DEPTH];
+ // who allocated this
+ int lockcount; // how many times it's been locked/unlocked (lock=+1, unlock=-1)
+ int irqcount; // how many times interrupts have been turned off (+1/-1)
+ void *lastlocker[STEP_BACK_DEPTH];
+ // who last locked
+ void *lastunlocker[STEP_BACK_DEPTH];
+ // who last unlocked
+ void *lastirqlocker[STEP_BACK_DEPTH];
+ // who last locked with irqs saved
+ unsigned long lastlockflags; // their flags
+ void *lastirqunlocker[STEP_BACK_DEPTH];
+ // who last unlocked with irqs restored
+ unsigned long lastunlockflags; // their flags
+} lockcheck_state_t;
+
+
+static spinlock_t lock;
+
+static lockcheck_state_t state[NUM_LOCKS];
+
+static lockcheck_state_t *get_lock_entry(void)
+{
+ int i;
+ unsigned long f;
+ lockcheck_state_t *l;
+
+ spin_lock_irqsave(&lock,f);
+
+ for (i=0;i<NUM_LOCKS;i++) {
+ l=&(state[i]);
+ if (!(l->inuse)) {
+ l->inuse=1;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&lock,f);
+
+ if (i<NUM_LOCKS) {
+ return l;
+ } else {
+ return 0;
+ }
+}
+
+
+static lockcheck_state_t *find_lock_entry(void *lock)
+{
+ int i;
+ lockcheck_state_t *l;
+
+ for (i=0;i<NUM_LOCKS;i++) {
+ l=&(state[i]);
+ if (l->inuse && l->lock == lock) {
+ return l;
+ }
+ }
+ return 0;
+}
+
+
+static void free_lock_entry(lockcheck_state_t *l)
+{
+ l->inuse=0;
+}
+
+
+
+void palacios_lockcheck_init(void)
+{
+ memset(state,0,sizeof(lockcheck_state_t)*NUM_LOCKS);
+ spin_lock_init(&lock);
+ DEBUG("LOCKCHECK: LOCK CHECKING INITED\n");
+}
+
+//
+// This needs to be defined explicitly (unrolled) since the intrinsic requires a constant argument, not a variable
+//
+#define backtrace(t) \
+ t[0]=__builtin_return_address(STEP_BACK_DEPTH_FIRST); \
+ t[1]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+1); \
+ t[2]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+2); \
+ t[3]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+3);
+
+//
+// For printing a backtrace
+//
+//
+#define backtrace_format "%pS << %pS << %pS << %pS"
+#define backtrace_expand(t) ((t)[0]),((t)[1]),((t)[2]),((t)[3])
+
+
+static void clear_trace(void **trace)
+{
+ int i;
+
+ for (i=0;i<STEP_BACK_DEPTH;i++) {
+ trace[i]=0;
+ }
+}
+
+
+static void printlock(char *prefix, lockcheck_state_t *l)
+{
+ if (l->lock) {
+ DEBUG("LOCKCHECK: %s: lock 0x%p, allocator="
+ backtrace_format
+ ", lockcount=%d, lastlocker="
+ backtrace_format
+ ", lastunlocker="
+ backtrace_format
+ ", irqcount=%d, lastirqlocker="
+ backtrace_format
+ ", lastlockflags=%lu, lastirqunlocker="
+ backtrace_format
+ ", lastunlockflags=%lu\n",
+ prefix,l->lock,
+ backtrace_expand(l->allocator),
+ l->lockcount,
+ backtrace_expand(l->lastlocker),
+ backtrace_expand(l->lastunlocker),
+ l->irqcount,
+ backtrace_expand(l->lastirqlocker),
+ l->lastlockflags,
+ backtrace_expand(l->lastirqunlocker),
+ l->lastunlockflags);
+ }
+}
+
+
+
+static void find_multiple_locks_held(void)
+{
+ int i;
+ int have=0;
+ lockcheck_state_t *l;
+ char buf[64];
+
+ for (i=0;i<NUM_LOCKS;i++) {
+ l=&(state[i]);
+ if (l->inuse && l->lockcount>0) {
+ have++;
+ if (have>=WARN_MULTIPLE_THRESHOLD) {
+ break;
+ }
+ }
+ }
+
+ if (have>=WARN_MULTIPLE_THRESHOLD) {
+ have=0;
+ for (i=0;i<NUM_LOCKS;i++) {
+ l=&(state[i]);
+ if (l->inuse && l->lockcount>0) {
+ snprintf(buf,64,"MULTIPLE LOCKS HELD (%d)",have);
+ printlock(buf,l);
+ have++;
+ }
+ }
+ }
+
+}
+
+static void find_multiple_irqs_held(void)
+{
+ int i;
+ int have=0;
+ lockcheck_state_t *l;
+ char buf[64];
+
+ for (i=0;i<NUM_LOCKS;i++) {
+ l=&(state[i]);
+ if (l->inuse && l->irqcount>0) {
+ have++;
+ if (have>=WARN_MULTIPLE_THRESHOLD) {
+ break;
+ }
+ }
+ }
+
+ if (have>=WARN_MULTIPLE_THRESHOLD) {
+ have=0;
+ for (i=0;i<NUM_LOCKS;i++) {
+ l=&(state[i]);
+ if (l->inuse && l->irqcount>0) {
+ snprintf(buf,64,"MULTIPLE IRQS HELD (%d)",have);
+ printlock(buf,l);
+ have++;
+ }
+ }
+ }
+
+}
+
+
+void palacios_lockcheck_deinit(void)
+{
+ int i;
+ lockcheck_state_t *l;
+
+ for (i=0;i<NUM_LOCKS;i++) {
+ l=&(state[i]);
+ if (l->lock) {
+ printlock("ALLOCATED LOCK AT DEINIT",l);
+ if ((l->lockcount)) {
+ printlock("BAD LOCK COUNT AT DEINIT",l);
+ }
+ if ((l->irqcount)) {
+ printlock("BAD IRQ COUNT AT DEINIT",l);
+ }
+ }
+ }
+ INFO("LOCKCHECK: DEINITED\n");
+}
+
+
+void palacios_lockcheck_alloc(void *lock)
+{
+ lockcheck_state_t *l=get_lock_entry();
+
+ if (!l) {
+ DEBUG("LOCKCHECK: UNABLE TO ALLOCATE TRACKING DATA FOR LOCK 0x%p\n",lock);
+ return;
+ }
+ l->lock=lock;
+ backtrace(l->allocator);
+ l->lockcount=l->irqcount=0;
+ clear_trace(l->lastlocker);
+ clear_trace(l->lastunlocker);
+ clear_trace(l->lastirqlocker);
+ clear_trace(l->lastirqunlocker);
+ //INFO("LOCKCHECK: LOCK ALLOCATE 0x%p\n",lock);
+ printlock("NEW LOCK", l);
+ //dump_stack();
+}
+
+void palacios_lockcheck_free(void *lock)
+{
+ lockcheck_state_t *l=find_lock_entry(lock);
+
+ if (!l){
+ DEBUG("LOCKCHECK: FREEING UNTRACKED LOCK 0x%p\n",lock);
+ return;
+ }
+
+ if ((l->lockcount)) {
+ printlock("BAD LOCK COUNT AT FREE",l);
+ }
+
+ if ((l->irqcount)) {
+ printlock("BAD IRQ COUNT AT FREE",l);
+ }
+ free_lock_entry(l);
+}
+
+void palacios_lockcheck_lock(void *lock)
+{
+ lockcheck_state_t *l=find_lock_entry(lock);
+
+ if (!l) {
+ DEBUG("LOCKCHECK: LOCKING UNTRACKED LOCK 0x%p\n",lock);
+ return;
+ }
+
+ if (l->lockcount!=0) {
+ printlock("BAD LOCKCOUNT AT LOCK",l);
+ }
+ if (l->irqcount!=0) {
+ printlock("BAD IRQCOUNT AT LOCK",l);
+ }
+
+ l->lockcount++;
+ backtrace(l->lastlocker);
+
+ find_multiple_locks_held();
+
+}
+void palacios_lockcheck_unlock(void *lock)
+{
+ lockcheck_state_t *l=find_lock_entry(lock);
+
+ if (!l) {
+ DEBUG("LOCKCHECK: UNLOCKING UNTRACKED LOCK 0x%p\n",lock);
+ return;
+ }
+
+ if (l->lockcount!=1) {
+ printlock("LOCKCHECK: BAD LOCKCOUNT AT UNLOCK",l);
+ }
+ if (l->irqcount!=0) {
+ printlock("LOCKCHECK: BAD IRQCOUNT AT UNLOCK",l);
+ }
+
+ l->lockcount--;
+ backtrace(l->lastunlocker);
+}
+
+void palacios_lockcheck_lock_irqsave(void *lock,unsigned long flags)
+{
+ lockcheck_state_t *l=find_lock_entry(lock);
+
+ if (!l) {
+ DEBUG("LOCKCHECK: IRQ LOCKING UNTRACKED LOCK 0x%p\n",lock);
+ return;
+ }
+
+ if (l->lockcount!=0) {
+ printlock("BAD LOCKCOUNT AT IRQ LOCK",l);
+ }
+ if (l->irqcount!=0) {
+ printlock("BAD IRQCOUNT AT IRQ LOCK",l);
+ }
+
+ l->irqcount++;
+ l->lastlockflags=flags;
+ backtrace(l->lastirqlocker);
+
+
+ find_multiple_irqs_held();
+
+}
+
+void palacios_lockcheck_unlock_irqrestore(void *lock,unsigned long flags)
+{
+ lockcheck_state_t *l=find_lock_entry(lock);
+
+ if (!l) {
+ DEBUG("LOCKCHECK: IRQ UNLOCKING UNTRACKED LOCK 0x%p\n",lock);
+ return;
+ }
+
+ if (l->lockcount!=0) {
+ printlock("LOCKCHECK: BAD LOCKCOUNT AT IRQ UNLOCK",l);
+ }
+ if (l->irqcount!=1) {
+ printlock("LOCKCHECK: BAD IRQCOUNT AT IRQ UNLOCK",l);
+ }
+
+ l->irqcount--;
+ l->lastunlockflags = flags;
+ backtrace(l->lastirqunlocker);
+
+}
--- /dev/null
+#ifndef _lockcheck
+#define _lockcheck
+
+#define CHECK_LOCKS 0
+#define NUM_LOCKS 1024
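+
+// To enable checking, set CHECK_LOCKS to 1 above and rebuild the module;
+// with it at 0, the LOCKCHECK_* macros below expand to nothing, so a
+// production build pays no cost. NUM_LOCKS bounds how many distinct
+// locks can be tracked at once.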
+
+#if CHECK_LOCKS
+#define LOCKCHECK_INIT() palacios_lockcheck_init()
+#define LOCKCHECK_ALLOC(lock) palacios_lockcheck_alloc(lock)
+#define LOCKCHECK_FREE(lock) palacios_lockcheck_free(lock)
+#define LOCKCHECK_LOCK(lock) palacios_lockcheck_lock(lock)
+#define LOCKCHECK_UNLOCK(lock) palacios_lockcheck_unlock(lock)
+#define LOCKCHECK_LOCK_IRQSAVE(lock, flags) palacios_lockcheck_lock_irqsave(lock,flags)
+#define LOCKCHECK_UNLOCK_IRQRESTORE(lock, flags) palacios_lockcheck_unlock_irqrestore(lock,flags)
+#define LOCKCHECK_DEINIT() palacios_lockcheck_deinit()
+#else
+#define LOCKCHECK_INIT()
+#define LOCKCHECK_ALLOC(lock)
+#define LOCKCHECK_FREE(lock)
+#define LOCKCHECK_LOCK(lock)
+#define LOCKCHECK_UNLOCK(lock)
+#define LOCKCHECK_LOCK_IRQSAVE(lock, flags)
+#define LOCKCHECK_UNLOCK_IRQRESTORE(lock, flags)
+#define LOCKCHECK_DEINIT()
+#endif
+
+void palacios_lockcheck_init(void);
+void palacios_lockcheck_alloc(void *lock);
+void palacios_lockcheck_free(void *lock);
+void palacios_lockcheck_lock(void *lock);
+void palacios_lockcheck_unlock(void *lock);
+void palacios_lockcheck_lock_irqsave(void *lock,unsigned long flags);
+void palacios_lockcheck_unlock_irqrestore(void *lock,unsigned long flags);
+void palacios_lockcheck_deinit(void);
+
+#endif
#include "mm.h"
#include "vm.h"
#include "allow_devmem.h"
+#include "lockcheck.h"
#include "linux-exts.h"
guest->img_size = user_image.size;
DEBUG("Palacios: Allocating kernel memory for guest image (%llu bytes)\n", user_image.size);
- guest->img = vmalloc(guest->img_size);
+ guest->img = palacios_valloc(guest->img_size);
if (IS_ERR(guest->img)) {
ERROR("Palacios Error: Could not allocate space for guest image\n");
out_err2:
- vfree(guest->img);
+ palacios_vfree(guest->img);
out_err1:
guest_map[vm_minor] = NULL;
out_err:
static int __init v3_init(void) {
+
dev_t dev = MKDEV(0, 0); // We dynamically assign the major number
int ret = 0;
+ LOCKCHECK_INIT();
palacios_init_mm();
extern u32 pg_frees;
extern u32 mallocs;
extern u32 frees;
+ extern u32 vmallocs;
+ extern u32 vfrees;
int i = 0;
struct v3_guest * guest;
dev_t dev;
palacios_vmm_exit();
DEBUG("Palacios Mallocs = %d, Frees = %d\n", mallocs, frees);
+ DEBUG("Palacios Vmallocs = %d, Vfrees = %d\n", vmallocs, vfrees);
DEBUG("Palacios Page Allocs = %d, Page Frees = %d\n", pg_allocs, pg_frees);
unregister_chrdev_region(MKDEV(v3_major_num, 0), MAX_VMS + 1);
remove_proc_entry("v3vee", NULL);
DEBUG("Palacios Module Mallocs = %d, Frees = %d\n", mod_allocs, mod_frees);
+
+ LOCKCHECK_DEINIT();
}
void * addr = NULL;
mod_allocs++;
- addr = kmalloc(size, flags);
+ addr = palacios_alloc_extended(size, flags);
return addr;
}
void trace_free(const void * objp) {
mod_frees++;
- kfree(objp);
+ palacios_free((void*)objp);
}
#include <linux/kthread.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
+#include <linux/vmalloc.h>
#include <palacios/vmm.h>
#include <palacios/vmm_host_events.h>
#include "palacios.h"
-
-
-
#include "mm.h"
+#include "lockcheck.h"
+
// The following can be used to track heap bugs
// zero memory after allocation
#define ALLOC_ZERO_MEM 0
u32 pg_frees = 0;
u32 mallocs = 0;
u32 frees = 0;
-
+u32 vmallocs = 0;
+u32 vfrees = 0;
static struct v3_vm_info * irq_to_guest_map[256];
return addr+ALLOC_PAD;
}
+void *
+palacios_valloc(unsigned int size)
+{
+ void * addr = NULL;
+
+ addr = vmalloc(size);
+
+ if (!addr) {
+ ERROR("ALERT ALERT vmalloc has FAILED FAILED FAILED\n");
+ return NULL;
+ }
+
+ vmallocs++;
+
+ return addr;
+}
+
+void palacios_vfree(void *p)
+{
+ vfree(p);
+ vfrees++;
+}
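+
+// A minimal usage sketch (the buffer and its size are illustrative):
+// pairing the counted wrappers keeps the vmalloc/vfree tallies reported
+// at module exit balanced:
+//
+//   void *buf = palacios_valloc(4096);
+//   if (!buf) { return; }   // failure already logged by palacios_valloc
+//   memset(buf, 0, 4096);
+//   palacios_vfree(buf);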
/**
* Allocates 'size' bytes of kernel memory.
if (lock) {
spin_lock_init(lock);
+ LOCKCHECK_ALLOC(lock);
} else {
ERROR("ALERT ALERT Unable to allocate lock\n");
return NULL;
return lock;
}
+void palacios_mutex_init(void *mutex)
+{
+ spinlock_t *lock = (spinlock_t*)mutex;
+
+ if (lock) {
+ spin_lock_init(lock);
+ LOCKCHECK_ALLOC(lock);
+ }
+}
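+
+// Note: unlike palacios_mutex_alloc(), this initializes a lock that is
+// embedded in a caller-owned structure; it still registers the lock
+// with the checker via LOCKCHECK_ALLOC so embedded locks are tracked.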
+
+
/**
* Frees a mutex.
*/
void
palacios_mutex_free(void * mutex) {
palacios_free(mutex);
+ LOCKCHECK_FREE(mutex);
}
/**
void
palacios_mutex_lock(void * mutex, int must_spin) {
spin_lock((spinlock_t *)mutex);
+ LOCKCHECK_LOCK(mutex);
}
unsigned long flags;
spin_lock_irqsave((spinlock_t *)mutex,flags);
+ LOCKCHECK_LOCK_IRQSAVE(mutex,flags);
return (void *)flags;
}
)
{
spin_unlock((spinlock_t *)mutex);
+ LOCKCHECK_UNLOCK(mutex);
}
{
// This is correct, flags is opaque
spin_unlock_irqrestore((spinlock_t *)mutex,(unsigned long)flags);
+ LOCKCHECK_UNLOCK_IRQRESTORE(mutex,(unsigned long)flags);
}
/**
link->sock->ops->release(link->sock);
- spin_lock_irqsave(&(vnet_brg_s.lock), flags);
+ palacios_spinlock_lock_irqsave(&(vnet_brg_s.lock), flags);
list_del(&(link->node));
vnet_htable_remove(vnet_brg_s.ip2link, (addr_t)&(link->dst_ip), 0);
vnet_brg_s.num_links --;
- spin_unlock_irqrestore(&(vnet_brg_s.lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(vnet_brg_s.lock), flags);
INFO("VNET Bridge: Link deleted, ip 0x%x, port: %d, idx: %d\n",
link->dst_ip,
}
- spin_lock_irqsave(&(vnet_brg_s.lock), flags);
+ palacios_spinlock_lock_irqsave(&(vnet_brg_s.lock), flags);
list_add(&(link->node), &(vnet_brg_s.link_list));
vnet_brg_s.num_links ++;
link->idx = ++ vnet_brg_s.link_idx;
vnet_htable_insert(vnet_brg_s.ip2link, (addr_t)&(link->dst_ip), (addr_t)link);
- spin_unlock_irqrestore(&(vnet_brg_s.lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(vnet_brg_s.lock), flags);
INFO("VNET Bridge: Link created, ip 0x%x, port: %d, idx: %d, link: %p, protocol: %s\n",
link->dst_ip,
memset(&vnet_brg_s, 0, sizeof(struct vnet_brg_state));
INIT_LIST_HEAD(&(vnet_brg_s.link_list));
- spin_lock_init(&(vnet_brg_s.lock));
+ palacios_spinlock_init(&(vnet_brg_s.lock));
vnet_brg_s.serv_proto = UDP;
route->idx = v3_vnet_add_route(route->route);
- spin_lock_irqsave(&(vnet_ctrl_s.lock), flags);
+ palacios_spinlock_lock_irqsave(&(vnet_ctrl_s.lock), flags);
list_add(&(route->node), &(vnet_ctrl_s.route_list));
vnet_ctrl_s.num_routes ++;
- spin_unlock_irqrestore(&(vnet_ctrl_s.lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(vnet_ctrl_s.lock), flags);
INFO("VNET Control: One route added to VNET core\n");
v3_vnet_del_route(route->idx);
- spin_lock_irqsave(&(vnet_ctrl_s.lock), flags);
+ palacios_spinlock_lock_irqsave(&(vnet_ctrl_s.lock), flags);
list_del(&(route->node));
vnet_ctrl_s.num_routes --;
- spin_unlock_irqrestore(&(vnet_ctrl_s.lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(vnet_ctrl_s.lock), flags);
INFO("VNET Control: Route %d deleted from VNET\n", route->idx);
vnet_brg_delete_link(link->idx);
- spin_lock_irqsave(&(vnet_ctrl_s.lock), flags);
+ palacios_spinlock_lock_irqsave(&(vnet_ctrl_s.lock), flags);
list_del(&(link->node));
vnet_ctrl_s.num_links --;
- spin_unlock_irqrestore(&(vnet_ctrl_s.lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(vnet_ctrl_s.lock), flags);
palacios_free(link);
link = NULL;
link->proto = d_proto;
link->idx = link_idx;
- spin_lock_irqsave(&(vnet_ctrl_s.lock), flags);
+ palacios_spinlock_lock_irqsave(&(vnet_ctrl_s.lock), flags);
list_add(&(link->node), &(vnet_ctrl_s.link_iter_list));
vnet_ctrl_s.num_links ++;
- spin_unlock_irqrestore(&(vnet_ctrl_s.lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(vnet_ctrl_s.lock), flags);
} else if (strnicmp("DEL", token, strlen("DEL")) == 0) {
char * idx_str = NULL;
uint32_t d_idx;
INIT_LIST_HEAD(&(vnet_ctrl_s.link_iter_list));
INIT_LIST_HEAD(&(vnet_ctrl_s.route_list));
- spin_lock_init(&(vnet_ctrl_s.lock));
+ palacios_spinlock_init(&(vnet_ctrl_s.lock));
init_proc_files();
void *palacios_alloc(unsigned int size);
void *palacios_alloc_extended(unsigned int size, unsigned int flags);
void palacios_free(void *);
+void *palacios_valloc(unsigned int size); // use instead of vmalloc
+void palacios_vfree(void *); // use instead of vfree
void *palacios_vaddr_to_paddr(void *vaddr);
void *palacios_paddr_to_vaddr(void *paddr);
void *palacios_start_kernel_thread(int (*fn)(void * arg), void *arg, char *thread_name);
void palacios_yield_cpu_timed(unsigned int us);
unsigned int palacios_get_cpu(void);
unsigned int palacios_get_cpu_khz(void);
-void *palacios_mutex_alloc(void);
+void *palacios_mutex_alloc(void); // allocates and inits a lock
+void palacios_mutex_init(void *mutex); // use instead of spin_lock_init
void palacios_mutex_free(void *mutex);
void palacios_mutex_lock(void *mutex, int must_spin);
void palacios_mutex_unlock(void *mutex);
void *palacios_mutex_lock_irqsave(void *mutex, int must_spin);
void palacios_mutex_unlock_irqrestore(void *mutex, void *flags);
+// Macros for spin-locks in the module code
+// By using these macros, the lock checker will be able
+// to see the module code as well as the core VMM
+#define palacios_spinlock_init(l) palacios_mutex_init(l)
+#define palacios_spinlock_lock(l) palacios_mutex_lock(l,0)
+#define palacios_spinlock_unlock(l) palacios_mutex_unlock(l)
+#define palacios_spinlock_lock_irqsave(l,f) do { f=(unsigned long)palacios_mutex_lock_irqsave(l,0); } while (0)
+#define palacios_spinlock_unlock_irqrestore(l,f) palacios_mutex_unlock_irqrestore(l,(void*)f)
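+
+// A minimal usage sketch, assuming a structure with an embedded
+// spinlock_t member named lock (the names are illustrative):
+//
+//   unsigned long flags;
+//   palacios_spinlock_init(&(dev->lock));
+//   palacios_spinlock_lock_irqsave(&(dev->lock), flags);
+//   /* critical section */
+//   palacios_spinlock_unlock_irqrestore(&(dev->lock), flags);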
// Palacios Printing Support
queue->max_entries = max_entries;
INIT_LIST_HEAD(&(queue->entries));
- spin_lock_init(&(queue->lock));
+ palacios_spinlock_init(&(queue->lock));
}
void deinit_queue(struct gen_queue * queue) {
return -1;
}
- spin_lock_irqsave(&(queue->lock), flags);
+ palacios_spinlock_lock_irqsave(&(queue->lock), flags);
q_entry->entry = entry;
list_add_tail(&(q_entry->node), &(queue->entries));
queue->num_entries++;
- spin_unlock_irqrestore(&(queue->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(queue->lock), flags);
return 0;
}
void * entry_val = 0;
unsigned long flags;
- spin_lock_irqsave(&(queue->lock), flags);
+ palacios_spinlock_lock_irqsave(&(queue->lock), flags);
if (!list_empty(&(queue->entries))) {
struct list_head * q_entry = queue->entries.next;
}
- spin_unlock_irqrestore(&(queue->lock), flags);
+ palacios_spinlock_unlock_irqrestore(&(queue->lock), flags);
return entry_val;
}
#include <linux/poll.h>
#include <linux/anon_inodes.h>
#include <linux/sched.h>
-#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
rb_erase(&(ctrl->tree_node), &(guest->vm_ctrls));
- kfree(ctrl);
+ palacios_free(ctrl);
return 0;
}
WARNING("Cleaning up guest ctrl that was not removed explicitly (%d)\n", ctrl->cmd);
- kfree(ctrl);
+ palacios_free(ctrl);
}
}
free_guest_ctrls(guest);
- vfree(guest->img);
+ palacios_vfree(guest->img);
palacios_free(guest);
return 0;
CFLAGS += -I../linux_module
-CC = gcc
+CC = gcc -g
AR = ar
all: $(BUILD_EXECS) $(BUILD_LIBS) $(COPIED_EXECS)