} lockcheck_state_t;
-static spinlock_t lock;
-
+// This lock is currently used only to control
+// allocation of entries in the global state
+// table below; the per-CPU lock stacks need no locking.
+static spinlock_t lock;
 static lockcheck_state_t state[NUM_LOCKS];
+// forward declaration: printlock is defined later but used by the stack dump
+static void printlock(char *prefix, lockcheck_state_t *l);
+
+
+// Per-CPU stack of currently held locks, used to detect out-of-order
+// (non-LIFO) unlocks and irqsave/plain mismatches.
+typedef struct {
+  u32 top;               // next open slot; 0 == empty, valid entries are [0, top)
+  void *lock[LOCK_STACK_DEPTH]; // addresses of the held locks, oldest first
+  char irq[LOCK_STACK_DEPTH];   // nonzero => locked with the irqsave variant
+} lock_stack_t;
+
+static DEFINE_PER_CPU(lock_stack_t, lock_stack);
+
static lockcheck_state_t *get_lock_entry(void)
{
int i;
}
+// Dump this CPU's lock stack, most recently acquired lock first,
+// via printlock().  get_cpu_var() keeps preemption disabled for the
+// whole dump so the stack cannot change underneath us.
+static void lock_stack_print(void)
+{
+  u32 i;
+  char buf[64];
+  lock_stack_t *mystack = &(get_cpu_var(lock_stack));
+  u32 cpu = get_cpu(); put_cpu();  // cpu id stays valid: preemption already off
+
+  if ((mystack->top)>0) {
+    // walk from newest entry (index top-1) down to the oldest (index 0)
+    for (i=mystack->top; i>0;i--) {
+      snprintf(buf,64,"LOCK STACK (cpu=%u, index=%u, irq=%d)",cpu, i-1, (int)(mystack->irq[i-1]));
+      printlock(buf,find_lock_entry(mystack->lock[i-1]));
+    }
+  }
+  put_cpu_var(lock_stack);
+}
+
+
+// Record acquisition of `lock` on this CPU by pushing it onto the
+// per-CPU lock stack.  irq!=0 marks the irqsave variant so the matching
+// unlock can be checked.  On overflow the acquisition is logged but not
+// recorded (subsequent unlock checks may then report false mismatches).
+static void lock_stack_lock(void *lock, char irq)
+{
+  lock_stack_t *mystack = &(get_cpu_var(lock_stack));
+  u32 cpu = get_cpu(); put_cpu();  // cpu id stable: get_cpu_var disabled preemption
+
+  // top is the next open slot, so every index 0..LOCK_STACK_DEPTH-1 is
+  // usable; reject only when the stack is completely full (top==DEPTH).
+  // The previous test (top >= DEPTH-1) wasted the final slot.
+  if (mystack->top>=LOCK_STACK_DEPTH) {
+    // drop preemption-disable before the potentially slow print path
+    put_cpu_var(lock_stack);
+    DEBUG("LOCKCHECK: Locking lock 0x%p on cpu %u exceeds stack limit of %d\n",lock,cpu,LOCK_STACK_DEPTH);
+    lock_stack_print();
+  } else {
+    mystack->lock[mystack->top] = lock;
+    mystack->irq[mystack->top] = irq;
+    mystack->top++;
+    put_cpu_var(lock_stack);
+  }
+}
+
+// Record release of `lock` on this CPU, popping it from the per-CPU lock
+// stack.  Reports (but does not crash on) three error cases: unlocking
+// with an empty stack, unlocking out of LIFO order, and unlocking with
+// the wrong irqsave/plain variant.  In each error path put_cpu_var() is
+// called *before* DEBUG/lock_stack_print so preemption is re-enabled
+// before the slow print path runs -- any value needed afterwards is
+// copied out first (otherlock, otherirq).
+static void lock_stack_unlock(void *lock, char irq)
+{
+  lock_stack_t *mystack = &(get_cpu_var(lock_stack));
+  u32 cpu = get_cpu(); put_cpu();  // cpu id stable: get_cpu_var disabled preemption
+
+  if (mystack->top==0) {
+    // nothing recorded as held on this CPU
+    put_cpu_var(lock_stack);
+    DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u when lock stack is empty\n",lock,cpu);
+  } else {
+    if (mystack->lock[mystack->top-1] != lock) {
+      // non-LIFO release: some other lock is on top of the stack
+      void *otherlock=mystack->lock[mystack->top-1];
+      put_cpu_var(lock_stack);
+      DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u when top of stack is lock 0x%p\n",lock,cpu, otherlock);
+      lock_stack_print();
+    } else {
+      if (irq!=mystack->irq[mystack->top-1]) {
+        // right lock, wrong variant (irqsave vs. plain)
+        char otherirq = mystack->irq[mystack->top-1];
+        put_cpu_var(lock_stack);
+        DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u with irq=%d, but was locked with irq=%d\n",lock,cpu,irq,otherirq);
+        lock_stack_print();
+      } else {
+        // clean pop
+        mystack->top--;
+        put_cpu_var(lock_stack);
+      }
+    }
+  }
+
+}
void palacios_lockcheck_init()
{
static void printlock(char *prefix, lockcheck_state_t *l)
{
+ if (!l || !(l->lock) ) {
+ DEBUG("LOCKCHECK: %s: lock 0x%p BOGUS\n",prefix,l);
+ return;
+ }
if (l->lock) {
DEBUG("LOCKCHECK: %s: lock 0x%p, allocator="
backtrace_format
find_multiple_locks_held();
+ lock_stack_lock(lock,0);
+
}
void palacios_lockcheck_unlock(void *lock)
{
if (l->irqcount!=0) {
printlock("LOCKCHECK: BAD IRQCOUNT AT UNLOCK",l);
}
+
+ lock_stack_unlock(lock,0);
l->lockcount--;
backtrace(l->lastunlocker);
+
}
void palacios_lockcheck_lock_irqsave(void *lock,unsigned long flags)
find_multiple_irqs_held();
+ lock_stack_lock(lock,1);
+
+
}
void palacios_lockcheck_unlock_irqrestore(void *lock,unsigned long flags)
l->irqcount--;
l->lastunlockflags = flags;
+
+ lock_stack_unlock(lock,1);
+
backtrace(l->lastirqunlocker);
}
 #ifndef _lockcheck
 #define _lockcheck
-#define CHECK_LOCKS 0
-#define NUM_LOCKS 1024
-#if CHECK_LOCKS
+#ifdef V3_CONFIG_DEBUG_LOCKS
+
+// Maximum number of locks to handle
+#define NUM_LOCKS 1024
+// Maximum number of locks that can be simultaneously
+// held on each CPU
+#define LOCK_STACK_DEPTH 16
+
+//
+// The following macros are used
+// in the stub functions to call back to the lock
+// checker - if lock checking is not enabled, these
+// turn into nothing
+//
 #define LOCKCHECK_INIT() palacios_lockcheck_init()
 #define LOCKCHECK_ALLOC(lock) palacios_lockcheck_alloc(lock)
 #define LOCKCHECK_FREE(lock) palacios_lockcheck_free(lock)
 #define LOCKCHECK_LOCK_IRQSAVE(lock, flags) palacios_lockcheck_lock_irqsave(lock,flags)
 #define LOCKCHECK_UNLOCK_IRQRESTORE(lock, flags) palacios_lockcheck_unlock_irqrestore(lock,flags)
 #define LOCKCHECK_DEINIT() palacios_lockcheck_deinit()
+
+// NOTE(review): no LOCKCHECK_LOCK/LOCKCHECK_UNLOCK macros are visible in
+// either branch here even though palacios_lockcheck_lock/unlock are
+// declared below — confirm they are defined in unchanged context not
+// shown in this diff, otherwise plain (non-irqsave) lock/unlock events
+// will never reach the checker.
+void palacios_lockcheck_init(void);
+void palacios_lockcheck_alloc(void *lock);
+void palacios_lockcheck_free(void *lock);
+void palacios_lockcheck_lock(void *lock);
+void palacios_lockcheck_unlock(void *lock);
+void palacios_lockcheck_lock_irqsave(void *lock,unsigned long flags);
+void palacios_lockcheck_unlock_irqrestore(void *lock,unsigned long flags);
+void palacios_lockcheck_deinit(void);
+
 #else
+
+//
+// The following is what happens when lock checking is not on
+//
 #define LOCKCHECK_INIT()
 #define LOCKCHECK_ALLOC(lock)
 #define LOCKCHECK_FREE(lock)
 #define LOCKCHECK_LOCK_IRQSAVE(lock, flags)
 #define LOCKCHECK_UNLOCK_IRQRESTORE(lock, flags)
 #define LOCKCHECK_DEINIT()
+
 #endif
-void palacios_lockcheck_init(void);
-void palacios_lockcheck_alloc(void *lock);
-void palacios_lockcheck_free(void *lock);
-void palacios_lockcheck_lock(void *lock);
-void palacios_lockcheck_unlock(void *lock);
-void palacios_lockcheck_lock_irqsave(void *lock,unsigned long flags);
-void palacios_lockcheck_unlock_irqrestore(void *lock,unsigned long flags);
-void palacios_lockcheck_deinit(void);
 #endif