1 #include <linux/kernel.h>
2 #include <linux/kthread.h>
3 #include <linux/spinlock.h>
// --- Compile-time configuration for the lock checker ---
// Per-event trace printing (0 = off); each guards a printlock() call below.
9 #define PRINT_LOCK_ALLOC 0
10 #define PRINT_LOCK_FREE 0
11 #define PRINT_LOCK_LOCK 0
12 #define PRINT_LOCK_UNLOCK 0
14 // How far up the stack to track the caller
17 // 2 => caller of v3_lock..
// Range of __builtin_return_address() frame levels captured per event.
19 #define STEP_BACK_DEPTH_FIRST 1
20 #define STEP_BACK_DEPTH_LAST 4
// Number of return addresses stored per trace (inclusive range => 4).
// NOTE(review): the backtrace() macro below hard-codes indices 0..3, so
// this must remain 4 unless that macro is updated in step.
21 #define STEP_BACK_DEPTH (STEP_BACK_DEPTH_LAST-STEP_BACK_DEPTH_FIRST+1)
23 // show when multiple locks are held simultaneously
24 // This is the minimum number
25 #define WARN_MULTIPLE_THRESHOLD 3
// Per-lock tracking record (fields of lockcheck_state_t).
// NOTE(review): the enclosing typedef/struct opener is elided in this listing.
28 int inuse; // nonzero if this is in use
29 void *lock; // the lock
// Backtrace captured at allocation time (who created the lock).
30 void *allocator[STEP_BACK_DEPTH];
32 int lockcount; // how many times it's been locked/unlocked (lock=+1, unlock=-1)
33 int irqcount; // how many times interrupts have been turned off (+1/-1)
// Backtraces of the most recent plain lock/unlock callers.
34 void *lastlocker[STEP_BACK_DEPTH];
36 void *lastunlocker[STEP_BACK_DEPTH];
// Backtraces of the most recent irqsave/irqrestore callers.
38 void *lastirqlocker[STEP_BACK_DEPTH];
40 unsigned long lastlockflags; // their flags
41 void *lastirqunlocker[STEP_BACK_DEPTH]
42 ; // who last unlocked
43 unsigned long lastunlockflags; // their flags
47 // This lock is currently used only to control
48 // allocation of entries in the global state
49 static spinlock_t lock;
// Global table of tracked locks; NUM_LOCKS is defined elsewhere in the file.
50 static lockcheck_state_t state[NUM_LOCKS];
// Forward declaration: helpers above printlock's definition dump records too.
52 static void printlock(char *prefix, lockcheck_state_t *l);
// Per-CPU stack of currently held locks (fields of lock_stack_t; the
// struct opener is elided in this listing). Parallel arrays indexed by slot.
56 u32 top; // next open slot 0..
57 void *lock[LOCK_STACK_DEPTH]; // the stack
58 char irq[LOCK_STACK_DEPTH]; // locked with irqsave?
61 static DEFINE_PER_CPU(lock_stack_t, lock_stack);
// Claim a free slot in the global state[] table for a new tracked lock.
// Presumably returns the claimed entry or NULL when the table is full —
// the allocation/return path is elided in this listing; TODO confirm.
63 static lockcheck_state_t *get_lock_entry(void)
// The table is guarded by the global spinlock with interrupts disabled,
// since lock/unlock events can occur in interrupt context.
69 spin_lock_irqsave(&lock,f);
// Linear scan for the first unused slot.
71 for (i=0;i<NUM_LOCKS;i++) {
79 spin_unlock_irqrestore(&lock,f);
// Look up the tracking entry for a given lock pointer.
// Linear scan of state[]; the match/return and not-found paths are
// elided in this listing (presumably returns NULL when untracked).
89 static lockcheck_state_t *find_lock_entry(void *lock)
94 for (i=0;i<NUM_LOCKS;i++) {
// An entry matches only if it is both in use and tracks this lock pointer.
96 if (l->inuse && l->lock == lock) {
// Return a tracking entry to the free pool (body elided in this listing;
// presumably clears l->inuse — TODO confirm against full source).
104 static void free_lock_entry(lockcheck_state_t *l)
// Dump this CPU's stack of currently held locks, most recent first,
// printing each held lock's full tracking record via printlock().
110 static void lock_stack_print(void)
114 lock_stack_t *mystack = &(get_cpu_var(lock_stack));
// get_cpu()/put_cpu() pair used only to read the CPU id for the message;
// NOTE(review): put_cpu() immediately re-enables preemption — presumably
// safe here because get_cpu_var() above already disabled it.
115 u32 cpu = get_cpu(); put_cpu();
117 if ((mystack->top)>0) {
// Walk from top of stack (i-1 is the slot index) down to the bottom.
118 for (i=mystack->top; i>0;i--) {
119 snprintf(buf,64,"LOCK STACK (cpu=%u, index=%u, irq=%d)",cpu, i-1, (int)(mystack->irq[i-1]));
120 printlock(buf,find_lock_entry(mystack->lock[i-1]));
// Balances the get_cpu_var() above.
123 put_cpu_var(lock_stack);
// Push a newly acquired lock onto this CPU's lock stack, recording whether
// it was taken with irqsave (irq flag). Overflow is reported, not fatal.
127 static void lock_stack_lock(void *lock, char irq)
129 lock_stack_t *mystack = &(get_cpu_var(lock_stack));
// CPU id for diagnostics only (see NOTE in lock_stack_print about put_cpu).
130 u32 cpu = get_cpu(); put_cpu();
// Refuse the push if the stack is full; release the per-CPU var before
// printing. The early return after DEBUG() is elided in this listing.
132 if (mystack->top>=(LOCK_STACK_DEPTH-1)) {
133 put_cpu_var(lock_stack);
134 DEBUG("LOCKCHECK: Locking lock 0x%p on cpu %u exceeds stack limit of %d\n",lock,cpu,LOCK_STACK_DEPTH);
// Record the lock and its irq mode at the current top slot
// (the top increment is elided in this listing).
137 mystack->lock[mystack->top] = lock;
138 mystack->irq[mystack->top] = irq;
140 put_cpu_var(lock_stack);
// Pop a lock from this CPU's lock stack, verifying strict LIFO discipline:
// the stack must be non-empty, the lock being released must be the one on
// top, and the irq mode must match how it was acquired. Each violation is
// reported via DEBUG(); the early returns and the actual top decrement are
// elided in this listing.
144 static void lock_stack_unlock(void *lock, char irq)
146 lock_stack_t *mystack = &(get_cpu_var(lock_stack));
// CPU id for diagnostics only (see NOTE in lock_stack_print about put_cpu).
147 u32 cpu = get_cpu(); put_cpu();
// Violation 1: unlock with nothing held on this CPU.
149 if (mystack->top==0) {
150 put_cpu_var(lock_stack);
151 DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u when lock stack is empty\n",lock,cpu);
// Violation 2: unlock of a lock that is not at the top (out-of-order unlock).
// The offending pointer is copied out before put_cpu_var releases the stack.
153 if (mystack->lock[mystack->top-1] != lock) {
154 void *otherlock=mystack->lock[mystack->top-1];
155 put_cpu_var(lock_stack);
156 DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u when top of stack is lock 0x%p\n",lock,cpu, otherlock);
// Violation 3: irqrestore-unlock of a plain lock or vice versa.
159 if (irq!=mystack->irq[mystack->top-1]) {
160 char otherirq = mystack->irq[mystack->top-1];
161 put_cpu_var(lock_stack);
162 DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u with irq=%d, but was locked with irq=%d\n",lock,cpu,irq,otherirq);
166 put_cpu_var(lock_stack);
// Initialize the lock checker: zero the global tracking table and set up
// the spinlock that guards slot allocation in it.
// NOTE(review): empty parameter list — should be (void) for a strict
// C prototype; confirm against the header's declaration.
173 void palacios_lockcheck_init()
175 memset(state,0,sizeof(lockcheck_state_t)*NUM_LOCKS);
176 spin_lock_init(&lock);
177 DEBUG("LOCKCHECK: LOCK CHECKING INITED\n");
181 // This needs to be defined explictly since the intrinsic does not take a var
// Capture STEP_BACK_DEPTH return addresses into array t, skipping the
// first STEP_BACK_DEPTH_FIRST frames (i.e. starting at our caller's caller).
// NOTE(review): multi-statement macro without do { ... } while (0) — unsafe
// inside an unbraced if/else; also hard-codes t[0]..t[3], so it silently
// depends on STEP_BACK_DEPTH being exactly 4. Flagged rather than changed
// here because surrounding call sites are elided in this listing.
183 #define backtrace(t) \
184 t[0]=__builtin_return_address(STEP_BACK_DEPTH_FIRST); \
185 t[1]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+1); \
186 t[2]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+2); \
187 t[3]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+3);
190 // For printing a backtrace
// Format string and matching argument expansion for one captured trace;
// %pS prints a kernel symbol+offset for each stored return address.
// NOTE(review): also fixed at 4 entries — must track STEP_BACK_DEPTH.
193 #define backtrace_format "%pS << %pS << %pS << %pS"
194 #define backtrace_expand(t) ((t)[0]),((t)[1]),((t)[2]),((t)[3])
// Reset a stored backtrace to empty (loop body elided in this listing;
// presumably nulls each of the STEP_BACK_DEPTH slots).
197 static void clear_trace(void **trace)
201 for (i=0;i<STEP_BACK_DEPTH;i++) {
// Dump one lock's full tracking record — allocator, lock/unlock and
// irqsave/irqrestore backtraces, counters, and saved flags — as a single
// DEBUG() line prefixed by the caller-supplied tag.
207 static void printlock(char *prefix, lockcheck_state_t *l)
// Guard: tolerate a NULL entry (e.g. find_lock_entry miss) or an entry
// with no lock pointer, reporting it as bogus instead of dereferencing.
209 if (!l || !(l->lock) ) {
210 DEBUG("LOCKCHECK: %s: lock 0x%p BOGUS\n",prefix,l);
// One large format built from adjacent string literals; each
// backtrace_format is matched by a backtrace_expand() argument below.
214 DEBUG("LOCKCHECK: %s: lock 0x%p, allocator="
216 ", lockcount=%d, lastlocker="
220 ", irqcount=%d, lastirqlocker="
222 ", lastlockflags=%lu, lastirqunlocker="
224 ", lastunlockflags=%lu\n",
226 backtrace_expand(l->allocator),
228 backtrace_expand(l->lastlocker),
229 backtrace_expand(l->lastunlocker),
231 backtrace_expand(l->lastirqlocker),
233 backtrace_expand(l->lastirqunlocker),
// Count how many tracked locks are currently held (lockcount>0); if the
// count reaches WARN_MULTIPLE_THRESHOLD, make a second pass printing every
// held lock's record. First pass stops counting early once the threshold
// is reached (the break and counter increments are elided in this listing).
240 static void find_multiple_locks_held(void)
244 lockcheck_state_t *l;
// Pass 1: count held locks up to the threshold.
247 for (i=0;i<NUM_LOCKS;i++) {
249 if (l->inuse && l->lockcount>0) {
251 if (have>=WARN_MULTIPLE_THRESHOLD) {
// Pass 2: threshold hit — report every held lock.
257 if (have>=WARN_MULTIPLE_THRESHOLD) {
259 for (i=0;i<NUM_LOCKS;i++) {
261 if (l->inuse && l->lockcount>0) {
262 snprintf(buf,64,"MULTIPLE LOCKS HELD (%d)",have);
// Same two-pass scheme as find_multiple_locks_held(), but keyed on
// irqcount>0: warn when at least WARN_MULTIPLE_THRESHOLD locks are
// currently held with interrupts saved/disabled.
271 static void find_multiple_irqs_held(void)
275 lockcheck_state_t *l;
// Pass 1: count irq-held locks up to the threshold.
278 for (i=0;i<NUM_LOCKS;i++) {
280 if (l->inuse && l->irqcount>0) {
282 if (have>=WARN_MULTIPLE_THRESHOLD) {
// Pass 2: threshold hit — report every irq-held lock.
288 if (have>=WARN_MULTIPLE_THRESHOLD) {
290 for (i=0;i<NUM_LOCKS;i++) {
292 if (l->inuse && l->irqcount>0) {
293 snprintf(buf,64,"MULTIPLE IRQS HELD (%d)",have);
// Teardown audit: walk the tracking table and report any lock that was
// never freed, or whose lock/irq counters are nonzero (i.e. still held or
// unbalanced) at shutdown. Purely diagnostic — nothing is reclaimed here
// as far as this listing shows.
// NOTE(review): empty parameter list — should be (void); see init.
303 void palacios_lockcheck_deinit()
306 lockcheck_state_t *l;
308 for (i=0;i<NUM_LOCKS;i++) {
// Entry still allocated at deinit => the lock was never freed.
311 printlock("ALLOCATED LOCK AT DEINIT",l);
// Nonzero lockcount => lock still held / unbalanced lock-unlock pairs.
312 if ((l->lockcount)) {
313 printlock("BAD LOCK COUNT AT DEINIT",l);
// (irqcount check elided in this listing) => unbalanced irqsave/restore.
316 printlock("BAD IRQ COUNT AT DEINIT",l);
320 INFO("LOCKCHECK: DEINITED\n");
// Begin tracking a newly created lock: claim a table entry, record the
// allocation backtrace, and zero all counters and event traces.
// Failure to get an entry is reported but otherwise tolerated.
324 void palacios_lockcheck_alloc(void *lock)
326 lockcheck_state_t *l=get_lock_entry();
// Table full (or allocation failed) — lock goes untracked.
329 DEBUG("LOCKCHECK: UNABLE TO ALLOCATE TRACKING DATA FOR LOCK 0x%p\n",lock);
// Capture who allocated this lock (our caller's caller, per backtrace()).
332 backtrace(l->allocator);
333 l->lockcount=l->irqcount=0;
// Start with empty lock/unlock and irqsave/irqrestore histories.
334 clear_trace(l->lastlocker);
335 clear_trace(l->lastunlocker);
336 clear_trace(l->lastirqlocker);
337 clear_trace(l->lastirqunlocker);
338 //INFO("LOCKCHECK: LOCK ALLOCATE 0x%p\n",lock);
// Guarded by PRINT_LOCK_ALLOC (the #if is elided in this listing).
340 printlock("NEW LOCK", l);
// Stop tracking a lock being destroyed. Reports a free of an untracked
// lock, and flags a lock freed while still held (nonzero lockcount) or
// with unbalanced irqsave/restore (irqcount check elided in this listing).
// The free_lock_entry() call is elided here as well.
344 void palacios_lockcheck_free(void *lock)
346 lockcheck_state_t *l=find_lock_entry(lock);
// Unknown lock pointer — nothing to release.
349 DEBUG("LOCKCHECK: FREEING UNTRACKED LOCK 0x%p\n",lock);
// Freeing a lock that is still held (or unbalanced) is a bug worth dumping.
353 if ((l->lockcount)) {
354 printlock("BAD LOCK COUNT AT FREE",l);
358 printlock("BAD IRQ COUNT AT FREE",l);
// Guarded by PRINT_LOCK_FREE (the #if is elided in this listing).
362 printlock("FREE LOCK",l);
// Record a plain (non-irqsave) lock acquisition: validate counters,
// capture the locker's backtrace, check for too many locks held at once,
// and push onto the per-CPU lock stack with irq=0.
// The lockcount increment is elided in this listing.
368 void palacios_lockcheck_lock(void *lock)
370 lockcheck_state_t *l=find_lock_entry(lock);
373 DEBUG("LOCKCHECK: LOCKING UNTRACKED LOCK 0x%p\n",lock);
// A plain spinlock being re-locked (lockcount!=0) would be a deadlock bug.
377 if (l->lockcount!=0) {
378 printlock("BAD LOCKCOUNT AT LOCK",l);
// Plain lock taken while this lock's irq count is unbalanced.
380 if (l->irqcount!=0) {
381 printlock("BAD IRQCOUNT AT LOCK",l);
385 backtrace(l->lastlocker);
387 find_multiple_locks_held();
// irq=0: acquired without irqsave.
389 lock_stack_lock(lock,0);
// Record a plain (non-irqrestore) lock release: validate counters, pop the
// per-CPU lock stack (irq=0), and capture the unlocker's backtrace.
// The lockcount decrement is elided in this listing.
396 void palacios_lockcheck_unlock(void *lock)
398 lockcheck_state_t *l=find_lock_entry(lock);
401 DEBUG("LOCKCHECK: UNLOCKING UNTRACKED LOCK 0x%p\n",lock);
// Unlocking a lock that is not held exactly once is unbalanced.
405 if (l->lockcount!=1) {
406 printlock("LOCKCHECK: BAD LOCKCOUNT AT UNLOCK",l);
// Plain unlock while this lock's irq count is unbalanced.
408 if (l->irqcount!=0) {
409 printlock("LOCKCHECK: BAD IRQCOUNT AT UNLOCK",l);
// irq=0: released without irqrestore.
412 lock_stack_unlock(lock,0);
415 backtrace(l->lastunlocker);
417 #if PRINT_LOCK_UNLOCK
418 printlock("UNLOCK",l);
// Record an irqsave lock acquisition: validate counters, save the caller's
// irq flags and backtrace, check for too many irq-held locks, and push
// onto the per-CPU lock stack with irq=1.
// The irqcount increment is elided in this listing.
424 void palacios_lockcheck_lock_irqsave(void *lock,unsigned long flags)
426 lockcheck_state_t *l=find_lock_entry(lock);
429 DEBUG("LOCKCHECK: IRQ LOCKING UNTRACKED LOCK 0x%p\n",lock);
// Re-acquisition of an already-held lock would be a deadlock bug.
433 if (l->lockcount!=0) {
434 printlock("BAD LOCKCOUNT AT IRQ LOCK",l);
436 if (l->irqcount!=0) {
437 printlock("BAD IRQCOUNT AT IRQ LOCK",l);
// Remember the flags so the matching irqrestore can be cross-checked.
441 l->lastlockflags=flags;
442 backtrace(l->lastirqlocker);
445 find_multiple_irqs_held();
// irq=1: acquired with irqsave.
447 lock_stack_lock(lock,1);
// Guarded by PRINT_LOCK_LOCK (the #if is elided in this listing).
450 printlock("LOCK_IRQSAVE",l);
// Record an irqrestore lock release: validate counters, save the restored
// flags, pop the per-CPU lock stack (irq=1), and capture the unlocker's
// backtrace. The irqcount decrement and the tail of the function are
// elided in this listing.
457 void palacios_lockcheck_unlock_irqrestore(void *lock,unsigned long flags)
459 lockcheck_state_t *l=find_lock_entry(lock);
462 DEBUG("LOCKCHECK: IRQ UNLOCKING UNTRACKED LOCK 0x%p\n",lock);
// NOTE(review): irq path expects lockcount==0 here (the plain lock path
// expects 1 at unlock) — presumably irqsave locking does not bump
// lockcount; confirm against the elided increment sites.
466 if (l->lockcount!=0) {
467 printlock("LOCKCHECK: BAD LOCKCOUNT AT IRQ UNLOCK",l);
// Exactly one outstanding irqsave is expected at irqrestore time.
469 if (l->irqcount!=1) {
470 printlock("LOCKCHECK: BAD IRQCOUNT AT IRQ UNLOCK",l);
// Save the flags being restored for later cross-checking/diagnostics.
474 l->lastunlockflags = flags;
// irq=1: released with irqrestore.
476 lock_stack_unlock(lock,1);
478 backtrace(l->lastirqunlocker);
480 #if PRINT_LOCK_UNLOCK
481 printlock("UNLOCK_IRQRESTORE",l);