1 #include <linux/kernel.h>
2 #include <linux/kthread.h>
3 #include <linux/spinlock.h>
// How far up the stack to track the caller
// 2 => caller of v3_lock..
// Range of stack frames captured for each recorded backtrace.
#define STEP_BACK_DEPTH_FIRST 1
#define STEP_BACK_DEPTH_LAST 4
// Number of frames stored per trace (currently 4).
#define STEP_BACK_DEPTH (STEP_BACK_DEPTH_LAST-STEP_BACK_DEPTH_FIRST+1)
// show when multiple locks are held simultaneously
// This is the minimum number
#define WARN_MULTIPLE_THRESHOLD 3
int inuse; // nonzero if this is in use
void *lock; // the lock
// Backtrace of the code path that allocated (created) this lock.
void *allocator[STEP_BACK_DEPTH];
int lockcount; // how many times it's been locked/unlocked (lock=+1, unlock=-1)
int irqcount; // how many times interrupts have been turned off (+1/-1)
// Backtrace of the most recent plain (non-irq) lock acquisition.
void *lastlocker[STEP_BACK_DEPTH];
// Backtrace of the most recent plain (non-irq) unlock.
void *lastunlocker[STEP_BACK_DEPTH];
// Backtrace of the most recent irqsave acquisition.
void *lastirqlocker[STEP_BACK_DEPTH];
unsigned long lastlockflags; // their flags
void *lastirqunlocker[STEP_BACK_DEPTH]
; // who last unlocked
unsigned long lastunlockflags; // their flags
// This lock is currently used only to control
// allocation of entries in the global state
static spinlock_t lock;
// Global table of per-lock tracking entries (fixed size NUM_LOCKS).
static lockcheck_state_t state[NUM_LOCKS];
// Forward declaration: printlock is used before its definition below.
static void printlock(char *prefix, lockcheck_state_t *l);
u32 top; // next open slot 0..
void *lock[LOCK_STACK_DEPTH]; // the stack
char irq[LOCK_STACK_DEPTH]; // locked with irqsave?
// One lock stack per CPU, recording the locks this CPU currently
// holds in acquisition order (used to detect out-of-order unlocks).
static DEFINE_PER_CPU(lock_stack_t, lock_stack);
// Claim a free slot in the global tracking table.  The global
// spinlock serializes slot allocation across CPUs.
// NOTE(review): the slot-selection and return logic sits in lines
// elided from this view — presumably returns NULL when the table
// is full; confirm against the full source.
static lockcheck_state_t *get_lock_entry(void)
spin_lock_irqsave(&lock,f);
// Linear scan for an unused entry.
for (i=0;i<NUM_LOCKS;i++) {
spin_unlock_irqrestore(&lock,f);
// Linear search of the global table for the tracking entry that
// records the given raw lock pointer; callers treat a NULL result
// as "untracked lock".
static lockcheck_state_t *find_lock_entry(void *lock)
for (i=0;i<NUM_LOCKS;i++) {
// Match only in-use slots that reference this exact lock pointer.
if (l->inuse && l->lock == lock) {
// Release a tracking entry back to the pool — presumably clears
// l->inuse (body elided from this view; confirm in full source).
static void free_lock_entry(lockcheck_state_t *l)
// Dump this CPU's stack of currently-held locks, one printlock()
// line per entry, newest (top-1) first.
static void lock_stack_print(void)
lock_stack_t *mystack = &(get_cpu_var(lock_stack));
// get_cpu()/put_cpu(): only the cpu number is needed for the
// message; preemption is re-enabled immediately after reading it.
u32 cpu = get_cpu(); put_cpu();
if ((mystack->top)>0) {
// Walk from the most recently acquired lock down to the oldest.
for (i=mystack->top; i>0;i--) {
snprintf(buf,64,"LOCK STACK (cpu=%u, index=%u, irq=%d)",cpu, i-1, (int)(mystack->irq[i-1]));
printlock(buf,find_lock_entry(mystack->lock[i-1]));
put_cpu_var(lock_stack);
// Push (lock, irq) onto this CPU's lock stack on acquisition.
// irq is nonzero when the lock was taken with irqsave; the flag is
// checked again at unlock time to catch mismatched lock/unlock modes.
static void lock_stack_lock(void *lock, char irq)
lock_stack_t *mystack = &(get_cpu_var(lock_stack));
u32 cpu = get_cpu(); put_cpu();
// NOTE(review): overflow guard uses LOCK_STACK_DEPTH-1, so the
// final stack slot is never used — looks like an off-by-one;
// confirm whether >= LOCK_STACK_DEPTH was intended.
if (mystack->top>=(LOCK_STACK_DEPTH-1)) {
put_cpu_var(lock_stack);
DEBUG("LOCKCHECK: Locking lock 0x%p on cpu %u exceeds stack limit of %d\n",lock,cpu,LOCK_STACK_DEPTH);
// Record the new top-of-stack entry (top is incremented in an
// elided line).
mystack->lock[mystack->top] = lock;
mystack->irq[mystack->top] = irq;
put_cpu_var(lock_stack);
// Pop a lock from this CPU's lock stack on release, verifying LIFO
// discipline: the released lock must be the most recently acquired
// one, and must be released in the same mode (irq vs plain) in
// which it was acquired.
static void lock_stack_unlock(void *lock, char irq)
lock_stack_t *mystack = &(get_cpu_var(lock_stack));
u32 cpu = get_cpu(); put_cpu();
// Underflow: more unlocks than locks on this CPU.
if (mystack->top==0) {
put_cpu_var(lock_stack);
DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u when lock stack is empty\n",lock,cpu);
// Out-of-order release: top of stack is a different lock.
if (mystack->lock[mystack->top-1] != lock) {
void *otherlock=mystack->lock[mystack->top-1];
put_cpu_var(lock_stack);
DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u when top of stack is lock 0x%p\n",lock,cpu, otherlock);
// Mode mismatch: e.g. spin_lock_irqsave paired with plain spin_unlock.
if (irq!=mystack->irq[mystack->top-1]) {
char otherirq = mystack->irq[mystack->top-1];
put_cpu_var(lock_stack);
DEBUG("LOCKCHECK: Unlocking lock 0x%p on cpu %u with irq=%d, but was locked with irq=%d\n",lock,cpu,irq,otherirq);
put_cpu_var(lock_stack);
// Module init: zero the global tracking table and initialize the
// spinlock that guards entry allocation.
void palacios_lockcheck_init()
memset(state,0,sizeof(lockcheck_state_t)*NUM_LOCKS);
spin_lock_init(&lock);
DEBUG("LOCKCHECK: LOCK CHECKING INITED\n");
// This needs to be defined explictly since the intrinsic does not take a var
//
// Capture STEP_BACK_DEPTH return addresses into the array t.
// Wrapped in do { } while (0) so the macro expands to a single
// statement and is safe inside an unbraced if/else; each use of
// the argument is parenthesized to avoid precedence surprises.
// NOTE: the expansion hard-codes 4 slots, i.e. it assumes
// STEP_BACK_DEPTH == 4 (FIRST=1, LAST=4) — keep in sync.
#define backtrace(t) \
	do { \
		(t)[0]=__builtin_return_address(STEP_BACK_DEPTH_FIRST); \
		(t)[1]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+1); \
		(t)[2]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+2); \
		(t)[3]=__builtin_return_address(STEP_BACK_DEPTH_FIRST+3); \
	} while (0)

// For printing a backtrace
// %pS prints a kernel symbol + offset for each saved address.
#define backtrace_format "%pS << %pS << %pS << %pS"
// Expands a trace array into the 4 arguments backtrace_format needs.
#define backtrace_expand(t) ((t)[0]),((t)[1]),((t)[2]),((t)[3])
// Reset a saved backtrace: clear all STEP_BACK_DEPTH slots.
static void clear_trace(void **trace)
for (i=0;i<STEP_BACK_DEPTH;i++) {
// Pretty-print one tracking entry: the raw lock pointer plus the
// allocator / last-lock / last-unlock / last-irq backtraces and the
// saved counters and interrupt flags, prefixed with the caller's tag.
static void printlock(char *prefix, lockcheck_state_t *l)
// Guard against untracked locks (find_lock_entry returned NULL)
// or an entry whose lock pointer was cleared.
if (!l || !(l->lock) ) {
DEBUG("LOCKCHECK: %s: lock 0x%p BOGUS\n",prefix,l);
// The full format string is built from adjacent string literals;
// NOTE(review): backtrace_format pieces sit between these literals
// on lines elided from this view.
DEBUG("LOCKCHECK: %s: lock 0x%p, allocator="
", lockcount=%d, lastlocker="
", irqcount=%d, lastirqlocker="
", lastlockflags=%lu, lastirqunlocker="
", lastunlockflags=%lu\n",
backtrace_expand(l->allocator),
backtrace_expand(l->lastlocker),
backtrace_expand(l->lastunlocker),
backtrace_expand(l->lastirqlocker),
backtrace_expand(l->lastirqunlocker),
// Warn when at least WARN_MULTIPLE_THRESHOLD locks are held at
// once: first pass counts held locks, second pass prints each one.
static void find_multiple_locks_held(void)
lockcheck_state_t *l;
// Pass 1: count in-use entries with a positive lock count.
for (i=0;i<NUM_LOCKS;i++) {
if (l->inuse && l->lockcount>0) {
// Stop counting once the threshold is reached.
if (have>=WARN_MULTIPLE_THRESHOLD) {
if (have>=WARN_MULTIPLE_THRESHOLD) {
// Pass 2: print every lock that is currently held.
for (i=0;i<NUM_LOCKS;i++) {
if (l->inuse && l->lockcount>0) {
snprintf(buf,64,"MULTIPLE LOCKS HELD (%d)",have);
// Warn when at least WARN_MULTIPLE_THRESHOLD locks are held with
// interrupts off simultaneously: count pass, then print pass.
// Mirrors find_multiple_locks_held() but keys off irqcount.
static void find_multiple_irqs_held(void)
lockcheck_state_t *l;
// Pass 1: count in-use entries with a positive irq count.
for (i=0;i<NUM_LOCKS;i++) {
if (l->inuse && l->irqcount>0) {
// Stop counting once the threshold is reached.
if (have>=WARN_MULTIPLE_THRESHOLD) {
if (have>=WARN_MULTIPLE_THRESHOLD) {
// Pass 2: print every lock currently held in irq mode.
for (i=0;i<NUM_LOCKS;i++) {
if (l->inuse && l->irqcount>0) {
snprintf(buf,64,"MULTIPLE IRQS HELD (%d)",have);
// Module teardown: walk the tracking table and report any lock
// still allocated, or with an unbalanced lock or irq count, then
// announce completion.
void palacios_lockcheck_deinit()
lockcheck_state_t *l;
for (i=0;i<NUM_LOCKS;i++) {
// Any in-use entry at deinit means a lock was never freed.
printlock("ALLOCATED LOCK AT DEINIT",l);
// Nonzero lockcount at teardown means a lock/unlock imbalance.
if ((l->lockcount)) {
printlock("BAD LOCK COUNT AT DEINIT",l);
printlock("BAD IRQ COUNT AT DEINIT",l);
INFO("LOCKCHECK: DEINITED\n");
// Begin tracking a newly created lock: reserve a table entry,
// record the allocator's backtrace, and zero all counters and
// saved traces.
void palacios_lockcheck_alloc(void *lock)
lockcheck_state_t *l=get_lock_entry();
// Table full — the lock simply goes untracked.
DEBUG("LOCKCHECK: UNABLE TO ALLOCATE TRACKING DATA FOR LOCK 0x%p\n",lock);
backtrace(l->allocator);
l->lockcount=l->irqcount=0;
clear_trace(l->lastlocker);
clear_trace(l->lastunlocker);
clear_trace(l->lastirqlocker);
clear_trace(l->lastirqunlocker);
//INFO("LOCKCHECK: LOCK ALLOCATE 0x%p\n",lock);
printlock("NEW LOCK", l);
// Stop tracking a lock that is being destroyed; warn if it is
// still held or has an interrupts-off imbalance at free time.
void palacios_lockcheck_free(void *lock)
lockcheck_state_t *l=find_lock_entry(lock);
// Freeing a lock we never saw allocated.
DEBUG("LOCKCHECK: FREEING UNTRACKED LOCK 0x%p\n",lock);
// Lock freed while still held (lockcount != 0).
if ((l->lockcount)) {
printlock("BAD LOCK COUNT AT FREE",l);
printlock("BAD IRQ COUNT AT FREE",l);
// Hook for a plain (non-irq) lock acquisition: validate prior
// state, record who locked it, warn on many simultaneous locks,
// and push onto the per-CPU lock stack.
void palacios_lockcheck_lock(void *lock)
lockcheck_state_t *l=find_lock_entry(lock);
DEBUG("LOCKCHECK: LOCKING UNTRACKED LOCK 0x%p\n",lock);
// A plain lock must not already be held...
if (l->lockcount!=0) {
printlock("BAD LOCKCOUNT AT LOCK",l);
// ...and must not be mixed with an outstanding irqsave acquisition.
if (l->irqcount!=0) {
printlock("BAD IRQCOUNT AT LOCK",l);
backtrace(l->lastlocker);
find_multiple_locks_held();
// irq=0: acquired without irqsave.
lock_stack_lock(lock,0);
// Hook for a plain (non-irq) unlock: verify exactly one
// outstanding plain acquisition and no irq imbalance, pop the
// per-CPU lock stack, and record the unlocker's backtrace.
void palacios_lockcheck_unlock(void *lock)
lockcheck_state_t *l=find_lock_entry(lock);
DEBUG("LOCKCHECK: UNLOCKING UNTRACKED LOCK 0x%p\n",lock);
// Exactly one plain lock must be outstanding.
if (l->lockcount!=1) {
printlock("LOCKCHECK: BAD LOCKCOUNT AT UNLOCK",l);
// No irqsave acquisitions may be pending on this lock.
if (l->irqcount!=0) {
printlock("LOCKCHECK: BAD IRQCOUNT AT UNLOCK",l);
// irq=0: released without irqrestore.
lock_stack_unlock(lock,0);
backtrace(l->lastunlocker);
// Hook for spin_lock_irqsave(): validate prior state, save the
// caller's interrupt flags and backtrace, warn on many
// simultaneous irq-off sections, and push onto the per-CPU stack.
void palacios_lockcheck_lock_irqsave(void *lock,unsigned long flags)
lockcheck_state_t *l=find_lock_entry(lock);
DEBUG("LOCKCHECK: IRQ LOCKING UNTRACKED LOCK 0x%p\n",lock);
// Must not already be held in either mode.
if (l->lockcount!=0) {
printlock("BAD LOCKCOUNT AT IRQ LOCK",l);
if (l->irqcount!=0) {
printlock("BAD IRQCOUNT AT IRQ LOCK",l);
// Remember the saved interrupt flags for later sanity checks.
l->lastlockflags=flags;
backtrace(l->lastirqlocker);
find_multiple_irqs_held();
// irq=1: acquired with irqsave.
lock_stack_lock(lock,1);
// Hook for spin_unlock_irqrestore(): verify balanced irq-mode
// usage, record the restore flags and the unlocker's backtrace,
// and pop the per-CPU lock stack.
void palacios_lockcheck_unlock_irqrestore(void *lock,unsigned long flags)
lockcheck_state_t *l=find_lock_entry(lock);
DEBUG("LOCKCHECK: IRQ UNLOCKING UNTRACKED LOCK 0x%p\n",lock);
// The plain-lock count must be untouched by the irq path.
if (l->lockcount!=0) {
printlock("LOCKCHECK: BAD LOCKCOUNT AT IRQ UNLOCK",l);
// Exactly one matching irqsave acquisition must be outstanding.
if (l->irqcount!=1) {
printlock("LOCKCHECK: BAD IRQCOUNT AT IRQ UNLOCK",l);
l->lastunlockflags = flags;
// irq=1: released with irqrestore.
lock_stack_unlock(lock,1);
backtrace(l->lastirqunlocker);