Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Merge branch 'devel'
[palacios.git] / kitten / include / arch-x86_64 / spinlock.h
1 #ifndef _X86_64_SPINLOCK_H
2 #define _X86_64_SPINLOCK_H
3
4 #include <arch/atomic.h>
5 #include <arch/rwlock.h>
6 #include <arch/page.h>
7
8 /*
9  * Your basic SMP spinlocks, allowing only a single CPU anywhere
10  *
11  * Simple spin lock operations.  There are two variants, one clears IRQ's
12  * on the local processor, one does not.
13  *
14  * We make no fairness assumptions. They have a cost.
15  *
16  * (the type definitions are in arch/spinlock_types.h)
17  */
18
/*
 * Nonzero when the lock is held: slock is 1 when free (see
 * __raw_spin_unlock_string), and the lock path decrements it to 0 or
 * below, so <= 0 means locked or contended.
 */
#define __raw_spin_is_locked(x) \
		(*(volatile signed int *)(&(x)->slock) <= 0)
21
/*
 * Asm template for __raw_spin_lock.  "lock ; decl" atomically drops
 * the counter; if the result went negative (js) the lock was already
 * held, so spin in an out-of-line section ("rep;nop" is the PAUSE
 * spin-wait hint) until the counter is positive again, then jump back
 * and retry the decrement.  Operand %0 is bound at the expansion site.
 */
#define __raw_spin_lock_string \
	"\n1:\t" \
	"lock ; decl %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpl $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END
33
/*
 * Asm template for __raw_spin_unlock: a plain store of 1 marks the
 * lock free.  NOTE: unlike __raw_spin_lock_string, this macro also
 * supplies the constraint list, so it can only be expanded where a
 * variable named `lock' (with an `slock' member) is in scope.
 */
#define __raw_spin_unlock_string \
	"movl $1,%0" \
		:"=m" (lock->slock) : : "memory"
37
/*
 * Acquire @lock, spinning until the slock counter can be taken.
 * The "memory" clobber orders the acquire against later accesses.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_lock_string
		:"=m" (lock->slock) : : "memory");
}
44
/* Flags-taking variant: @flags is ignored here; plain spin acquire. */
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
46
/*
 * Try to acquire @lock without spinning.  Atomically exchanges 0 into
 * slock (xchg with memory is implicitly locked); the previous value is
 * positive only if the lock was free.  Returns 1 on success, 0 if the
 * lock was already held.
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;

	__asm__ __volatile__(
		"xchgl %0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}
58
/*
 * Release @lock.  The template expands to a plain "movl $1" plus its
 * own constraint list (see __raw_spin_unlock_string); no locked
 * instruction is used for the release store.
 */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}
65
/* Busy-wait (with cpu_relax between polls) until @lock is observed free. */
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
68
69 /*
70  * Read-write spinlocks, allowing multiple readers
71  * but only one writer.
72  *
73  * NOTE! it is quite common to have readers in interrupts
74  * but no interrupt writers. For those circumstances we
75  * can "mix" irq-safe locks - any writer needs to get a
76  * irq-safe write-lock, but readers can get non-irqsafe
77  * read-locks.
78  *
79  * On x86, we implement read-write locks as a 32-bit counter
80  * with the high bit (sign) being the "contended" bit.
81  *
82  * The inline assembly is non-obvious. Think about it.
83  *
84  * Changed to use the same technique as rw semaphores.  See
85  * semaphore.h for details.  -ben
86  *
87  * the helpers are in arch/i386/kernel/semaphore.c
88  */
89
/* A reader can take the lock while the biased counter is still positive. */
#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
/* A writer needs the full bias, i.e. no readers or writers present. */
#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
92
/*
 * Acquire @rw for reading; the contended slow path is the out-of-line
 * "__read_lock_failed" helper named in the string (see the helper
 * location noted in the header comment above).
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw, "__read_lock_failed");
}
97
/*
 * Acquire @rw for writing; the contended slow path is the out-of-line
 * "__write_lock_failed" helper named in the string.
 */
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw, "__write_lock_failed");
}
102
103 static inline int __raw_read_trylock(raw_rwlock_t *lock)
104 {
105         atomic_t *count = (atomic_t *)lock;
106         atomic_dec(count);
107         if (atomic_read(count) >= 0)
108                 return 1;
109         atomic_inc(count);
110         return 0;
111 }
112
113 static inline int __raw_write_trylock(raw_rwlock_t *lock)
114 {
115         atomic_t *count = (atomic_t *)lock;
116         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
117                 return 1;
118         atomic_add(RW_LOCK_BIAS, count);
119         return 0;
120 }
121
/* Drop a read hold: atomically give back one unit of the reader count. */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
}
126
/*
 * Drop the write hold: atomically restore the full bias
 * (RW_LOCK_BIAS_STR is the string form pasted into the asm template).
 */
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
				: "=m" (rw->lock) : : "memory");
}
132
133 #endif /* _X86_64_SPINLOCK_H */