Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute
  cd palacios
  git checkout --track -b devel origin/devel

The other branches are similar.
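For example, to track a release branch instead (the branch name "release" below is illustrative; run "git branch -r" inside the clone to see which branches actually exist), execute

  git checkout --track -b release origin/release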


kitten/include/lwk/seqlock.h
#ifndef _LWK_SEQLOCK_H
#define _LWK_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes.  Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *      do {
 *          seq = read_seqbegin(&foo);
 *      ...
 *      } while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP systems the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <lwk/spinlock.h>

typedef struct {
        unsigned sequence;
        spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)


/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 */
static inline void write_seqlock(seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        spin_unlock(&sl->lock);
}
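
/* Illustrative writer usage (added example, not part of the original
 * header; the names foo_lock, foo_a and foo_b are hypothetical).
 * A writer brackets its update with write_seqlock()/write_sequnlock():
 *
 *      static seqlock_t foo_lock = SEQLOCK_UNLOCKED;
 *      static unsigned foo_a, foo_b;
 *
 *      void foo_update(unsigned a, unsigned b)
 *      {
 *              write_seqlock(&foo_lock);
 *              foo_a = a;
 *              foo_b = b;
 *              write_sequnlock(&foo_lock);
 *      }
 */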

static inline int write_tryseqlock(seqlock_t *sl)
{
        int ret = spin_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}

/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
        unsigned ret = sl->sequence;
        smp_rmb();
        return ret;
}

/* Test if reader processed invalid data.
 * If the initial value is odd,
 *      then a writer had already started when the section was entered.
 * If the sequence value changed,
 *      then a writer changed the data while in the section.
 *
 * Using xor saves one conditional branch.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
{
        smp_rmb();
        return (iv & 1) | (sl->sequence ^ iv);
}
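
/* Illustrative reader usage pairing read_seqbegin()/read_seqretry()
 * (added example; foo_lock, foo_a and foo_b are the hypothetical names
 * from the writer sketch above). The reader copies the data out and
 * retries if a writer ran concurrently:
 *
 *      void foo_read(unsigned *a, unsigned *b)
 *      {
 *              unsigned seq;
 *              do {
 *                      seq = read_seqbegin(&foo_lock);
 *                      *a = foo_a;
 *                      *b = foo_b;
 *              } while (read_seqretry(&foo_lock, seq));
 *      }
 */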


/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

/* Start of read using pointer to a sequence counter only.  */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret = s->sequence;
        smp_rmb();
        return ret;
}

/* Test if reader processed invalid data.
 * Equivalent to: iv is odd or sequence number has changed.
 *                (iv & 1) || (*s != iv)
 * Using xor saves one conditional branch.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
{
        smp_rmb();
        return (iv & 1) | (s->sequence ^ iv);
}


/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}
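
/* Illustrative seqcount usage (added example; the names bar_seq, bar_lock
 * and bar_val are hypothetical). Writers are assumed to be serialized by
 * their own lock and use the bare sequence counter only to let readers
 * detect concurrent updates:
 *
 *      static seqcount_t bar_seq = SEQCNT_ZERO;
 *      static spinlock_t bar_lock = SPIN_LOCK_UNLOCKED;
 *      static unsigned bar_val;
 *
 *      void bar_update(unsigned v)
 *      {
 *              spin_lock(&bar_lock);
 *              write_seqcount_begin(&bar_seq);
 *              bar_val = v;
 *              write_seqcount_end(&bar_seq);
 *              spin_unlock(&bar_lock);
 *      }
 */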

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)                              \
        do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)                                         \
        do { local_irq_disable();   write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)                                          \
        do { local_bh_disable();    write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)                         \
        do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)                                       \
        do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)                                        \
        do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags)                              \
        ({ local_irq_save(flags);   read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)                       \
        ({                                                              \
                int ret = read_seqretry(lock, iv);                      \
                local_irq_restore(flags);                               \
                ret;                                                    \
        })
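
/* Illustrative use of the IRQ-safe reader variants (added example;
 * foo_lock and foo_a are the hypothetical names used above). The flags
 * word saves and restores the caller's interrupt state around each
 * attempt of the read section:
 *
 *      unsigned long flags;
 *      unsigned seq, val;
 *
 *      do {
 *              seq = read_seqbegin_irqsave(&foo_lock, flags);
 *              val = foo_a;
 *      } while (read_seqretry_irqrestore(&foo_lock, seq, flags));
 */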

#endif /* _LWK_SEQLOCK_H */