Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

Checking out any other branch works the same way; substitute its name for devel, as in the sketch below.
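For example, to track a release branch (the name Release-1.2 here is only illustrative; list the real remote branch names first):

  cd palacios
  git branch -r                                            # list available remote branches
  git checkout --track -b Release-1.2 origin/Release-1.2   # hypothetical release branch name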


Add shared-exclusive (readers-writer) locks
palacios/src/palacios/vmm_lock.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm.h>
#include <palacios/vmm_lock.h>
#include <palacios/vmm_lowlevel.h>


extern struct v3_os_hooks * os_hooks;


int v3_lock_init(v3_lock_t * lock) {
    // allocate a mutex from the host OS via the os_hooks interface
    *lock = (addr_t)(os_hooks->mutex_alloc());

    if (!(*lock)) {
        return -1;
    }

    return 0;
}


void v3_lock_deinit(v3_lock_t * lock) {
    os_hooks->mutex_free((void *)*lock);
    *lock = 0;
}

void v3_lock(v3_lock_t lock) {
    os_hooks->mutex_lock((void *)lock, 0);
}

void v3_unlock(v3_lock_t lock) {
    os_hooks->mutex_unlock((void *)lock);
}

addr_t v3_lock_irqsave(v3_lock_t lock) {
    // acquire the lock with interrupts disabled; the returned state
    // must be handed back to v3_unlock_irqrestore()
    return (addr_t)(os_hooks->mutex_lock_irqsave((void *)lock, 1));
}


void v3_unlock_irqrestore(v3_lock_t lock, addr_t irq_state) {
    os_hooks->mutex_unlock_irqrestore((void *)lock, (void *)irq_state);
}

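/*
 * Shared-exclusive (readers-writer) locks, layered on the basic lock
 * above.  Readers briefly take the underlying lock to bump reader_count,
 * then release it, so any number of readers may proceed concurrently.
 * A writer repeatedly takes the lock and keeps it only once
 * reader_count has drained to zero, yielding between attempts.  A
 * writer thus excludes both readers and other writers; note that a
 * steady stream of readers can starve a waiting writer.
 */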
int v3_rw_lock_init(v3_rw_lock_t * lock)
{
    lock->reader_count = 0;
    return v3_lock_init(&(lock->lock));
}

void v3_rw_lock_deinit(v3_rw_lock_t * lock)
{
    v3_lock_deinit(&(lock->lock));
    lock->reader_count = 0;
}

void v3_read_lock(v3_rw_lock_t * lock)
{
    addr_t flags;

    flags = v3_lock_irqsave(lock->lock);
    lock->reader_count++;
    v3_unlock_irqrestore(lock->lock, flags);
    // readers can come in after us, writers cannot
}

void v3_read_unlock(v3_rw_lock_t * lock)
{
    addr_t flags;

    flags = v3_lock_irqsave(lock->lock);
    lock->reader_count--;
    v3_unlock_irqrestore(lock->lock, flags);
    // readers can come in after us, and also writers if reader_count == 0
}

void v3_write_lock(v3_rw_lock_t * lock)
{
    // a less hideous implementation is possible, of course...
    while (1) {
        v3_lock(lock->lock);
        if (!(lock->reader_count)) {
            break;
        }
        v3_unlock(lock->lock);
        V3_Yield();
    }
    // holding lock now - reader or writer cannot come in after us
}

addr_t v3_write_lock_irqsave(v3_rw_lock_t * lock)
{
    addr_t flags;

    while (1) {
        flags = v3_lock_irqsave(lock->lock);
        if (!(lock->reader_count)) {
            break;
        }
        v3_unlock_irqrestore(lock->lock, flags);
        V3_Yield();
    }
    // holding lock now with interrupts off - reader or writer cannot come in after us
    return flags;
}

void v3_write_unlock(v3_rw_lock_t * lock)
{
    // I am already holding this lock
    v3_unlock(lock->lock);
    // readers/writers can now come in
}

void v3_write_unlock_irqrestore(v3_rw_lock_t * lock, addr_t irq_state)
{
    // I am already holding this lock with interrupts off
    v3_unlock_irqrestore(lock->lock, irq_state);
    // readers/writers can now come in
}
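
For orientation, here is a minimal usage sketch of the shared-exclusive API above. It is not part of the repository: the v3_rw_lock_t layout shown in the comment is inferred from the code (vmm_lock.h holds the real definition), and table_lock, table_init, table_lookup, and table_update are hypothetical names.

#include <palacios/vmm_lock.h>

/* vmm_lock.h presumably defines something like:
 *     typedef struct { v3_lock_t lock; int reader_count; } v3_rw_lock_t;
 * (inferred from the uses of lock->lock and lock->reader_count above)
 */

static v3_rw_lock_t table_lock;     // protects a hypothetical shared table

int table_init(void) {
    // returns -1 if the host fails to allocate the underlying mutex
    return v3_rw_lock_init(&table_lock);
}

void table_lookup(void) {
    // any number of readers may hold the lock at once
    v3_read_lock(&table_lock);
    /* ... read-only access to the shared table ... */
    v3_read_unlock(&table_lock);
}

void table_update(void) {
    // a writer spins (yielding) until reader_count drains to zero,
    // then holds the underlying mutex exclusively
    v3_write_lock(&table_lock);
    /* ... modify the shared table ... */
    v3_write_unlock(&table_lock);
}

In code that must also exclude interrupt handlers, the irqsave variants would be used instead: save the addr_t returned by v3_write_lock_irqsave() and pass it back to v3_write_unlock_irqrestore().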