Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out in the same way.
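For example, to work from a release branch instead, substitute its name (the name below is only a placeholder; list the actual remote branches with "git branch -r"):

  cd palacios
  git checkout --track -b Release-1.2 origin/Release-1.2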


File: kitten/include/arch-x86_64/system.h
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <lwk/kernel.h>
#include <arch/segment.h>

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"

#define __EXTRA_CLOBBER  \
        ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

#define switch_to(prev,next,last) \
        asm volatile(SAVE_CONTEXT                                                   \
                     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
                     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
                     "call __switch_to\n\t"                                       \
                     ".globl thread_return\n"                                   \
                     "thread_return:\n\t"                                           \
                     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                       \
                     "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
                     "lock ; btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"     \
                     "movq %%rax,%%rdi\n\t"                                       \
                     "jc   ret_from_fork\n\t"                                     \
                     RESTORE_CONTEXT                                                \
                     : "=a" (last)                                                \
                     : [next] "S" (next), [prev] "D" (prev),                      \
                       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
                       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
                       [tif_fork] "i" (TIF_FORK),                         \
                       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
                       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
                     : "memory", "cc" __EXTRA_CLOBBER)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)  \
        asm volatile("\n"                       \
                "1:\t"                          \
                "movl %k0,%%" #seg "\n"         \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "movl %1,%%" #seg "\n\t"        \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 8\n\t"                  \
                ".quad 1b,3b\n"                 \
                ".previous"                     \
                : :"r" (value), "r" (0))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;
        asm volatile("movq %%cr0,%0" : "=r" (cr0));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
        unsigned long cr3;
        asm("movq %%cr3,%0" : "=r" (cr3));
        return cr3;
}

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;
        asm("movq %%cr4,%0" : "=r" (cr4));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() write_cr0(8 | read_cr0())

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long read_eflags(void)
{
        unsigned long eflags;

        __asm__ __volatile__(
                "# __raw_save_flags\n\t"
                "pushf ; pop %0"
                : "=g" (eflags)
                : /* no input */
                : "memory"
        );

        return eflags;
}

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
        wbinvd();
}

#endif  /* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
        *ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *        but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 1:
                        __asm__ __volatile__("xchgb %b0,%1"
                                :"=q" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 2:
                        __asm__ __volatile__("xchgw %w0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 4:
                        __asm__ __volatile__("xchgl %k0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 8:
                        __asm__ __volatile__("xchgq %0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
        }
        return x;
}
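
/*
 * Usage sketch added by the editor, not part of the original header:
 * xchg() atomically swaps a new value into *ptr and returns the previous
 * contents, so tas() gives a minimal test-and-set spinlock. The lock word
 * and helper names below are illustrative only.
 *
 *      static volatile unsigned long simple_lock = 0;
 *
 *      static inline void simple_lock_acquire(void)
 *      {
 *              while (tas(&simple_lock) != 0)
 *                      ;
 *      }
 *
 *      static inline void simple_lock_release(void)
 *      {
 *              (void) xchg(&simple_lock, 0);
 *      }
 */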

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__("lock ; cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__("lock ; cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__("lock ; cmpxchgl %k1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 8:
                __asm__ __volatile__("lock ; cmpxchgq %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}

#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))
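
/*
 * Usage sketch added by the editor, not part of the original header:
 * cmpxchg() stores the new value only if the location still holds the
 * expected old value, and returns what was there before; comparing the
 * return value with the expected value tells the caller whether the
 * update won. A lock-free increment, with illustrative names:
 *
 *      static unsigned long counter = 0;
 *
 *      static inline void counter_inc(void)
 *      {
 *              unsigned long old, seen;
 *              do {
 *                      old  = counter;
 *                      seen = cmpxchg(&counter, old, old + 1);
 *              } while (seen != old);
 *      }
 */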

#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      do {} while(0)

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")
#define wmb()   asm volatile("sfence" ::: "memory")
#define read_barrier_depends()  do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/* interrupt control.. */
#define local_save_flags(x)     do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x)    __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")

#define local_irq_disable()     __asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()      __asm__ __volatile__("sti": : :"memory")

#define irqs_disabled()                 \
({                                      \
        unsigned long flags;            \
        local_save_flags(flags);        \
        !(flags & (1<<9));              \
})

#define irqs_enabled() !irqs_disabled()

/* For spinlocks etc */
#define local_irq_save(x)       do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
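
/*
 * Usage sketch added by the editor, not part of the original header:
 * local_irq_save()/local_irq_restore() bracket a section that must not be
 * interrupted on the local CPU, and they preserve whatever the interrupt
 * state was on entry, so the pattern nests safely. The body shown here is
 * illustrative only.
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      update_per_cpu_state();         (state also touched by an IRQ handler)
 *      local_irq_restore(flags);
 */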

/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()             __asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shutdown the processor */
#define halt()                  __asm__ __volatile__("hlt": : :"memory")

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif