Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

Checking out any other branch works the same way; see the example below.
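For instance, to track a release branch (the name "release-1.0" is illustrative; run "git branch -r" to list the branches that actually exist):

  git checkout --track -b release-1.0 origin/release-1.0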


Below is the file kitten/include/arch-x86_64/uaccess.h as of the commit "Merge branch 'devel'".
#ifndef _X86_64_UACCESS_H
#define _X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <lwk/compiler.h>
#include <lwk/errno.h>
#include <lwk/prefetch.h>
#include <lwk/task.h>
#include <arch/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS       0xFFFFFFFFFFFFFFFFUL
#define USER_DS         PAGE_OFFSET

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->arch.addr_limit)
#define set_fs(x)       (current->arch.addr_limit = (x))
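
/*
 * Illustrative sketch, not part of the original header: the usual
 * save/override/restore pattern for the address limit.  While the
 * limit is KERNEL_DS, the checked user-access routines below will
 * accept kernel addresses.  (Assumes addr_limit is an unsigned long,
 * as the definitions above suggest.)
 */
static inline void example_run_with_kernel_ds(void (*fn)(void))
{
        unsigned long old_limit = get_fs();     /* save the current limit */
        set_fs(KERNEL_DS);                      /* bypass range checking */
        fn();                                   /* checked accesses now pass for kernel pointers */
        set_fs(old_limit);                      /* restore the saved limit */
}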

#define segment_eq(a,b) ((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current->arch.addr_limit)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({ \
        unsigned long flag,sum; \
        __chk_user_ptr(addr); \
        asm("# range_ok\n\r" \
                "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"  \
                :"=&r" (flag), "=r" (sum) \
                :"1" (addr),"g" ((long)(size)),"g" (current->arch.addr_limit)); \
        flag; })

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
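
/*
 * Illustrative sketch, not part of the original header: validating a
 * user-supplied range up front.  access_ok() is nonzero when
 * [addr, addr+size) fits below the current address limit; the type
 * argument (VERIFY_READ/VERIFY_WRITE) is not examined on x86_64.
 */
static inline int example_validate_range(const void __user *ubuf,
                                         unsigned long len)
{
        if (!access_ok(VERIFY_READ, ubuf, len))
                return -EFAULT;         /* range escapes user space */
        return 0;
}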

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size,ret,x,ptr) \
        asm volatile("call __get_user_" #size \
                :"=a" (ret),"=d" (x) \
                :"c" (ptr) \
                :"r8")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)                                                 \
({      unsigned long __val_gu;                                         \
        int __ret_gu;                                                   \
        __chk_user_ptr(ptr);                                            \
        switch(sizeof (*(ptr))) {                                       \
        case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;          \
        case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;          \
        case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;          \
        case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;          \
        default: __get_user_bad(); break;                               \
        }                                                               \
        (x) = (typeof(*(ptr)))__val_gu;                                 \
        __ret_gu;                                                       \
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)                                    \
        asm volatile("call __put_user_" #size                           \
                :"=a" (ret)                                             \
                :"c" (ptr),"d" (x)                                      \
                :"r8")

#define put_user(x,ptr)                                                 \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

#define __put_user_nocheck(x,ptr,size)                  \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x),(ptr),(size),__pu_err);     \
        __pu_err;                                       \
})


#define __put_user_check(x,ptr,size)                    \
({                                                      \
        int __pu_err;                                   \
        typeof(*(ptr)) __user *__pu_addr = (ptr);       \
        switch (size) {                                 \
        case 1: __put_user_x(1,__pu_err,x,__pu_addr); break;    \
        case 2: __put_user_x(2,__pu_err,x,__pu_addr); break;    \
        case 4: __put_user_x(4,__pu_err,x,__pu_addr); break;    \
        case 8: __put_user_x(8,__pu_err,x,__pu_addr); break;    \
        default: __put_user_bad();                      \
        }                                               \
        __pu_err;                                       \
})
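
/*
 * Illustrative sketch, not part of the original header: put_user()
 * infers the transfer size from the pointer type (8 bytes here) and
 * returns 0 on success or -EFAULT if the store faults.
 */
static inline int example_write_long(long __user *uptr, long val)
{
        return put_user(val, uptr);
}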

#define __put_user_size(x,ptr,size,retval)                              \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
          case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
          case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
          case 8: __put_user_asm(x,ptr,retval,"q","","Zr",-EFAULT); break;\
          default: __put_user_bad();                                    \
        }                                                               \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile(                                   \
                "1:     mov"itype" %"rtype"1,%2\n"              \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     mov %3,%0\n"                            \
                "       jmp 2b\n"                               \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 8\n"                             \
                "       .quad 1b,3b\n"                          \
                ".previous"                                     \
                : "=r"(err)                                     \
                : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
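
/*
 * How the fixup above works: the "mov" at label 1 is the store to user
 * memory.  The __ex_table section records the pair (1b, 3b); when that
 * mov takes a page fault that cannot be satisfied, the fault handler
 * looks up the faulting instruction address in __ex_table and resumes
 * at label 3, which loads the error code into the result register and
 * jumps back past the store.  Normal execution never enters the
 * .fixup section.
 */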


#define __get_user_nocheck(x,ptr,size)                          \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val,(ptr),(size),__gu_err);        \
        (x) = (typeof(*(ptr)))__gu_val;                         \
        __gu_err;                                               \
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)                              \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
          case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
          case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
          case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
          default: (x) = __get_user_bad();                              \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile(                                   \
                "1:     mov"itype" %2,%"rtype"1\n"              \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     mov %3,%0\n"                            \
                "       xor"itype" %"rtype"1,%"rtype"1\n"       \
                "       jmp 2b\n"                               \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 8\n"                             \
                "       .quad 1b,3b\n"                          \
                ".previous"                                     \
                : "=r"(err), ltype (x)                          \
                : "m"(__m(addr)), "i"(errno), "0"(err))
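
/*
 * Illustrative sketch, not part of the original header: the checked
 * get_user() versus the unchecked __get_user().  Both return 0 on
 * success or -EFAULT; __get_user() assumes the caller has already
 * validated the pointer with access_ok().
 */
static inline int example_read_two_ints(int __user *uptr, int *a, int *b)
{
        if (get_user(*a, uptr))                 /* checked access */
                return -EFAULT;
        if (!access_ok(VERIFY_READ, uptr + 1, sizeof(int)))
                return -EFAULT;
        if (__get_user(*b, uptr + 1))           /* unchecked access */
                return -EFAULT;
        return 0;
}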

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
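
/*
 * Illustrative sketch, not part of the original header: bulk copies.
 * copy_from_user() and copy_to_user() return the number of bytes that
 * could NOT be copied, so a nonzero result means a partial or failed
 * copy.
 */
static inline int example_fetch_buffer(void *kbuf,
                                       const void __user *ubuf,
                                       unsigned len)
{
        if (copy_from_user(kbuf, ubuf, len))
                return -EFAULT;         /* some bytes were left uncopied */
        return 0;
}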

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst,(__force void *)src,size);
        switch (size) {
        case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
                return ret;
        case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
                return ret;
        case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
                return ret;
        case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
                return ret;
        case 10:
                /* on a fault here only 10 bytes remain uncopied, not 16 */
                __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",10);
                if (unlikely(ret)) return ret;
                __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
                return ret;
        case 16:
                __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
                if (unlikely(ret)) return ret;
                __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
                return ret;
        default:
                return copy_user_generic(dst,(__force void *)src,size);
        }
}
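
/*
 * Because size is usually a compile-time constant here, the switch
 * above lets gcc reduce __copy_from_user() to one or two inlined mov
 * instructions; only non-constant or unusual sizes fall back to the
 * out-of-line copy_user_generic().
 */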

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,src,size);
        switch (size) {
        case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
                return ret;
        case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
                return ret;
        case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
                return ret;
        case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
                return ret;
        case 10:
                __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
                if (unlikely(ret)) return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16*)src],4+(u16 __user *)dst,ret,"w","w","ir",2);
                return ret;
        case 16:
                __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
                if (unlikely(ret)) return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64*)src],1+(u64 __user *)dst,ret,"q","","ir",8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst,src,size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,(__force void *)src,size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
                if (likely(!ret))
                        __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
                if (likely(!ret))
                        __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
                if (likely(!ret))
                        __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
                if (likely(!ret))
                        __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,(__force void *)src,size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
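
/*
 * Illustrative sketch, not part of the original header: clear_user()
 * zeroes a user buffer and returns the number of bytes left unzeroed,
 * so zero means complete success.
 */
static inline int example_zero_user(void __user *ubuf, unsigned long len)
{
        if (clear_user(ubuf, len))
                return -EFAULT;         /* buffer only partially zeroed */
        return 0;
}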

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#endif /* _X86_64_UACCESS_H */