microblaze: Move exception_table_entry upward
[linux-2.6-block.git] / arch / microblaze / include / asm / uaccess.h
1 /*
2  * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3  * Copyright (C) 2008-2009 PetaLogix
4  * Copyright (C) 2006 Atmark Techno, Inc.
5  *
6  * This file is subject to the terms and conditions of the GNU General Public
7  * License. See the file "COPYING" in the main directory of this archive
8  * for more details.
9  */
10
11 #ifndef _ASM_MICROBLAZE_UACCESS_H
12 #define _ASM_MICROBLAZE_UACCESS_H
13
14 #ifdef __KERNEL__
15 #ifndef __ASSEMBLY__
16
17 #include <linux/kernel.h>
18 #include <linux/errno.h>
19 #include <linux/sched.h> /* RLIMIT_FSIZE */
20 #include <linux/mm.h>
21
22 #include <asm/mmu.h>
23 #include <asm/page.h>
24 #include <asm/pgtable.h>
25 #include <linux/string.h>
26
27 #define VERIFY_READ     0
28 #define VERIFY_WRITE    1
29
30 /*
31  * On Microblaze the fs value is actually the top of the corresponding
32  * address space.
33  *
34  * The fs value determines whether argument validity checking should be
35  * performed or not. If get_fs() == USER_DS, checking is performed, with
36  * get_fs() == KERNEL_DS, checking is bypassed.
37  *
38  * For historical reasons, these macros are grossly misnamed.
39  *
40  * For a non-MMU arch like Microblaze, KERNEL_DS and USER_DS are equal.
41  */
42 # define MAKE_MM_SEG(s)       ((mm_segment_t) { (s) })
43
/* Without an MMU there is one flat address space, so both segments collapse to 0. */
44 #  ifndef CONFIG_MMU
45 #  define KERNEL_DS     MAKE_MM_SEG(0)
46 #  define USER_DS       KERNEL_DS
47 #  else
48 #  define KERNEL_DS     MAKE_MM_SEG(0xFFFFFFFF)
49 #  define USER_DS       MAKE_MM_SEG(TASK_SIZE - 1)
50 #  endif
51
/* The per-thread addr_limit read/written here is what the MMU access_ok() below compares against. */
52 # define get_ds()       (KERNEL_DS)
53 # define get_fs()       (current_thread_info()->addr_limit)
54 # define set_fs(val)    (current_thread_info()->addr_limit = (val))
55
56 # define segment_eq(a, b)       ((a).seg == (b).seg)
57
58 /*
59  * The exception table consists of pairs of addresses: the first is the
60  * address of an instruction that is allowed to fault, and the second is
61  * the address at which the program should continue. No registers are
62  * modified, so it is entirely up to the continuation code to figure out
63  * what to do.
64  *
65  * All the routines below use bits of fixup code that are out of line
66  * with the main instruction path. This means when everything is well,
67  * we don't even have to jump over them. Further, they do not intrude
68  * on our cache or tlb entries.
69  */
70 struct exception_table_entry {
71         unsigned long insn, fixup;
72 };
73
/*
 * Defined for both configs: "clearing" user memory is a plain memset();
 * the comma expression evaluates to 0, i.e. zero bytes left un-cleared.
 */
74 #define __clear_user(addr, n)   (memset((void *)(addr), 0, (n)), 0)
75
76 #ifndef CONFIG_MMU
77
78 extern int ___range_ok(unsigned long addr, unsigned long size);
79
/*
 * ___range_ok() returns 0 when [addr, addr + size) is an acceptable
 * range; access_ok()/__access_ok() convert that into the usual boolean
 * sense (non-zero == OK).  The "type" argument is ignored here.
 */
80 #define __range_ok(addr, size) \
81                 ___range_ok((unsigned long)(addr), (unsigned long)(size))
82
83 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
/*
 * Fix: the parameter was misspelled "add" while the body used "addr",
 * so the macro silently evaluated whatever "addr" variable happened to
 * exist at the expansion site instead of its own first argument.
 */
84 #define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
85
86 /* Undefined function to trigger linker error */
87 extern int bad_user_access_length(void);
88
/*
 * Non-MMU __get_user(): 1/2/4-byte reads are a direct dereference and
 * 8-byte reads a memcpy(), with no fault fixup; the only error path is
 * an unsupported operand size, which goes through __get_user_bad().
 */
89 /* FIXME: candidate for optimisation - all sizes could go through memcpy() */
90 #define __get_user(var, ptr)                            \
91 ({                                                      \
92         int __gu_err = 0;                               \
93         switch (sizeof(*(ptr))) {                       \
94         case 1:                                         \
95         case 2:                                         \
96         case 4:                                         \
97                 (var) = *(ptr);                         \
98                 break;                                  \
99         case 8:                                         \
100                 memcpy((void *) &(var), (ptr), 8);      \
101                 break;                                  \
102         default:                                        \
103                 (var) = 0;                              \
104                 __gu_err = __get_user_bad();            \
105                 break;                                  \
106         }                                               \
107         __gu_err;                                       \
108 })
109
/* Calls the undefined bad_user_access_length() so misuse fails at link time. */
110 #define __get_user_bad()        (bad_user_access_length(), (-EFAULT))
111
/*
 * Non-MMU __put_user(): mirror of __get_user() above - direct store for
 * 1/2/4 bytes, memcpy() via a typed temporary for 8 bytes, and a
 * link-time error for any other size.
 */
112 /* FIXME(stale?): __pu_val is defined in the 8-byte case below - this note may be obsolete */
113 #define __put_user(var, ptr)                                    \
114 ({                                                              \
115         int __pu_err = 0;                                       \
116         switch (sizeof(*(ptr))) {                               \
117         case 1:                                                 \
118         case 2:                                                 \
119         case 4:                                                 \
120                 *(ptr) = (var);                                 \
121                 break;                                          \
122         case 8: {                                               \
123                 typeof(*(ptr)) __pu_val = (var);                \
124                 memcpy(ptr, &__pu_val, sizeof(__pu_val));       \
125                 }                                               \
126                 break;                                          \
127         default:                                                \
128                 __pu_err = __put_user_bad();                    \
129                 break;                                          \
130         }                                                       \
131         __pu_err;                                               \
132 })
133
/* Calls the undefined bad_user_access_length() so misuse fails at link time. */
134 #define __put_user_bad()        (bad_user_access_length(), (-EFAULT))
135
/*
 * No MMU: kernel and user addresses are interchangeable, so the checked
 * and unchecked variants are identical, and copies are plain memcpy()
 * whose comma expression yields 0 (no bytes left uncopied).
 */
136 #define put_user(x, ptr)        __put_user((x), (ptr))
137 #define get_user(x, ptr)        __get_user((x), (ptr))
138
139 #define copy_to_user(to, from, n)       (memcpy((to), (from), (n)), 0)
140 #define copy_from_user(to, from, n)     (memcpy((to), (from), (n)), 0)
141
142 #define __copy_to_user(to, from, n)     (copy_to_user((to), (from), (n)))
143 #define __copy_from_user(to, from, n)   (copy_from_user((to), (from), (n)))
144 #define __copy_to_user_inatomic(to, from, n) \
145                         (__copy_to_user((to), (from), (n)))
146 #define __copy_from_user_inatomic(to, from, n) \
147                         (__copy_from_user((to), (from), (n)))
148
/*
 * clear_user() - checked zeroing of a user buffer.
 * Returns the number of bytes NOT cleared: 0 on success, or the full
 * size if the range check fails.
 */
149 static inline unsigned long clear_user(void *addr, unsigned long size)
150 {
151         if (access_ok(VERIFY_WRITE, addr, size))
152                 size = __clear_user(addr, size);
153         return size;
154 }
155
156 /* Returns 0 if exception not found and fixup otherwise.  */
157 extern unsigned long search_exception_table(unsigned long);
158
159 extern long strncpy_from_user(char *dst, const char *src, long count);
160 extern long strnlen_user(const char *src, long count);
161
162 #else /* CONFIG_MMU */
163
164 /*
165  * Address is valid if:
166  *  - "addr", "addr + size" and "size" are all below the limit
167  */
/*
 * The OR folds all three quantities into a single compare against the
 * segment limit: if any of them is at or above get_fs().seg, the ORed
 * value is too and the check fails.
 */
168 #define access_ok(type, addr, size) \
169         (get_fs().seg > (((unsigned long)(addr)) | \
170                 (size) | ((unsigned long)(addr) + (size))))
171
172 /* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
173  type?"WRITE":"READ",addr,size,get_fs().seg)) */
174
175 /*
176  * All the __XXX versions of the macros/functions below do not perform
177  * access checking. It is assumed that the necessary checks have
178  * already been performed before the function (macro) is called.
179  */
180
/*
 * Checked accessors: validate the user pointer with access_ok() first,
 * then fall through to the unchecked __get_user()/__put_user().
 * -EFAULT is returned without touching memory when the check fails.
 */
181 #define get_user(x, ptr)                                                \
182 ({                                                                      \
183         access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))                   \
184                 ? __get_user((x), (ptr)) : -EFAULT;                     \
185 })
186
187 #define put_user(x, ptr)                                                \
188 ({                                                                      \
189         access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))                  \
190                 ? __put_user((x), (ptr)) : -EFAULT;                     \
191 })
192
/*
 * MMU __get_user(): dispatch on operand size to a guarded load
 * (lbu/lhu/lw).  There is no 8-byte case - 64-bit reads yield -EINVAL.
 * NOTE(review): "x" is assigned without surrounding parentheses; fine
 * for simple lvalue arguments, but confirm no caller passes anything
 * more exotic.
 */
193 #define __get_user(x, ptr)                                              \
194 ({                                                                      \
195         unsigned long __gu_val;                                         \
196         /*unsigned long __gu_ptr = (unsigned long)(ptr);*/              \
197         long __gu_err;                                                  \
198         switch (sizeof(*(ptr))) {                                       \
199         case 1:                                                         \
200                 __get_user_asm("lbu", (ptr), __gu_val, __gu_err);       \
201                 break;                                                  \
202         case 2:                                                         \
203                 __get_user_asm("lhu", (ptr), __gu_val, __gu_err);       \
204                 break;                                                  \
205         case 4:                                                         \
206                 __get_user_asm("lw", (ptr), __gu_val, __gu_err);        \
207                 break;                                                  \
208         default:                                                        \
209                 __gu_val = 0; __gu_err = -EINVAL;                       \
210         }                                                               \
211         x = (__typeof__(*(ptr))) __gu_val;                              \
212         __gu_err;                                                       \
213 })
214
/*
 * One guarded user load.  1: performs the load and falls through to 2:
 * with the error register cleared (addk %0, r0, r0).  If the load
 * faults, the __ex_table entry (1b -> 3b) redirects execution to the
 * fixup, which branches back to 2: with -EFAULT placed in %0 from the
 * delay slot of brid.
 */
215 #define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)              \
216 ({                                                                      \
217         __asm__ __volatile__ (                                          \
218                         "1:"    insn    " %1, %2, r0;                   \
219                                 addk    %0, r0, r0;                     \
220                         2:                                              \
221                         .section .fixup,\"ax\";                         \
222                         3:      brid    2b;                             \
223                                 addik   %0, r0, %3;                     \
224                         .previous;                                      \
225                         .section __ex_table,\"a\";                      \
226                         .word   1b,3b;                                  \
227                         .previous;"                                     \
228                 : "=r"(__gu_err), "=r"(__gu_val)                        \
229                 : "r"(__gu_ptr), "i"(-EFAULT)                           \
230         );                                                              \
231 })
232
/*
 * MMU __put_user(): size-dispatched guarded store (sb/sh/sw); unlike
 * __get_user() above, 8-byte stores are supported via
 * __put_user_asm_8().  Unsupported sizes yield -EINVAL.
 */
233 #define __put_user(x, ptr)                                              \
234 ({                                                                      \
235         __typeof__(*(ptr)) volatile __gu_val = (x);                     \
236         long __gu_err = 0;                                              \
237         switch (sizeof(__gu_val)) {                                     \
238         case 1:                                                         \
239                 __put_user_asm("sb", (ptr), __gu_val, __gu_err);        \
240                 break;                                                  \
241         case 2:                                                         \
242                 __put_user_asm("sh", (ptr), __gu_val, __gu_err);        \
243                 break;                                                  \
244         case 4:                                                         \
245                 __put_user_asm("sw", (ptr), __gu_val, __gu_err);        \
246                 break;                                                  \
247         case 8:                                                         \
248                 __put_user_asm_8((ptr), __gu_val, __gu_err);            \
249                 break;                                                  \
250         default:                                                        \
251                 __gu_err = -EINVAL;                                     \
252         }                                                               \
253         __gu_err;                                                       \
254 })
255
/*
 * 64-bit store done as two guarded 32-bit stores (offsets 0 and 4 of
 * both source and destination).  %0 is a scratch register: it first
 * carries each loaded word, is cleared on success (addk %0,r0,r0), and
 * on a fault the fixup at 4: overwrites it with -EFAULT before resuming
 * at 3:.  Both store insns (1: and 2:) share that fixup in __ex_table.
 */
256 #define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)  \
257 ({                                                      \
258 __asm__ __volatile__ (" lwi     %0, %1, 0;              \
259                 1:      swi     %0, %2, 0;              \
260                         lwi     %0, %1, 4;              \
261                 2:      swi     %0, %2, 4;              \
262                         addk    %0,r0,r0;               \
263                 3:                                      \
264                 .section .fixup,\"ax\";                 \
265                 4:      brid    3b;                     \
266                         addik   %0, r0, %3;             \
267                 .previous;                              \
268                 .section __ex_table,\"a\";              \
269                 .word   1b,4b,2b,4b;                    \
270                 .previous;"                             \
271         : "=&r"(__gu_err)                               \
272         : "r"(&__gu_val),                               \
273         "r"(__gu_ptr), "i"(-EFAULT)                     \
274         );                                              \
275 })
276
/*
 * One guarded user store; same fixup pattern as __get_user_asm(): fall
 * through to 2: with %0 cleared on success, or fault into 3:, which
 * resumes at 2: with -EFAULT in %0 (set in the brid delay slot).
 */
277 #define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)      \
278 ({                                                              \
279         __asm__ __volatile__ (                                  \
280                         "1:"    insn    " %1, %2, r0;           \
281                                 addk    %0, r0, r0;             \
282                         2:                                      \
283                         .section .fixup,\"ax\";                 \
284                         3:      brid    2b;                     \
285                                 addik   %0, r0, %3;             \
286                         .previous;                              \
287                         .section __ex_table,\"a\";              \
288                         .word   1b,3b;                          \
289                         .previous;"                             \
290                 : "=r"(__gu_err)                                \
291                 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)    \
292         );                                                      \
293 })
294
295 /*
296  * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
297  */
/*
 * Byte-at-a-time zeroing loop: sb stores r0 (zero) through the pointer
 * in %2, the remaining count in %0 is decremented, and the pointer
 * advances in the bneid delay slot.  A fault at 1: goes straight to 2:
 * (the __ex_table fixup target), so whatever is left in "size" is
 * returned as the uncleared byte count.
 * NOTE(review): the fixup region is closed with ".section .text" rather
 * than ".previous" - confirm this header is never expanded from code
 * assembled into a different section.
 * NOTE(review): the asm modifies %2 ("to"), which is declared only as
 * an input operand - this relies on the compiler not reusing that
 * register's value afterwards; verify.
 */
298 static inline int clear_user(char *to, int size)
299 {
300         if (size && access_ok(VERIFY_WRITE, to, size)) {
301                 __asm__ __volatile__ ("                         \
302                                 1:                              \
303                                         sb      r0, %2, r0;     \
304                                         addik   %0, %0, -1;     \
305                                         bneid   %0, 1b;         \
306                                         addik   %2, %2, 1;      \
307                                 2:                              \
308                                 .section __ex_table,\"a\";      \
309                                 .word   1b,2b;                  \
310                                 .section .text;"                \
311                         : "=r"(size)                            \
312                         : "0"(size), "r"(to)
313                 );
314         }
315         return size;
316 }
317
/*
 * Copy helpers: __copy_tofrom_user() (declared at the bottom of this
 * file) returns the number of bytes left uncopied.
 * NOTE(review): on a failed access_ok() these macros return -EFAULT
 * rather than "n" - audit callers that compare the result against the
 * requested byte count.
 * (__copy_from_user() expands copy_from_user(), which is #defined
 * further down; this is fine because macro expansion happens at the
 * use site, after the whole header has been read.)
 */
318 #define __copy_from_user(to, from, n)   copy_from_user((to), (from), (n))
319 #define __copy_from_user_inatomic(to, from, n) \
320                 copy_from_user((to), (from), (n))
321
322 #define copy_to_user(to, from, n)                                       \
323         (access_ok(VERIFY_WRITE, (to), (n)) ?                           \
324                 __copy_tofrom_user((void __user *)(to),                 \
325                         (__force const void __user *)(from), (n))       \
326                 : -EFAULT)
327
328 #define __copy_to_user(to, from, n)     copy_to_user((to), (from), (n))
329 #define __copy_to_user_inatomic(to, from, n)    copy_to_user((to), (from), (n))
330
331 #define copy_from_user(to, from, n)                                     \
332         (access_ok(VERIFY_READ, (from), (n)) ?                          \
333                 __copy_tofrom_user((__force void __user *)(to),         \
334                         (void __user *)(from), (n))                     \
335                 : -EFAULT)
336
337 extern int __strncpy_user(char *to, const char __user *from, int len);
338 extern int __strnlen_user(const char __user *sstr, int len);
339
/* Note the asymmetric failure values: strncpy_from_user() gives -EFAULT, strnlen_user() gives 0. */
340 #define strncpy_from_user(to, from, len)        \
341                 (access_ok(VERIFY_READ, from, 1) ?      \
342                         __strncpy_user(to, from, len) : -EFAULT)
343 #define strnlen_user(str, len)  \
344                 (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
345
346 #endif /* CONFIG_MMU */
347
348 extern unsigned long __copy_tofrom_user(void __user *to,
349                 const void __user *from, unsigned long size);
350
351 #endif  /* __ASSEMBLY__ */
352 #endif /* __KERNEL__ */
353
354 #endif /* _ASM_MICROBLAZE_UACCESS_H */