microblaze: uaccess: fix clean user macro
[linux-2.6-block.git] / arch / microblaze / include / asm / uaccess.h
CommitLineData
2660663f 1/*
0d6de953
MS
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2660663f
MS
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _ASM_MICROBLAZE_UACCESS_H
12#define _ASM_MICROBLAZE_UACCESS_H
13
14#ifdef __KERNEL__
15#ifndef __ASSEMBLY__
16
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/sched.h> /* RLIMIT_FSIZE */
20#include <linux/mm.h>
21
22#include <asm/mmu.h>
23#include <asm/page.h>
24#include <asm/pgtable.h>
2660663f
MS
25#include <linux/string.h>
26
/* Direction hints for access_ok(); ignored on this architecture. */
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * On Microblaze the fs value is actually the top of the corresponding
 * address space.
 *
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
 */
# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

# ifndef CONFIG_MMU
/* No kernel/user split without an MMU: both segments share one limit. */
# define KERNEL_DS	MAKE_MM_SEG(0)
# define USER_DS	KERNEL_DS
# else
/* Kernel may touch any address; user space is capped at TASK_SIZE - 1. */
# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
# endif

/* Current address-space limit lives in thread_info->addr_limit. */
# define get_ds()	(KERNEL_DS)
# define get_fs()	(current_thread_info()->addr_limit)
# define set_fs(val)	(current_thread_info()->addr_limit = (val))

/* True when two segments carry the same limit value. */
# define segment_eq(a, b)	((a).seg == (b).seg)
57
357bc3c9
MS
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	/* insn: address allowed to fault; fixup: address to resume at */
	unsigned long insn, fixup;
};
40db0834 73
0d6de953
MS
74#ifndef CONFIG_MMU
75
60a729f7
MS
76/* Check against bounds of physical memory */
77static inline int ___range_ok(unsigned long addr, unsigned long size)
78{
79 return ((addr < memory_start) ||
80 ((addr + size) > memory_end));
81}
2660663f
MS
82
/* Normalize both arguments to unsigned long before the bounds test. */
#define __range_ok(addr, size) \
		___range_ok((unsigned long)(addr), (unsigned long)(size))

/* Nonzero when the range lies inside physical memory; 'type' is unused. */
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
40b1156d
MS
87
88#else
89
/*
 * Address is valid if:
 * - "addr", "addr + size" and "size" are all below the limit
 *
 * The three values are OR-ed together: the OR is at least as large as
 * each of them, so a single compare against the segment limit proves
 * all three are below it.
 */
#define access_ok(type, addr, size) \
	(get_fs().seg > (((unsigned long)(addr)) | \
		(size) | ((unsigned long)(addr) + (size))))

/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
 type?"WRITE":"READ",addr,size,get_fs().seg)) */
100
101#endif
102
#ifdef CONFIG_MMU
/* MMU: fixup code and exception-table entries go into live sections. */
# define __FIXUP_SECTION	".section .fixup,\"ax\"\n"
# define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"
#else
/* No MMU: route the fixup machinery into .discard so the linker drops it. */
# define __FIXUP_SECTION	".section .discard,\"ax\"\n"
# define __EX_TABLE_SECTION	".section .discard,\"a\"\n"
#endif
110
111#ifndef CONFIG_MMU
2660663f 112
838d2406
AB
/* Undefined function to trigger linker error */
extern int bad_user_access_length(void);

/*
 * Non-MMU __get_user: user addresses are plain memory, so the fetch is
 * a direct dereference (memcpy for 8 bytes). Evaluates to 0 on success,
 * or -EFAULT (via a link-time error) for unsupported sizes.
 */
/* FIXME this is function for optimalization -> memcpy */
#define __get_user(var, ptr)				\
({							\
	int __gu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1:						\
	case 2:						\
	case 4:						\
		(var) = *(ptr);				\
		break;					\
	case 8:						\
		memcpy((void *) &(var), (ptr), 8);	\
		break;					\
	default:					\
		/* unsupported width: zero the result */ \
		(var) = 0;				\
		__gu_err = __get_user_bad();		\
		break;					\
	}						\
	__gu_err;					\
})

/* Fails the link if reached with an unsupported access width. */
#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
138
/*
 * Non-MMU __put_user: direct store for 1/2/4 bytes; the 8-byte case
 * goes through a local __pu_val temporary so (var) is evaluated once
 * before the memcpy. Evaluates to 0 on success.
 */
#define __put_user(var, ptr)					\
({								\
	int __pu_err = 0;					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
		*(ptr) = (var);					\
		break;						\
	case 8: {						\
		typeof(*(ptr)) __pu_val = (var);		\
		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
		}						\
		break;						\
	default:						\
		__pu_err = __put_user_bad();			\
		break;						\
	}							\
	__pu_err;						\
})

/* Fails the link if reached with an unsupported access width. */
#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))

/* No MMU means no access checking: checked and unchecked forms match. */
#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))
2660663f 165
0d6de953
MS
/* Non-MMU user copies are plain memcpy; the trailing ", 0" yields the
 * conventional "0 bytes not copied" success result. */
#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

/* Unchecked and atomic variants all collapse to the same memcpy. */
#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
			(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
			(__copy_from_user((to), (from), (n)))
2660663f 175
40b1156d
MS
/* Non-MMU __clear_user: plain memset, always succeeds (evaluates to 0). */
#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)

/* same as the MMU version */
static inline unsigned long clear_user(void *addr, unsigned long size)
{
	/* Returns 0 on success, or 'size' when the range check fails. */
	if (access_ok(VERIFY_WRITE, addr, size))
		size = __clear_user(addr, size);
	return size;
}
185
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);

/* Out-of-line string helpers for the non-MMU configuration. */
extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);
191
192#else /* CONFIG_MMU */
193
0d6de953
MS
/*
 * All the __XXX versions macros/functions below do not perform
 * access checking. It is assumed that the necessary checks have been
 * already performed before the function (macro) is called.
 */

/* Checked fetch: range-verify first, -EFAULT when access_ok() fails. */
#define get_user(x, ptr)						\
({									\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
		? __get_user((x), (ptr)) : -EFAULT;			\
})

/* Checked store: range-verify first, -EFAULT when access_ok() fails. */
#define put_user(x, ptr)						\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
		? __put_user((x), (ptr)) : -EFAULT;			\
})
211
/*
 * Fetch a 1/2/4-byte value from user space with exception-table fixup.
 * Wider accesses are rejected with -EINVAL. 'x' is assigned in every
 * case (zero for the unsupported-size path). Evaluates to the error
 * code: 0, -EFAULT (fault taken), or -EINVAL.
 */
#define __get_user(x, ptr)					\
({								\
	unsigned long __gu_val;					\
	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/	\
	long __gu_err;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
		break;						\
	case 2:							\
		__get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
		break;						\
	case 4:							\
		__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
		break;						\
	default:						\
		__gu_val = 0; __gu_err = -EINVAL;		\
	}							\
	x = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})
233
/*
 * Single guarded load. Label 1: is the instruction allowed to fault;
 * the __ex_table pair (1b,3b) routes a fault to fixup 3:, which sets
 * the error to -EFAULT and branches back to 2:. On the normal path,
 * addk zeroes the error register (r0 reads as zero on MicroBlaze).
 */
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
		"1:" insn " %1, %2, r0;	\
			addk %0, r0, r0;	\
		2:	\
		.section .fixup,\"ax\";	\
		3:	brid 2b;	\
			addik %0, r0, %3;	\
		.previous;	\
		.section __ex_table,\"a\";	\
		.word 1b,3b;	\
		.previous;"	\
		: "=r"(__gu_err), "=r"(__gu_val)		\
		: "r"(__gu_ptr), "i"(-EFAULT)			\
	);							\
})
251
/*
 * Store a 1/2/4/8-byte value to user space with exception-table fixup.
 * (x) is captured once into __gu_val before the size dispatch.
 * Evaluates to 0 on success, -EFAULT on a fault, -EINVAL for an
 * unsupported size.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) volatile __gu_val = (x);		\
	long __gu_err = 0;					\
	switch (sizeof(__gu_val)) {				\
	case 1:							\
		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
		break;						\
	case 2:							\
		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
		break;						\
	case 4:							\
		__put_user_asm("sw", (ptr), __gu_val, __gu_err);	\
		break;						\
	case 8:							\
		/* 64-bit values go through the two-word helper */	\
		__put_user_asm_8((ptr), __gu_val, __gu_err);	\
		break;						\
	default:						\
		__gu_err = -EINVAL;				\
	}							\
	__gu_err;						\
})
274
/*
 * 64-bit store as two guarded 32-bit word stores (labels 1: and 2:),
 * each with its own __ex_table pair pointing at fixup 4:. The error
 * register %0 doubles as the load scratch, hence the "=&r" early-
 * clobber; the success path finally zeroes it with addk %0,r0,r0.
 */
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
({							\
__asm__ __volatile__ ("	lwi %0, %1, 0;	\
		1:	swi %0, %2, 0;	\
			lwi %0, %1, 4;	\
		2:	swi %0, %2, 4;	\
			addk %0,r0,r0;	\
		3:	\
		.section .fixup,\"ax\";	\
		4:	brid 3b;	\
			addik %0, r0, %3;	\
		.previous;	\
		.section __ex_table,\"a\";	\
		.word 1b,4b,2b,4b;	\
		.previous;"	\
	: "=&r"(__gu_err)	\
	: "r"(&__gu_val),	\
	  "r"(__gu_ptr), "i"(-EFAULT)	\
	);	\
})
295
/*
 * Single guarded store, mirror image of __get_user_asm: label 1: may
 * fault, the __ex_table pair (1b,3b) routes it to fixup 3:, which sets
 * -EFAULT and branches back to 2:; the normal path zeroes the error.
 */
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
		"1:" insn " %1, %2, r0;	\
			addk %0, r0, r0;	\
		2:	\
		.section .fixup,\"ax\";	\
		3:	brid 2b;	\
			addik %0, r0, %3;	\
		.previous;	\
		.section __ex_table,\"a\";	\
		.word 1b,3b;	\
		.previous;"	\
		: "=r"(__gu_err)				\
		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
	);							\
})
313
40b1156d
MS
314/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
315static inline unsigned long __must_check __clear_user(void __user *to,
316 unsigned long n)
0d6de953 317{
40b1156d
MS
318 /* normal memset with two words to __ex_table */
319 __asm__ __volatile__ ( \
320 "1: sb r0, %2, r0;" \
321 " addik %0, %0, -1;" \
322 " bneid %0, 1b;" \
323 " addik %2, %2, 1;" \
324 "2: " \
325 __EX_TABLE_SECTION \
326 ".word 1b,2b;" \
327 ".previous;" \
328 : "=r"(n) \
329 : "0"(n), "r"(to)
330 );
331 return n;
332}
333
334static inline unsigned long __must_check clear_user(void __user *to,
335 unsigned long n)
336{
337 might_sleep();
338 if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
339 return n;
340
341 return __clear_user(to, n);
0d6de953
MS
342}
343
95dfbbe4
JW
/*
 * copy_{to,from}_user() must return the number of bytes that could
 * NOT be copied: 0 on success, up to 'n' on failure. A failed
 * access_ok() therefore yields 'n' ("nothing copied"). The result is
 * an unsigned byte count, so the old "-EFAULT" result was wrong: it
 * handed callers a huge bogus count rather than a testable failure.
 */
#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
		copy_from_user((to), (from), (n))

#define copy_to_user(to, from, n)					\
	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
			__copy_tofrom_user((void __user *)(to),		\
				(__force const void __user *)(from), (n)) \
			: (n))

#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))

#define copy_from_user(to, from, n)					\
	(access_ok(VERIFY_READ, (from), (n)) ?				\
			__copy_tofrom_user((__force void __user *)(to),	\
				(void __user *)(from), (n))		\
			: (n))
362
0d6de953
MS
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);

/* Only the first byte is pre-checked; later faults are presumably
 * handled by __strncpy_user's own fixups — TODO confirm against the
 * out-of-line implementation. -EFAULT when even byte 0 is bad. */
#define strncpy_from_user(to, from, len)	\
		(access_ok(VERIFY_READ, from, 1) ?	\
			__strncpy_user(to, from, len) : -EFAULT)
/* Returns 0 ("invalid") when the string is not accessible at all. */
#define strnlen_user(str, len)	\
		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
2660663f 371
0d6de953 372#endif /* CONFIG_MMU */
2660663f 373
95dfbbe4
JW
/* Low-level user/kernel copy primitive shared by the copy_*_user()
 * macros above. NOTE(review): return value is presumably the number of
 * bytes not copied — confirm against the assembly implementation. */
extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);
376
2660663f
MS
377#endif /* __ASSEMBLY__ */
378#endif /* __KERNEL__ */
379
380#endif /* _ASM_MICROBLAZE_UACCESS_H */