microblaze: Remove segment.h
[linux-2.6-block.git] / arch / microblaze / include / asm / uaccess.h
CommitLineData
2660663f 1/*
0d6de953
MS
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2660663f
MS
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _ASM_MICROBLAZE_UACCESS_H
12#define _ASM_MICROBLAZE_UACCESS_H
13
14#ifdef __KERNEL__
15#ifndef __ASSEMBLY__
16
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/sched.h> /* RLIMIT_FSIZE */
20#include <linux/mm.h>
21
22#include <asm/mmu.h>
23#include <asm/page.h>
24#include <asm/pgtable.h>
2660663f
MS
25#include <linux/string.h>
26
27#define VERIFY_READ 0
28#define VERIFY_WRITE 1
29
40db0834
MS
/*
 * On Microblaze the fs value is actually the top of the corresponding
 * address space.
 *
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
 */
# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

# ifndef CONFIG_MMU
/* No MMU: there is no user/kernel address-space split, so both limits
 * collapse to the same (ignored) value. */
# define KERNEL_DS	MAKE_MM_SEG(0)
# define USER_DS	KERNEL_DS
# else
/* MMU: the kernel may touch any address; user accesses are limited to
 * [0, TASK_SIZE).  The stored value is the highest permitted address,
 * hence the "- 1". */
# define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
# endif

/* Current address-space limit lives in thread_info->addr_limit. */
# define get_ds()	(KERNEL_DS)
# define get_fs()	(current_thread_info()->addr_limit)
# define set_fs(val)	(current_thread_info()->addr_limit = (val))

/* Compare two segment descriptors for equality. */
# define segment_eq(a, b)	((a).seg == (b).seg)
57
58
0d6de953
MS
59#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0)
60
61#ifndef CONFIG_MMU
62
2660663f
MS
/*
 * Non-MMU range checking is delegated to ___range_ok(), which returns
 * 0 when [addr, addr + size) is a valid range.
 */
extern int ___range_ok(unsigned long addr, unsigned long size);

#define __range_ok(addr, size) \
		___range_ok((unsigned long)(addr), (unsigned long)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
/* Fix: the parameter was spelled "add" while the body expanded "addr",
 * so the macro silently captured whatever "addr" meant at the call
 * site instead of its own argument. */
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
70
838d2406
AB
71/* Undefined function to trigger linker error */
72extern int bad_user_access_length(void);
73
2660663f 74/* FIXME this is function for optimalization -> memcpy */
0d6de953
MS
/*
 * __get_user() - fetch a value from (nominally) user space, unchecked.
 *
 * With no MMU user and kernel share one address space, so a plain
 * dereference suffices for the 1/2/4-byte cases; 8-byte values go
 * through memcpy().  Any other size is rejected via __get_user_bad().
 * Evaluates to 0 on success, -EFAULT on an unsupported size (with
 * (var) zeroed).
 */
#define __get_user(var, ptr)				\
({							\
	int __gu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1:						\
	case 2:						\
	case 4:						\
		(var) = *(ptr);				\
		break;					\
	case 8:						\
		memcpy((void *) &(var), (ptr), 8);	\
		break;					\
	default:					\
		(var) = 0;				\
		__gu_err = __get_user_bad();		\
		break;					\
	}						\
	__gu_err;					\
})

/* Referencing the undefined bad_user_access_length() forces a link
 * error for unsupported sizes; -EFAULT if it were ever evaluated. */
#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))
96
0d6de953 97/* FIXME is not there defined __pu_val */
/*
 * __put_user() - store a value to (nominally) user space, unchecked.
 *
 * With no MMU a plain assignment suffices for 1/2/4 bytes; 8-byte
 * values are copied through a typed temporary with memcpy().  Any
 * other size is rejected via __put_user_bad().
 * Evaluates to 0 on success, -EFAULT on an unsupported size.
 */
#define __put_user(var, ptr)					\
({								\
	int __pu_err = 0;					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
		*(ptr) = (var);					\
		break;						\
	case 8: {						\
		typeof(*(ptr)) __pu_val = (var);		\
		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
		}						\
		break;						\
	default:						\
		__pu_err = __put_user_bad();			\
		break;						\
	}							\
	__pu_err;						\
})

/* Unsupported size: reference an undefined function to force a link
 * error; -EFAULT if it were ever evaluated. */
#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))
120
0d6de953
MS
/* No MMU: the checked and unchecked accessors are identical —
 * there is no separate user address space to validate against. */
#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))
2660663f 123
0d6de953
MS
/* No MMU: user/kernel copies are plain memcpy() and always report
 * complete success (0 bytes not copied). */
#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

/* The unchecked and _inatomic variants simply alias the above, since
 * no checking or faulting can happen here anyway. */
#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
			(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
			(__copy_from_user((to), (from), (n)))
2660663f
MS
133
134static inline unsigned long clear_user(void *addr, unsigned long size)
135{
136 if (access_ok(VERIFY_WRITE, addr, size))
137 size = __clear_user(addr, size);
138 return size;
139}
140
0d6de953 141/* Returns 0 if exception not found and fixup otherwise. */
2660663f
MS
142extern unsigned long search_exception_table(unsigned long);
143
0d6de953
MS
144extern long strncpy_from_user(char *dst, const char *src, long count);
145extern long strnlen_user(const char *src, long count);
146
147#else /* CONFIG_MMU */
148
/*
 * Address is valid if:
 * - "addr", "addr + size" and "size" are all below the limit
 *
 * OR-ing the three quantities lets one compare against the segment
 * limit reject an out-of-range start, an over-long size, and a
 * wrapping addr + size in a single test.
 */
#define access_ok(type, addr, size) \
	(get_fs().seg > (((unsigned long)(addr)) | \
		(size) | ((unsigned long)(addr) + (size))))
156
157/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
158 type?"WRITE":"READ",addr,size,get_fs().seg)) */
159
/*
 * All the __XXX versions macros/functions below do not perform
 * access checking. It is assumed that the necessary checks have been
 * already performed before the function (macro) is called.
 */

/* Checked accessors: validate the range first, then defer to the
 * unchecked __get_user()/__put_user(); evaluate to -EFAULT when the
 * range fails access_ok(). */
#define get_user(x, ptr)					\
({								\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))		\
		? __get_user((x), (ptr)) : -EFAULT;		\
})

#define put_user(x, ptr)					\
({								\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))		\
		? __put_user((x), (ptr)) : -EFAULT;		\
})
177
/*
 * Unchecked user fetch (MMU): select the load instruction by operand
 * size (lbu/lhu/lw for 1/2/4 bytes) and rely on the exception-table
 * fixup to turn a faulting load into -EFAULT.
 * NOTE(review): there is no 8-byte case, so sizeof(*(ptr)) == 8 yields
 * -EINVAL — confirm no caller needs 64-bit get_user here.
 * Evaluates to 0 on success, negative error otherwise; (x) receives
 * the loaded value (0 on the unsupported-size path).
 */
#define __get_user(x, ptr)						\
({									\
	unsigned long __gu_val;						\
	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
	long __gu_err;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__get_user_asm("lhu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
		break;							\
	default:							\
		__gu_val = 0; __gu_err = -EINVAL;			\
	}								\
	x = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

/*
 * Emit one faulting load:
 *   1: the load itself, registered in __ex_table with fixup 3:;
 *      on success fall through and clear the error (%0 = 0);
 *   3: fixup stub (in .fixup) sets %0 = -EFAULT and branches to 2:.
 */
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
			"1:"	insn	" %1, %2, r0;		\
			addk	%0, r0, r0;			\
			2:					\
			.section .fixup,\"ax\";			\
			3:	brid	2b;			\
			addik	%0, r0, %3;			\
			.previous;				\
			.section __ex_table,\"a\";		\
			.word	1b,3b;				\
			.previous;"				\
		: "=r"(__gu_err), "=r"(__gu_val)		\
		: "r"(__gu_ptr), "i"(-EFAULT)			\
	);							\
})
217
/*
 * Unchecked user store (MMU): select the store instruction by operand
 * size (sb/sh/sw for 1/2/4 bytes, a two-word sequence for 8); the
 * exception-table fixup converts a faulting store into -EFAULT.
 * Evaluates to 0 on success, negative error otherwise.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) volatile __gu_val = (x);			\
	long __gu_err = 0;						\
	switch (sizeof(__gu_val)) {					\
	case 1:								\
		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__put_user_asm("sw", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 8:								\
		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
		break;							\
	default:							\
		__gu_err = -EINVAL;					\
	}								\
	__gu_err;							\
})
240
/*
 * 64-bit store: copy the two 32-bit halves of __gu_val (addressed via
 * pointer operand %1) to user memory word by word.  Both stores (1:
 * and 2:) have __ex_table entries pointing at fixup 4:, which sets the
 * result to -EFAULT and resumes at 3:.  %0 doubles as the data scratch
 * register and the error/result register, hence the early-clobber
 * "=&r" constraint.
 */
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
({							\
__asm__ __volatile__ ("	lwi	%0, %1, 0;		\
		1:	swi	%0, %2, 0;		\
			lwi	%0, %1, 4;		\
		2:	swi	%0, %2, 4;		\
			addk	%0,r0,r0;		\
		3:					\
			.section .fixup,\"ax\";		\
		4:	brid	3b;			\
			addik	%0, r0, %3;		\
			.previous;			\
			.section __ex_table,\"a\";	\
			.word	1b,4b,2b,4b;		\
			.previous;"			\
		: "=&r"(__gu_err)			\
		: "r"(&__gu_val),			\
		  "r"(__gu_ptr), "i"(-EFAULT)		\
	);						\
})
261
/*
 * Emit one faulting store of __gu_val through __gu_ptr using @insn
 * (sb/sh/sw).  On success fall through and clear the error (%0 = 0);
 * a fault is routed via the __ex_table entry (1b -> 3b) to the .fixup
 * stub, which sets %0 = -EFAULT and branches back to 2:.
 */
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
			"1:"	insn	" %1, %2, r0;		\
			addk	%0, r0, r0;			\
			2:					\
			.section .fixup,\"ax\";			\
			3:	brid	2b;			\
			addik	%0, r0, %3;			\
			.previous;				\
			.section __ex_table,\"a\";		\
			.word	1b,3b;				\
			.previous;"				\
		: "=r"(__gu_err)				\
		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
	);							\
})
279
/*
 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
 *
 * Zeroes @size bytes of user memory one byte at a time; a faulting
 * store branches (via the __ex_table entry 1b -> 2b) to label 2: with
 * the loop counter — the number of bytes still unwritten — left in %0,
 * which becomes the return value.
 * NOTE(review): the asm leaves the __ex_table section with
 * ".section .text" rather than ".previous" — confirm the function is
 * always emitted into .text.
 */
static inline int clear_user(char *to, int size)
{
	if (size && access_ok(VERIFY_WRITE, to, size)) {
		__asm__ __volatile__ ("			\
			1:				\
				sb	r0, %2, r0;	\
				addik	%0, %0, -1;	\
				bneid	%0, 1b;		\
				addik	%2, %2, 1;	\
			2:				\
			.section __ex_table,\"a\";	\
			.word	1b,2b;			\
			.section .text;"		\
			: "=r"(size)			\
			: "0"(size), "r"(to)
		);
	}
	return size;
}
302
95dfbbe4
JW
/*
 * Checked user copies (MMU).  Per the kernel contract these return the
 * number of bytes NOT copied: 0 on complete success, up to @n on
 * failure.  Fix: a range that fails access_ok() now yields (n)
 * (nothing copied) instead of -EFAULT — a negative errno cast to
 * unsigned long broke callers computing "copied = n - ret", while any
 * non-zero value still signals failure to existing callers.
 */
#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
		copy_from_user((to), (from), (n))

#define copy_to_user(to, from, n)					\
	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
		__copy_tofrom_user((void __user *)(to),			\
			(__force const void __user *)(from), (n))	\
		: (n))

#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))

#define copy_from_user(to, from, n)					\
	(access_ok(VERIFY_READ, (from), (n)) ?				\
		__copy_tofrom_user((__force void __user *)(to),		\
			(void __user *)(from), (n))			\
		: (n))
321
0d6de953
MS
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);

/* Only the first byte is range-checked here; the low-level helpers
 * fault-check as they walk the string.  Note the differing failure
 * values: strncpy_from_user() -> -EFAULT, strnlen_user() -> 0,
 * matching the generic kernel convention for each. */
#define strncpy_from_user(to, from, len)	\
		(access_ok(VERIFY_READ, from, 1) ?	\
			__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len)	\
	(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
2660663f 330
0d6de953 331#endif /* CONFIG_MMU */
2660663f 332
95dfbbe4
JW
333extern unsigned long __copy_tofrom_user(void __user *to,
334 const void __user *from, unsigned long size);
335
2660663f
MS
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;	/* faulting insn / resume address */
};
351
352#endif /* __ASSEMBLY__ */
353#endif /* __KERNEL__ */
354
355#endif /* _ASM_MICROBLAZE_UACCESS_H */