/* arch/sparc/include/asm/uaccess_64.h */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS ((mm_segment_t) { ASI_P })
#define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b) ((a).seg == (b).seg)

#define set_fs(val) \
do { \
	set_thread_current_ds((val).seg); \
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while (0)

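/*
 * Illustrative usage sketch (not part of the original header): the
 * classic pattern for pointing the uaccess routines at a kernel buffer
 * is to save the current segment, switch to KERNEL_DS (so %asi selects
 * ASI_P), and restore the old segment afterwards.  kernel_read_sketch()
 * and do_read() are hypothetical; only get_fs(), set_fs() and KERNEL_DS
 * come from this file.
 *
 *	static long kernel_read_sketch(long (*do_read)(char __user *, long),
 *				       char *kbuf, long len)
 *	{
 *		mm_segment_t old_fs = get_fs();
 *		long ret;
 *
 *		set_fs(KERNEL_DS);
 *		ret = do_read((char __user *)kbuf, len);
 *		set_fs(old_fs);
 *		return ret;
 *	}
 */
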
static inline int __access_ok(const void __user *addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user *addr, unsigned long size)
{
	return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};

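/*
 * Illustrative sketch (not part of the original header): on a faulting
 * access, the trap handler looks the trap PC up in the sorted __ex_table
 * and, if an entry exists, resumes at its fixup address instead of
 * signalling the task.  fixup_fault_sketch() is hypothetical; the
 * kernel's generic lookup helper is search_exception_tables().
 *
 *	static int fixup_fault_sketch(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *e;
 *
 *		e = search_exception_tables(regs->tpc);
 *		if (!e)
 *			return 0;
 *		regs->tpc = e->fixup;
 *		regs->tnpc = regs->tpc + 4;
 *		return 1;
 *	}
 */
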
extern void __ret_efault(void);
extern void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)

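/*
 * Illustrative usage sketch (not part of the original header): a typical
 * single-value round trip.  sys_double_sketch() is hypothetical; it reads
 * an int from userspace, doubles it, and writes it back.  Each accessor
 * evaluates to 0 on success or -EFAULT on a faulting access.
 *
 *	long sys_double_sketch(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		if (put_user(val * 2, uptr))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
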
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
	case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
	case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
	case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Put user asm, inline. */\n" \
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t" \
	"clr %0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))

extern int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (size) { \
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
	default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
	} \
	data = (type) __gu_val; \
	__gu_ret; \
})

#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \
	register unsigned long __gu_val __asm__ ("l1"); \
	switch (size) { \
	case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
	case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
	case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \
	case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \
	default: if (__get_user_bad()) return retval; \
	} \
	data = (type) __gu_val; \
})

#define __get_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Get user asm, inline. */\n" \
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t" \
	"clr %0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"clr %1\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))

#define __get_user_asm_ret(x, size, addr, retval) \
if (__builtin_constant_p(retval) && retval == -EFAULT) \
__asm__ __volatile__( \
	"/* Get user asm ret, inline. */\n" \
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b,__ret_efault\n\n\t" \
	".previous\n\t" \
	: "=r" (x) : "r" (__m(addr))); \
else \
__asm__ __volatile__( \
	"/* Get user asm ret, inline. */\n" \
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"ret\n\t" \
	" restore %%g0, %2, %%o0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (x) : "r" (__m(addr)), "i" (retval))

extern int __get_user_bad(void);

extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
	__compiletime_error("copy_from_user() buffer size is not provably correct")
#else
	__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

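/*
 * Illustrative sketch (not part of the original header): the
 * __compiletime_object_size() test in copy_from_user() below routes
 * provably-oversized copies to copy_from_user_overflow(), whose
 * attribute above turns the call into a build-time diagnostic (a hard
 * error under CONFIG_DEBUG_STRICT_USER_COPY_CHECKS, else a warning).
 * For example, with a hypothetical user pointer uptr:
 *
 *	char buf[16];
 *
 *	copy_from_user(buf, uptr, 32);
 *
 * Here sz == 16 and size == 32, so the overflow branch is chosen at
 * compile time and the diagnostic fires.
 */
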
extern unsigned long __must_check ___copy_from_user(void *to,
						    const void __user *from,
						    unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	int sz = __compiletime_object_size(to);
	unsigned long ret = size;

	if (likely(sz == -1 || sz >= size)) {
		ret = ___copy_from_user(to, from, size);
		if (unlikely(ret))
			ret = copy_from_user_fixup(to, from, size);
	} else {
		copy_from_user_overflow();
	}
	return ret;
}
#define __copy_from_user copy_from_user

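/*
 * Illustrative usage sketch (not part of the original header): the
 * return value is the number of bytes left uncopied, so any nonzero
 * result after the fixup pass means part of the user range was
 * inaccessible.  read_cfg_sketch() and struct cfg are hypothetical;
 * copy_to_user() below follows the same pattern in the other direction.
 *
 *	struct cfg { int a, b; };
 *
 *	static long read_cfg_sketch(struct cfg *k, const struct cfg __user *u)
 *	{
 *		if (copy_from_user(k, u, sizeof(*k)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
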
extern unsigned long __must_check ___copy_to_user(void __user *to,
						  const void *from,
						  unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
					unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

extern unsigned long __must_check ___copy_in_user(void __user *to,
						  const void __user *from,
						  unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
					unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

extern unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
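
/*
 * Illustrative usage sketch (not part of the original header):
 * strncpy_from_user() returns the length of the copied string on
 * success and -EFAULT on an unreadable source; a return value equal
 * to the buffer size means the name did not fit (no room for the NUL).
 * get_name_sketch() is hypothetical.
 *
 *	static long get_name_sketch(char *kname, const char __user *uname,
 *				    long size)
 *	{
 *		long len = strncpy_from_user(kname, uname, size);
 *
 *		if (len < 0)
 *			return len;
 *		if (len == size)
 *			return -ENAMETOOLONG;
 *		return 0;
 *	}
 */
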
#define __copy_to_user_inatomic ___copy_to_user
#define __copy_from_user_inatomic ___copy_from_user

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */