x86: some lock annotations for user copy paths
[linux-2.6-block.git] / include/asm-x86/uaccess.h
#ifndef _ASM_UACCESS_H_
#define _ASM_UACCESS_H_
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <linux/lockdep.h>
#include <linux/sched.h>
#include <asm/asm.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

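/*
 * Illustrative use (not part of this header; function and buffer names
 * are hypothetical): the classic pattern for letting kernel code pass a
 * kernel buffer through a user-copy path is to temporarily widen the
 * address limit and restore it afterwards.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = func_taking_user_ptr((void __user *)kbuf, len);
 *	set_fs(old_fs);
 */
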
#define __addr_ok(addr)					\
	((unsigned long __force)(addr) <		\
	 (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
 *
 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
 */

#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})

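/*
 * A plain-C sketch of what the asm above computes (illustrative only,
 * not a drop-in replacement): the add sets the carry flag when
 * addr + size wraps, and the two sbb instructions fold that carry and
 * the limit comparison into one nonzero-on-failure flag word.
 *
 *	unsigned long sum = addr + size;
 *	int not_ok = (sum < addr) ||
 *		     (sum > current_thread_info()->addr_limit.seg);
 */
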
/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))

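/*
 * Illustrative use (names hypothetical): reject a bad range up front,
 * remembering that a nonzero access_ok() only means "plausible", not
 * "guaranteed mapped" - the copy itself may still return -EFAULT.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */
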
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

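/*
 * A minimal sketch (not the kernel's actual implementation) of how a
 * fault handler consults this table: find the entry whose insn matches
 * the faulting IP and resume at its fixup address.
 * search_exception_tables() is the real kernel interface; the
 * surrounding code is illustrative.
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(regs->ip);
 *	if (e) {
 *		regs->ip = e->fixup;
 *		return 1;
 *	}
 *	return 0;
 */
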
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_x(size, ret, x, ptr)			\
	asm volatile("call __get_user_" #size		\
		     : "=a" (ret), "=d" (x)		\
		     : "0" (ptr))

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#ifdef CONFIG_X86_32
#define __get_user_8(__ret_gu, __val_gu, ptr)			\
		__get_user_x(X, __ret_gu, __val_gu, ptr)
#else
#define __get_user_8(__ret_gu, __val_gu, ptr)			\
		__get_user_x(8, __ret_gu, __val_gu, ptr)
#endif

#define get_user(x, ptr)					\
({								\
	int __ret_gu;						\
	unsigned long __val_gu;					\
	__chk_user_ptr(ptr);					\
	might_sleep();						\
	if (current->mm)					\
		might_lock_read(&current->mm->mmap_sem);	\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__get_user_x(1, __ret_gu, __val_gu, ptr);	\
		break;						\
	case 2:							\
		__get_user_x(2, __ret_gu, __val_gu, ptr);	\
		break;						\
	case 4:							\
		__get_user_x(4, __ret_gu, __val_gu, ptr);	\
		break;						\
	case 8:							\
		__get_user_8(__ret_gu, __val_gu, ptr);		\
		break;						\
	default:						\
		__get_user_x(X, __ret_gu, __val_gu, ptr);	\
		break;						\
	}							\
	(x) = (__typeof__(*(ptr)))__val_gu;			\
	__ret_gu;						\
})

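/*
 * Illustrative use (function and argument names hypothetical): a
 * syscall copying one int from user space, with get_user() doing both
 * the range check and the fault-safe load.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */
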
#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_u64(x, addr, err)				\
	asm volatile("1:	movl %%eax,0(%2)\n"		\
		     "2:	movl %%edx,4(%2)\n"		\
		     "3:\n"					\
		     ".section .fixup,\"ax\"\n"			\
		     "4:	movl %3,%0\n"			\
		     "	jmp 3b\n"				\
		     ".previous\n"				\
		     _ASM_EXTABLE(1b, 4b)			\
		     _ASM_EXTABLE(2b, 4b)			\
		     : "=r" (err)				\
		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_u64(x, ptr, retval) \
	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. Clobbers %rbx.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

#ifdef CONFIG_X86_WP_WORKS_OK

/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_sleep();						\
	if (current->mm)					\
		might_lock_read(&current->mm->mmap_sem);	\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__ret_pu;						\
})

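/*
 * Illustrative use (names hypothetical): the mirror image of the
 * get_user() example above, storing one int back to user space.
 *
 *	if (put_user(val, (int __user *)arg))
 *		return -EFAULT;
 */
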
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	might_sleep();							\
	if (current->mm)						\
		might_lock_read(&current->mm->mmap_sem);		\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr))__pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr))__pus_tmp = x;			\
	__ret_pu = 0;						\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
				sizeof(*(ptr))) != 0))		\
		__ret_pu = -EFAULT;				\
	__ret_pu;						\
})
#endif

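/*
 * Descriptive note: the #else branch above covers CONFIG_X86_WP_WORKS_OK
 * being unset - early i386 CPUs that do not honour the WP bit in
 * supervisor mode - so put_user() cannot rely on the CPU faulting on a
 * read-only user page and instead routes through __copy_to_user_ll().
 */
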
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	might_sleep();							\
	if (current->mm)						\
		might_lock_read(&current->mm->mmap_sem);		\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

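/*
 * How the fixup works (descriptive note): label 1 is the load that may
 * fault; _ASM_EXTABLE(1b, 3b) records the (1b, 3b) pair in the
 * exception table, so a fault at 1 resumes at 3, which loads errret
 * into err, zeroes the destination (the documented "x is set to zero
 * on error" behaviour), and jumps back to the straight-line path at 2.
 */
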
#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__pu_err;						\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	long __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

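/*
 * Why the hack exists (descriptive note): an "m" operand built from a
 * plain dereference would tell gcc that only sizeof(*(x)) bytes are
 * touched. Casting through a deliberately oversized dummy struct makes
 * the memory operand cover the whole access region, so gcc cannot
 * cache values across the asm or reorder accesses around it.
 */
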
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
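
/*
 * Illustrative use (names hypothetical): after one access_ok() check,
 * the unchecked variants avoid re-validating the range on every
 * element.
 *
 *	if (!access_ok(VERIFY_READ, uarr, n * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++) {
 *		if (__get_user(tmp, &uarr[i]))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */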

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include "uaccess_32.h"
#else
# define ARCH_HAS_SEARCH_EXTABLE
# include "uaccess_64.h"
#endif

#endif /* _ASM_UACCESS_H_ */