#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/preempt.h>
#include <asm/uaccess.h>

/*
 * These routines enable/disable the pagefault handler: with pagefaults
 * disabled, the handler will not take any locks and will go straight
 * to the fixup table.
 *
 * They closely resemble the preempt_disable/enable calls and in fact
 * are currently identical, because disabling preemption is presently
 * the only way to tell the pagefault handler to behave this way.
 * Disabling preemption is a side effect here, not the goal.
 */
static inline void pagefault_disable(void)
{
        inc_preempt_count();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
        dec_preempt_count();
        /*
         * make sure the decrement is visible before we check whether
         * a reschedule is needed.
         */
        barrier();
        preempt_check_resched();
}

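/*
 * Illustrative sketch (not part of the original header): a typical caller
 * brackets an atomic user-space copy with pagefault_disable()/
 * pagefault_enable() so that a fault cannot sleep and instead fails over
 * to the fixup table.  The helper name is hypothetical.
 */
static inline unsigned long example_copy_from_user_atomic(void *dst,
                                const void __user *src, unsigned long n)
{
        unsigned long uncopied;

        pagefault_disable();
        /* must not sleep here; a fault leaves the remaining byte count */
        uncopied = __copy_from_user_inatomic(dst, src, n);
        pagefault_enable();

        return uncopied;
}
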
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return __copy_from_user(to, from, n);
}

#endif          /* ARCH_HAS_NOCACHE_UACCESS */

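/*
 * Illustrative sketch (not part of the original header): a write path that
 * copies user data into a page it will not read back soon can use the
 * _nocache variant to avoid polluting the CPU cache; on architectures
 * without ARCH_HAS_NOCACHE_UACCESS it falls back to an ordinary copy, as
 * above.  The helper name is hypothetical.
 */
static inline unsigned long example_copy_for_write(void *kaddr,
                                const void __user *buf, unsigned long bytes)
{
        /*
         * May sleep on a fault; use __copy_from_user_inatomic_nocache()
         * between pagefault_disable()/pagefault_enable() if sleeping is
         * not allowed.
         */
        return __copy_from_user_nocache(kaddr, buf, bytes);
}
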
/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from - its type is typeof(retval)*
 * @retval: read into this variable
 *
 * Safely read from address @addr into variable @retval.  If a kernel fault
 * happens, handle that and return -EFAULT.
 * We ensure that the __get_user() is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_sem.  This makes
 * probe_kernel_address() suitable for use within regions where the caller
 * already holds mmap_sem, or other locks which nest inside mmap_sem.
 * This must be a macro because __get_user() needs to know the types of the
 * args.
 *
 * The macro temporarily switches to KERNEL_DS with set_fs() and restores
 * the previous address limit before returning, so callers need not do
 * that themselves.
 */
#define probe_kernel_address(addr, retval)              \
        ({                                              \
                long ret;                               \
                mm_segment_t old_fs = get_fs();         \
                                                        \
                set_fs(KERNEL_DS);                      \
                pagefault_disable();                    \
                ret = __get_user(retval, (__force typeof(retval) __user *)(addr));              \
                pagefault_enable();                     \
                set_fs(old_fs);                         \
                ret;                                    \
        })

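/*
 * Illustrative sketch (not part of the original header): probing a possibly
 * invalid kernel pointer, e.g. when decoding an instruction word from a
 * fault handler.  The function and variable names are hypothetical.
 */
static inline long example_read_insn(unsigned long ip, unsigned int *dest)
{
        unsigned int insn;
        long err;

        /* returns 0 on success, -EFAULT if @ip is not a readable address */
        err = probe_kernel_address((unsigned int *)ip, insn);
        if (!err)
                *dest = insn;
        return err;
}
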
#endif          /* __LINUX_UACCESS_H__ */