// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 * Written by: Lennert Buytenhek and Nicolas Pitre
 * Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

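/*
 * pin_page_for_write() - check that a write to a user page cannot fault
 * and take the page table lock protecting that page.
 *
 * Returns 1 with *ptep and *ptlp set when the destination page is
 * present, young, writable and dirty, i.e. a write is known not to
 * fault.  For a huge (THP or HugeTLB) destination, *ptep is set to NULL
 * and the caller must drop the lock with spin_unlock(*ptlp) rather than
 * pte_unmap_unlock().  Returns 0 when the caller must first fault the
 * page in.
 */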
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
		return 0;

	pud = pud_offset(p4d, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

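/*
 * Copy to user space one page at a time: pin each destination page with
 * pin_page_for_write() so that the plain __memcpy() below cannot fault,
 * and fall back to __put_user() to fault the next page in whenever
 * pinning fails.
 */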
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	/* the mmap lock is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				mmap_read_lock(current->mm);
		}

		/* number of bytes up to the end of the page holding 'to' */
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		__memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
					  from, n);
	}
	return n;
}

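#if 0

/*
 * Illustrative sketch: arm_copy_to_user() is normally reached through
 * the generic copy_to_user() wrapper rather than called directly.  A
 * driver read() method might use it as below; example_read and kbuf
 * are hypothetical names, not kernel interfaces, and <linux/fs.h>
 * would be needed for struct file.
 */
static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	static const char kbuf[] = "hello\n";

	if (count > sizeof(kbuf))
		count = sizeof(kbuf);
	/* copy_to_user() returns the number of bytes it could NOT copy */
	if (copy_to_user(buf, kbuf, count))
		return -EFAULT;
	return count;
}

#endif
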
/*
 * Clear user space one page at a time, pinning each destination page
 * with pin_page_for_write() exactly as __copy_to_user_memcpy() does,
 * but using __memset() in place of __memcpy().
 */
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)addr))
				goto out;
			mmap_read_lock(current->mm);
		}

		/* number of bytes up to the end of the page holding addr */
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		__memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}
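
/*
 * Note: this file is only built when CONFIG_UACCESS_WITH_MEMCPY is
 * enabled (see arch/arm/lib/Makefile); otherwise the plain assembly
 * copy_to_user/clear_user implementations are used instead.
 */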

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. Some overhead (small, but still
 * overhead) would be implied by a runtime-determined threshold, and so
 * far the measurements on the targets concerned didn't show a
 * worthwhile variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make some sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif