metag/usercopy: Add early abort to copy_to_user
arch/metag/lib/usercopy.c
/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>			/* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE (3*L1_CACHE_BYTES)


/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner, so it should have virtually no cost.
 */

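/*
 * Illustrative sketch only (not built): roughly what the one-byte step of
 * the copy-to-user macros below expands to, with the duplicated store
 * written out. The first SETB to the un-incremented address exists purely
 * to force any fault to surface before the "real" store that carries the
 * __ex_table entry; the fixup at 3: accounts the byte as uncopied.
 */
#if 0
	asm volatile (" GETB D1Ar1,[%1++]\n"	/* load byte from kernel source */
		      " SETB [%0],D1Ar1\n"	/* probe write: faults early, if at all */
		      "2: SETB [%0++],D1Ar1\n"	/* real write, covered by __ex_table */
		      "1:\n"
		      " .section .fixup,\"ax\"\n"
		      " MOV D1Ar1,#0\n"
		      "3: ADD %2,%2,#1\n"	/* one more byte failed to copy */
		      " MOVT D1Ar1,#HI(1b)\n"
		      " JUMP D1Ar1,#LO(1b)\n"
		      " .previous\n"
		      " .section __ex_table,\"a\"\n"
		      " .long 2b,3b\n"
		      " .previous\n"
		      : "=r" (to), "=r" (from), "=r" (ret)
		      : "0" (to), "1" (from), "2" (ret)
		      : "D1Ar1", "memory");
#endif
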
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	asm volatile ( \
		COPY \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		" MOV D1Ar1,#0\n" \
		FIXUP \
		" MOVT D1Ar1,#HI(1b)\n" \
		" JUMP D1Ar1,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "memory")


#define __asm_copy_to_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %2,%2,#1\n", \
		" .long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"2: SETW [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#2\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"2: SETD [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"4: SETW [%0++],D1Ar1\n" COPY, \
		"5: ADD %2,%2,#2\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"6: SETB [%0++],D1Ar1\n", \
		"7: ADD %2,%2,#1\n", \
		" .long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"4: SETD [%0++],D1Ar1\n" COPY, \
		"5: ADD %2,%2,#4\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"6: SETB [%0++],D1Ar1\n", \
		"7: ADD %2,%2,#1\n", \
		" .long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"6: SETW [%0++],D1Ar1\n" COPY, \
		"7: ADD %2,%2,#2\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"8: SETB [%0++],D1Ar1\n", \
		"9: ADD %2,%2,#1\n", \
		" .long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"6: SETD [%0++],D1Ar1\n" COPY, \
		"7: ADD %2,%2,#4\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)
#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"8: SETB [%0++],D1Ar1\n", \
		"9: ADD %2,%2,#1\n", \
		" .long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"8: SETW [%0++],D1Ar1\n" COPY, \
		"9: ADD %2,%2,#2\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"10: SETB [%0++],D1Ar1\n", \
		"11: ADD %2,%2,#1\n", \
		" .long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"8: SETD [%0++],D1Ar1\n" COPY, \
		"9: ADD %2,%2,#4\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_8x64(to, from, ret) \
	asm volatile ( \
		" GETL D0Ar2,D1Ar1,[%1++]\n" \
		" SETL [%0],D0Ar2,D1Ar1\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %2,%2,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")

/*
 * Optimized copying loop using RAPF when the buffers are 64 bit aligned.
 *
 * n is automatically decremented inside the loop.
 * ret is left intact. If an error occurs we rewind the pointers
 * so that the original non-optimized code fills in this value
 * correctly.
 *
 * On fault:
 * > n holds the total number of uncopied bytes.
 *
 * > {'to','from'} are rewound so that the non-optimized code
 *   can do the proper fix up.
 *
 * DCACHE drops the cacheline, which helps to reduce cache
 * pollution.
 *
 * We introduce an extra SETL at the end of the loop to
 * ensure we don't fall off the loop before we catch all
 * errors.
 *
 * NOTICE:
 * LSM_STEP in TXSTATUS must be cleared in the fixup code.
 * Since we're using M{S,G}ETL, a fault might happen at
 * any address in the middle of the M{S,G}ETL, leaving
 * LSM_STEP with an incorrect value which can make
 * subsequent use of M{S,G}ET{L,D} go wrong.
 * I.e. if LSM_STEP was 1 when a fault occurred, the
 * next M{S,G}ET{L,D} would skip the first transfer,
 * thinking it had already been done.
 */
#define __asm_copy_user_64bit_rapf_loop( \
		to, from, ret, n, id, FIXUP) \
	asm volatile ( \
		".balign 8\n" \
		"MOV RAPF, %1\n" \
		"MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"MOV D0Ar6, #0\n" \
		"LSR D1Ar5, %3, #6\n" \
		"SUB TXRPT, D1Ar5, #2\n" \
		"MOV RAPF, %1\n" \
		"$Lloop"id":\n" \
		"ADD RAPF, %1, #64\n" \
		"21:\n" \
		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"22:\n" \
		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #32\n" \
		"23:\n" \
		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"24:\n" \
		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #32\n" \
		"DCACHE [%1+#-64], D0Ar6\n" \
		"BR $Lloop"id"\n" \
		\
		"MOV RAPF, %1\n" \
		"25:\n" \
		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"26:\n" \
		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #32\n" \
		"27:\n" \
		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"28:\n" \
		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %0, %0, #8\n" \
		"29:\n" \
		"SETL [%0++], D0.7, D1.7\n" \
		"SUB %3, %3, #32\n" \
		"1:" \
		"DCACHE [%1+#-64], D0Ar6\n" \
		"GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
		"GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
		"GETL D0.5, D1.5, [A0StP+#-24]\n" \
		"GETL D0.6, D1.6, [A0StP+#-16]\n" \
		"GETL D0.7, D1.7, [A0StP+#-8]\n" \
		"SUB A0StP, A0StP, #40\n" \
		" .section .fixup,\"ax\"\n" \
		"4:\n" \
		" ADD %0, %0, #8\n" \
		"3:\n" \
		" MOV D0Ar2, TXSTATUS\n" \
		" MOV D1Ar1, TXSTATUS\n" \
		" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
		" MOV TXSTATUS, D1Ar1\n" \
		FIXUP \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 21b,3b\n" \
		" .long 22b,3b\n" \
		" .long 23b,3b\n" \
		" .long 24b,3b\n" \
		" .long 25b,3b\n" \
		" .long 26b,3b\n" \
		" .long 27b,3b\n" \
		" .long 28b,3b\n" \
		" .long 29b,4b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
		: "0" (to), "1" (from), "2" (ret), "3" (n) \
		: "D1Ar1", "D0Ar2", "memory")

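/*
 * Rough C-level shape of the RAPF loop above, as an illustrative sketch
 * only (the real work is the inline asm, which drives the RAPF prefetch
 * register, a hardware loop via TXRPT, and DCACHE to drop source lines
 * once they have been consumed). prefetch_line(), copy_32_bytes() and
 * discard_line() are made-up helper names for this sketch.
 */
#if 0
	while (n >= 64) {			/* roughly: the hardware loop body */
		prefetch_line(src + 64);	/* "ADD RAPF, %1, #64" */
		copy_32_bytes(dst, src); dst += 32; src += 32; n -= 32;
		copy_32_bytes(dst, src); dst += 32; src += 32; n -= 32;
		discard_line(src - 64);		/* "DCACHE [%1+#-64], D0Ar6" */
	}
#endif
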
/* rewind 'to' and 'from' pointers when a fault occurs
 *
 * Rationale:
 * A fault always occurs on writing to the user buffer. A fault
 * is at a single address, so we need to rewind 'to' by only
 * 8 bytes (a single 64-bit write).
 * Since we do a complete read from the kernel buffer before
 * writing, we need to rewind 'from' as well. The amount to
 * rewind equals the number of faulty writes in the MSETL,
 * which is: [4 - (LSM_STEP-1)]*8
 * LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 * read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 * LSM_STEP will be 0. I.e. we do 4 writes in our case; if
 * a fault happens at the 4th write, LSM_STEP will be 0
 * instead of 4. The code copes with that.
 *
 * n is updated by the number of successful writes, which is:
 * n = n - (LSM_STEP-1)*8
 */
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #8\n" \
		"AND D0Ar2, D0Ar2, #0x7\n" \
		"ADDZ D0Ar2, D0Ar2, #4\n" \
		"SUB D0Ar2, D0Ar2, #1\n" \
		"MOV D1Ar1, #4\n" \
		"SUB D0Ar2, D1Ar1, D0Ar2\n" \
		"LSL D0Ar2, D0Ar2, #3\n" \
		"LSL D1Ar1, D1Ar1, #3\n" \
		"SUB D1Ar1, D1Ar1, D0Ar2\n" \
		"SUB %0, %0, #8\n" \
		"SUB %1, %1, D0Ar2\n" \
		"SUB %3, %3, D1Ar1\n")

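/*
 * For clarity, the fixup arithmetic above expressed in C. Illustrative
 * sketch only (not built): lsm_step stands for the LSM_STEP field already
 * extracted from TXSTATUS, and the helper name is made up for this sketch.
 */
#if 0
static void rapf_rewind_to_user_64(char __user **to, const char **from,
				   unsigned long *n, unsigned int lsm_step)
{
	unsigned int step = lsm_step ? lsm_step : 4;	/* 0 means the 4th write faulted */
	unsigned int faulty = (4 - (step - 1)) * 8;	/* bytes the MSETL did not complete */
	unsigned int done = 4 * 8 - faulty;		/* i.e. (step - 1) * 8 bytes written */

	*to -= 8;		/* step back over the single faulting write */
	*from -= faulty;	/* re-read what never reached user space */
	*n -= done;		/* only the successful writes count as copied */
}
#endif
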
/*
 * Optimized copying loop using RAPF when the buffers are 32 bit aligned.
 *
 * n is automatically decremented inside the loop.
 * ret is left intact. If an error occurs we rewind the pointers
 * so that the original non-optimized code fills in this value
 * correctly.
 *
 * On fault:
 * > n holds the total number of uncopied bytes.
 *
 * > {'to','from'} are rewound so that the non-optimized code
 *   can do the proper fix up.
 *
 * DCACHE drops the cacheline, which helps to reduce cache
 * pollution.
 *
 * We introduce an extra SETD at the end of the loop to
 * ensure we don't fall off the loop before we catch all
 * errors.
 *
 * NOTICE:
 * LSM_STEP in TXSTATUS must be cleared in the fixup code.
 * Since we're using M{S,G}ETD, a fault might happen at
 * any address in the middle of the M{S,G}ETD, leaving
 * LSM_STEP with an incorrect value which can make
 * subsequent use of M{S,G}ET{L,D} go wrong.
 * I.e. if LSM_STEP was 1 when a fault occurred, the
 * next M{S,G}ET{L,D} would skip the first transfer,
 * thinking it had already been done.
 */
#define __asm_copy_user_32bit_rapf_loop( \
		to, from, ret, n, id, FIXUP) \
	asm volatile ( \
		".balign 8\n" \
		"MOV RAPF, %1\n" \
		"MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"MOV D0Ar6, #0\n" \
		"LSR D1Ar5, %3, #6\n" \
		"SUB TXRPT, D1Ar5, #2\n" \
		"MOV RAPF, %1\n" \
		"$Lloop"id":\n" \
		"ADD RAPF, %1, #64\n" \
		"21:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"22:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #16\n" \
		"23:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"24:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #16\n" \
		"25:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"26:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #16\n" \
		"27:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"28:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #16\n" \
		"DCACHE [%1+#-64], D0Ar6\n" \
		"BR $Lloop"id"\n" \
		\
		"MOV RAPF, %1\n" \
		"29:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"30:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #16\n" \
		"31:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"32:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #16\n" \
		"33:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"34:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %3, %3, #16\n" \
		"35:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"36:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"SUB %0, %0, #4\n" \
		"37:\n" \
		"SETD [%0++], D0.7\n" \
		"SUB %3, %3, #16\n" \
		"1:" \
		"DCACHE [%1+#-64], D0Ar6\n" \
		"GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
		"GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
		"GETL D0.5, D1.5, [A0StP+#-24]\n" \
		"GETL D0.6, D1.6, [A0StP+#-16]\n" \
		"GETL D0.7, D1.7, [A0StP+#-8]\n" \
		"SUB A0StP, A0StP, #40\n" \
		" .section .fixup,\"ax\"\n" \
		"4:\n" \
		" ADD %0, %0, #4\n" \
		"3:\n" \
		" MOV D0Ar2, TXSTATUS\n" \
		" MOV D1Ar1, TXSTATUS\n" \
		" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
		" MOV TXSTATUS, D1Ar1\n" \
		FIXUP \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 21b,3b\n" \
		" .long 22b,3b\n" \
		" .long 23b,3b\n" \
		" .long 24b,3b\n" \
		" .long 25b,3b\n" \
		" .long 26b,3b\n" \
		" .long 27b,3b\n" \
		" .long 28b,3b\n" \
		" .long 29b,3b\n" \
		" .long 30b,3b\n" \
		" .long 31b,3b\n" \
		" .long 32b,3b\n" \
		" .long 33b,3b\n" \
		" .long 34b,3b\n" \
		" .long 35b,3b\n" \
		" .long 36b,3b\n" \
		" .long 37b,4b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
		: "0" (to), "1" (from), "2" (ret), "3" (n) \
		: "D1Ar1", "D0Ar2", "memory")

/* rewind 'to' and 'from' pointers when a fault occurs
 *
 * Rationale:
 * A fault always occurs on writing to the user buffer. A fault
 * is at a single address, so we need to rewind 'to' by only
 * 4 bytes (a single 32-bit write).
 * Since we do a complete read from the kernel buffer before
 * writing, we need to rewind 'from' as well. The amount to
 * rewind equals the number of faulty writes in the MSETD,
 * which is: [4 - (LSM_STEP-1)]*4
 * LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 * read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETD,
 * LSM_STEP will be 0. I.e. we do 4 writes in our case; if
 * a fault happens at the 4th write, LSM_STEP will be 0
 * instead of 4. The code copes with that.
 *
 * n is updated by the number of successful writes, which is:
 * n = n - (LSM_STEP-1)*4
 */
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #8\n" \
		"AND D0Ar2, D0Ar2, #0x7\n" \
		"ADDZ D0Ar2, D0Ar2, #4\n" \
		"SUB D0Ar2, D0Ar2, #1\n" \
		"MOV D1Ar1, #4\n" \
		"SUB D0Ar2, D1Ar1, D0Ar2\n" \
		"LSL D0Ar2, D0Ar2, #2\n" \
		"LSL D1Ar1, D1Ar1, #2\n" \
		"SUB D1Ar1, D1Ar1, D0Ar2\n" \
		"SUB %0, %0, #4\n" \
		"SUB %1, %1, D0Ar2\n" \
		"SUB %3, %3, D1Ar1\n")

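/*
 * Worked example of the 32-bit fixup arithmetic above, illustrative
 * sketch only (not built): a fault in the MSETD with LSM_STEP == 2 means
 * exactly one 4-byte write had succeeded.
 */
#if 0
	unsigned int lsm_step = 2;			/* example value */
	unsigned int faulty = (4 - (lsm_step - 1)) * 4;	/* == 12: bytes re-read from 'from' */
	unsigned int done = 16 - faulty;		/* ==  4: subtracted from n */
	/* 'to' additionally steps back 4 bytes, over the single faulting write */
#endif
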
unsigned long __copy_user(void __user *pdst, const void *psrc,
			  unsigned long n)
{
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							   n, "64cu");
		}
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
		if (retn)
			return retn + n;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	/*
	 * If we get here, retn correctly reflects the number of failing
	 * bytes.
	 */
	return retn;
}
EXPORT_SYMBOL(__copy_user);
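
/*
 * For context: a minimal sketch of the kind of wrapper that sits on top
 * of __copy_user(). The real wrappers live in asm/uaccess.h and may
 * differ; copy_to_user_sketch() is a made-up name. The key contract is
 * that __copy_user() returns the number of bytes that could NOT be
 * copied, so 0 means complete success.
 */
#if 0
static inline unsigned long
copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_WRITE, to, n)))
		return __copy_user(to, from, n);
	return n;
}
#endif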

#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %2,%2,#1\n" \
		" SETB [%0++],D1Ar1\n", \
		" .long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		"2: SETW [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#2\n" \
		" SETW [%0++],D1Ar1\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n" \
		" SETB [%0++],D1Ar1\n", \
		" .long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		"2: SETD [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#4\n" \
		" SETD [%0++],D1Ar1\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")


#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile ( \
		" GETL D0Ar2,D1Ar1,[%1++]\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		" MOV D1Ar1,#0\n" \
		" MOV D0Ar2,#0\n" \
		"3: ADD %2,%2,#8\n" \
		" SETL [%0++],D0Ar2,D1Ar1\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=a" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")

/* rewind 'from' pointer when a fault occurs
 *
 * Rationale:
 * A fault occurs while reading from the user buffer, which is the
 * source. Since the fault is at a single address, we only need to
 * rewind by 8 bytes.
 * Since we don't write to the kernel buffer until the read has
 * succeeded, the kernel buffer is already in the right state and
 * needs no correction.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
		"SUB %1, %1, #8\n")

/* rewind 'from' pointer when a fault occurs
 *
 * Rationale:
 * A fault occurs while reading from the user buffer, which is the
 * source. Since the fault is at a single address, we only need to
 * rewind by 4 bytes.
 * Since we don't write to the kernel buffer until the read has
 * succeeded, the kernel buffer is already in the right state and
 * needs no correction.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
		"SUB %1, %1, #4\n")


/* Copy from user to kernel, zeroing the bytes that were inaccessible in
   userland. The return value is the number of bytes that were
   inaccessible. */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
				  unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;
	register const char __user *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
		if (retn)
			goto copy_exception_bytes;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_from_user_1(dst, src, retn);
			n--;
			if (retn)
				goto copy_exception_bytes;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_from_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			goto copy_exception_bytes;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_from_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				goto copy_exception_bytes;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* Copy using fast 64bit rapf */
			__asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
							     n, "64cuz");
		}
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				goto copy_exception_bytes;
		}
	}

	if (n >= RAPF_MIN_BUF_SIZE) {
		/* Copy using fast 32bit rapf */
		__asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
						     n, "32cuz");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				goto copy_exception_bytes;
		}
	}
#endif

	while (n >= 4) {
		__asm_copy_from_user_4(dst, src, retn);
		n -= 4;

		if (retn)
			goto copy_exception_bytes;
	}

	/* If we get here, there were no memory read faults. */
	switch (n) {
	/* These copies are at least "naturally aligned" (so we don't
	   have to check each byte), due to the src alignment code.
	   The *_3 case *will* get the correct count for retn. */
	case 0:
		/* This case deliberately left in (if you have doubts check the
		   generated assembly code). */
		break;
	case 1:
		__asm_copy_from_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_from_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_from_user_3(dst, src, retn);
		break;
	}

	/* If we get here, retn correctly reflects the number of failing
	   bytes. */
	return retn;

 copy_exception_bytes:
	/* We already have "retn" bytes cleared, and need to clear the
	   remaining "n" bytes. A non-optimized simple byte-for-byte in-line
	   memset is preferred here, since this isn't speed-critical code and
	   we'd rather have this be a leaf function than call memset. */
	{
		char *endp;
		for (endp = dst + n; dst < endp; dst++)
			*dst = 0;
	}

	return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
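
/*
 * For context: a minimal sketch of the kind of wrapper that sits on top
 * of __copy_user_zeroing(). The real wrappers live in asm/uaccess.h and
 * may differ; copy_from_user_sketch() is a made-up name. The zeroing
 * guarantees that a short copy never leaves uninitialised kernel memory
 * in the destination buffer.
 */
#if 0
static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n)))
		return __copy_user_zeroing(to, from, n);
	memset(to, 0, n);	/* treat an unverifiable range like a full fault */
	return n;
}
#endif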

#define __asm_clear_8x64(to, ret) \
	asm volatile ( \
		" MOV D0Ar2,#0\n" \
		" MOV D1Ar1,#0\n" \
		" SETL [%0],D0Ar2,D1Ar1\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %1,%1,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "D0Ar2", "memory")

/* Zero userspace. */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile ( \
		" MOV D1Ar1,#0\n" \
		CLEAR \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		FIXUP \
		" MOVT D1Ar1,#HI(1b)\n" \
		" JUMP D1Ar1,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		" SETB [%0],D1Ar1\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %1,%1,#1\n", \
		" .long 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		" SETW [%0],D1Ar1\n" \
		"2: SETW [%0++],D1Ar1\n", \
		"3: ADD %1,%1,#2\n", \
		" .long 2b,3b\n")

#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		"2: SETW [%0++],D1Ar1\n" \
		" SETB [%0],D1Ar1\n" \
		"3: SETB [%0++],D1Ar1\n", \
		"4: ADD %1,%1,#2\n" \
		"5: ADD %1,%1,#1\n", \
		" .long 2b,4b\n" \
		" .long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"2: SETD [%0++],D1Ar1\n" CLEAR, \
		"3: ADD %1,%1,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"4: SETD [%0++],D1Ar1\n" CLEAR, \
		"5: ADD %1,%1,#4\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"6: SETD [%0++],D1Ar1\n" CLEAR, \
		"7: ADD %1,%1,#4\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"8: SETD [%0++],D1Ar1\n" CLEAR, \
		"9: ADD %1,%1,#4\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")

unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
	register char __user *dst asm ("D0Re0") = pto;
	register unsigned long n asm ("D1Re0") = pn;
	register unsigned long retn asm ("D0Ar6") = 0;

	if ((unsigned long) dst & 1) {
		__asm_clear_1(dst, retn);
		n--;
	}

	if ((unsigned long) dst & 2) {
		__asm_clear_2(dst, retn);
		n -= 2;
	}

	/* 64 bit copy loop */
	if (!((__force unsigned long) dst & 7)) {
		while (n >= 8) {
			__asm_clear_8x64(dst, retn);
			n -= 8;
		}
	}

	while (n >= 16) {
		__asm_clear_16(dst, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_clear_4(dst, retn);
		n -= 4;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_clear_1(dst, retn);
		break;
	case 2:
		__asm_clear_2(dst, retn);
		break;
	case 3:
		__asm_clear_3(dst, retn);
		break;
	}

	return retn;
}
EXPORT_SYMBOL(__do_clear_user);

unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;
	asm volatile (
		" GETB %0,[%2]\n"
		"1:\n"
		" GETB %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;
	asm volatile (
		" GETW %0,[%2]\n"
		"1:\n"
		" GETW %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;
	asm volatile (
		" GETD %0,[%2]\n"
		"1:\n"
		" GETD %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETB [%2],%1\n"
		"1:\n"
		" SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETW [%2],%1\n"
		"1:\n"
		" SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETD [%2],%1\n"
		"1:\n"
		" SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		" MOV %0,#0\n"
		" SETL [%2],%1,%t1\n"
		"1:\n"
		" SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);

long strnlen_user(const char __user *src, long count)
{
	long res;

	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile (" MOV D0Ar4, %1\n"
		      " MOV D0Ar6, %2\n"
		      "0:\n"
		      " SUBS D0FrT, D0Ar6, #0\n"
		      " SUB D0Ar6, D0Ar6, #1\n"
		      " BLE 2f\n"
		      " GETB D0FrT, [D0Ar4+#1++]\n"
		      "1:\n"
		      " TST D0FrT, #255\n"
		      " BNE 0b\n"
		      "2:\n"
		      " SUB %0, %2, D0Ar6\n"
		      "3:\n"
		      " .section .fixup,\"ax\"\n"
		      "4:\n"
		      " MOV %0, #0\n"
		      " MOVT D0FrT,#HI(3b)\n"
		      " JUMP D0FrT,#LO(3b)\n"
		      " .previous\n"
		      " .section __ex_table,\"a\"\n"
		      " .long 1b,4b\n"
		      " .previous\n"
		      : "=r" (res)
		      : "r" (src), "r" (count)
		      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);

long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 * So do we.
	 *
	 * This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *		;
	 *
	 *	res = count - tmp1;
	 *
	 * with tweaks.
	 */

	asm volatile (" MOV %0,%3\n"
		      "1:\n"
		      " GETB D0FrT,[%2++]\n"
		      "2:\n"
		      " CMP D0FrT,#0\n"
		      " SETB [%1++],D0FrT\n"
		      " BEQ 3f\n"
		      " SUBS %0,%0,#1\n"
		      " BNZ 1b\n"
		      "3:\n"
		      " SUB %0,%3,%0\n"
		      "4:\n"
		      " .section .fixup,\"ax\"\n"
		      "5:\n"
		      " MOV %0,%7\n"
		      " MOVT D0FrT,#HI(4b)\n"
		      " JUMP D0FrT,#LO(4b)\n"
		      " .previous\n"
		      " .section __ex_table,\"a\"\n"
		      " .long 2b,5b\n"
		      " .previous"
		      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		      : "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);