ARC: uaccess: get_user to zero out dest in case of fault
[linux-2.6-block.git] / arch / arc / include / asm / uaccess.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: June 2010
 *  -__clear_user( ) called multiple times during elf load was byte loop
 *   converted to do as much word clear as possible.
 *
 * vineetg: Dec 2009
 *  -Hand crafted constant propagation for "constant" copy sizes
 *  -stock kernel shrunk by 33K at -O3
 *
 * vineetg: Sept 2009
 *  -Added option to (UN)inline copy_(to|from)_user to reduce code sz
 *  -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
 *  -Enabled when doing -Os
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H

#include <linux/sched.h>
#include <asm/errno.h>
#include <linux/string.h>	/* for generic string functions */


#define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))

/*
 * Algorithmically, for __user_ok() we want to do:
 *	(start < TASK_SIZE) && (start+len < TASK_SIZE)
 * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
 * emitted directly in code.
 *
 * This can however be rewritten as follows:
 *	(len <= TASK_SIZE) && (start+len < TASK_SIZE)
 *
 * Because it essentially checks if the buffer end is within limit and @len is
 * non-negative, which implies that the buffer start will be within limit too.
 *
 * The reason for rewriting is that, for the majority of cases, @len is a
 * compile time constant, causing the first sub-expression to be compile time
 * subsumed.
 *
 * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
 * so we check against TASK_SIZE using get_fs(), since the addr_limit load
 * from mem would already have been done at this call site for __kernel_ok().
 */
#define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
				 ((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
				 likely(__user_ok((addr), (sz))))
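
/*
 * A minimal usage sketch, assuming a hypothetical __user buffer @ubuf
 * checked against a compile-time constant size:
 *
 *	if (!__access_ok((unsigned long)ubuf, 64))
 *		return -EFAULT;
 *
 * With the constant size, the first half of __user_ok() is folded away at
 * compile time, leaving only the compare against get_fs().
 */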

/*********** Single byte/hword/word copies ******************/

#define __get_user_fn(sz, u, k)						\
({									\
	long __ret = 0;	/* success by default */			\
	switch (sz) {							\
	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
	case 4: __arc_get_user_one(*(k), u, "ld", __ret); break;	\
	case 8: __arc_get_user_one_64(*(k), u, __ret); break;		\
	}								\
	__ret;								\
})

/*
 * Returns 0 on success, -EFAULT if not.
 * @ret already contains 0 - given that errors will be less likely
 * (hence +r asm constraint below).
 * In case of error, fixup code will make it -EFAULT
 */
#define __arc_get_user_one(dst, src, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"	%1,[%2]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	# return -EFAULT\n"		\
	"	mov %0, %3\n"			\
	"	# zero out dst ptr\n"		\
	"	mov %1, 0\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))

#define __arc_get_user_one_64(dst, src, ret)	\
	__asm__ __volatile__(			\
	"1:	ld   %1,[%2]\n"			\
	"4:	ld  %R1,[%2, 4]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	# return -EFAULT\n"		\
	"	mov %0, %3\n"			\
	"	# zero out dst ptr\n"		\
	"	mov %1, 0\n"			\
	"	mov %R1, 0\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.word 4b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))

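/*
 * A minimal usage sketch, assuming a hypothetical u32 __user pointer
 * @uptr: on a faulting load the fixup code above zeroes the destination
 * and returns -EFAULT, so @v is never left holding stale data.
 *
 *	u32 v;
 *	if (get_user(v, uptr))		// fault: v == 0, result is -EFAULT
 *		return -EFAULT;
 */
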
#define __put_user_fn(sz, u, k)						\
({									\
	long __ret = 0;	/* success by default */			\
	switch (sz) {							\
	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
	case 4: __arc_put_user_one(*(k), u, "st", __ret); break;	\
	case 8: __arc_put_user_one_64(*(k), u, __ret); break;		\
	}								\
	__ret;								\
})

#define __arc_put_user_one(src, dst, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"	%1,[%2]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))

#define __arc_put_user_one_64(src, dst, ret)	\
	__asm__ __volatile__(			\
	"1:	st   %1,[%2]\n"			\
	"4:	st  %R1,[%2, 4]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.word 4b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))


static inline unsigned long
__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		__asm__ __volatile__ (
		"	mov.f   lp_count, %0		\n"
		"	lpnz 2f				\n"
		"1:	ldb.ab  %1, [%3, 1]		\n"
		"	stb.ab  %1, [%2, 1]		\n"
		"	sub %0,%0,1			\n"
		"2:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"3:	j   2b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 3b			\n"
		"	.previous			\n"

		: "+r" (n),
		/*
		 * Note: the '&' earlyclobber constraint makes sure the
		 * temporary register used inside the loop is not the
		 * same as FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "lp_start", "lp_end", "memory");

		return n;
	}

	/*
	 * Hand-crafted constant propagation to reduce code sz of the
	 * laddered copy 16x,8,4,2,1
	 */
	if (__builtin_constant_p(orig_n)) {
		res = orig_n;

		if (orig_n / 16) {
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr   lp_count, %7,4		\n"
			"	lp    3f			\n"
			"1:	ld.ab   %3, [%2, 4]		\n"
			"11:	ld.ab   %4, [%2, 4]		\n"
			"12:	ld.ab   %5, [%2, 4]		\n"
			"13:	ld.ab   %6, [%2, 4]		\n"
			"	st.ab   %3, [%1, 4]		\n"
			"	st.ab   %4, [%1, 4]		\n"
			"	st.ab   %5, [%1, 4]		\n"
			"	st.ab   %6, [%1, 4]		\n"
			"	sub     %0,%0,16		\n"
			"3:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"14:	ld.ab   %3, [%2,4]		\n"
			"15:	ld.ab   %4, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	st.ab   %4, [%1,4]		\n"
			"	sub     %0,%0,8			\n"
			"31:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"16:	ld.ab   %3, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	sub     %0,%0,4			\n"
			"32:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"17:	ldw.ab   %3, [%2,2]		\n"
			"	stw.ab   %3, [%1,2]		\n"
			"	sub      %0,%0,2		\n"
			"33:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"18:	ldb.ab   %3, [%2,2]		\n"
			"	stb.ab   %3, [%1,2]		\n"
			"	sub      %0,%0,1		\n"
			"34:	; nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */

		__asm__ __volatile__(
		"	mov %0,%3			\n"
		"	lsr.f  lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz   3f			\n"
		"1:	ld.ab   %5, [%2, 4]		\n"
		"11:	ld.ab   %6, [%2, 4]		\n"
		"12:	ld.ab   %7, [%2, 4]		\n"
		"13:	ld.ab   %8, [%2, 4]		\n"
		"	st.ab   %5, [%1, 4]		\n"
		"	st.ab   %6, [%1, 4]		\n"
		"	st.ab   %7, [%1, 4]		\n"
		"	st.ab   %8, [%1, 4]		\n"
		"	sub     %0,%0,16		\n"
		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
		"	bz      34f			\n"
		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
		"14:	ld.ab   %5, [%2,4]		\n"
		"15:	ld.ab   %6, [%2,4]		\n"
		"	st.ab   %5, [%1,4]		\n"
		"	st.ab   %6, [%1,4]		\n"
		"	sub.f   %0,%0,8			\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"16:	ld.ab   %5, [%2,4]		\n"
		"	st.ab   %5, [%1,4]		\n"
		"	sub.f   %0,%0,4			\n"
		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
		"17:	ldw.ab  %5, [%2,2]		\n"
		"	stw.ab  %5, [%1,2]		\n"
		"	sub.f   %0,%0,2			\n"
		"33:	bbit0   %3,0,34f		\n"
		"18:	ldb.ab  %5, [%2,1]		\n"  /* 1 byte left */
		"	stb.ab  %5, [%1,1]		\n"
		"	sub.f   %0,%0,1			\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}

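/*
 * A minimal usage sketch of the constant-size ladder above, assuming a
 * hypothetical 12-byte object at user address @u_pkt: with an aligned
 * pointer and the size known at compile time, only the 8-byte and 4-byte
 * blocks of the ladder are emitted, and a non-zero return (bytes left
 * uncopied) maps to -EFAULT in the caller.
 *
 *	struct pkt { u32 a, b, c; } k;
 *	if (copy_from_user(&k, u_pkt, sizeof(k)))
 *		return -EFAULT;
 */
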
extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
					   unsigned long n);

static inline unsigned long
__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		__asm__ __volatile__(
		"	mov.f   lp_count, %0		\n"
		"	lpnz 3f				\n"
		"	ldb.ab  %1, [%3, 1]		\n"
		"1:	stb.ab  %1, [%2, 1]		\n"
		"	sub %0, %0, 1			\n"
		"3:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   3b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.previous			\n"

		: "+r" (n),
		/*
		 * Note: the '&' earlyclobber constraint makes sure the
		 * temporary register used inside the loop is not the
		 * same as FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "lp_start", "lp_end", "memory");

		return n;
	}

	if (__builtin_constant_p(orig_n)) {
		res = orig_n;

		if (orig_n / 16) {
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr lp_count, %7,4		\n"
			"	lp  3f				\n"
			"	ld.ab %3, [%2, 4]		\n"
			"	ld.ab %4, [%2, 4]		\n"
			"	ld.ab %5, [%2, 4]		\n"
			"	ld.ab %6, [%2, 4]		\n"
			"1:	st.ab %3, [%1, 4]		\n"
			"11:	st.ab %4, [%1, 4]		\n"
			"12:	st.ab %5, [%1, 4]		\n"
			"13:	st.ab %6, [%1, 4]		\n"
			"	sub   %0, %0, 16		\n"
			"3:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"	ld.ab   %3, [%2,4]		\n"
			"	ld.ab   %4, [%2,4]		\n"
			"14:	st.ab   %3, [%1,4]		\n"
			"15:	st.ab   %4, [%1,4]		\n"
			"	sub     %0, %0, 8		\n"
			"31:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"	ld.ab   %3, [%2,4]		\n"
			"16:	st.ab   %3, [%1,4]		\n"
			"	sub     %0, %0, 4		\n"
			"32:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"	ldw.ab   %3, [%2,2]		\n"
			"17:	stw.ab   %3, [%1,2]		\n"
			"	sub      %0, %0, 2		\n"
			"33:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"	ldb.ab  %3, [%2,1]		\n"
			"18:	stb.ab  %3, [%1,1]		\n"
			"	sub     %0, %0, 1		\n"
			"34:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */

		__asm__ __volatile__(
		"	mov   %0,%3			\n"
		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz  3f			\n"
		"	ld.ab %5, [%2, 4]		\n"
		"	ld.ab %6, [%2, 4]		\n"
		"	ld.ab %7, [%2, 4]		\n"
		"	ld.ab %8, [%2, 4]		\n"
		"1:	st.ab %5, [%1, 4]		\n"
		"11:	st.ab %6, [%1, 4]		\n"
		"12:	st.ab %7, [%1, 4]		\n"
		"13:	st.ab %8, [%1, 4]		\n"
		"	sub   %0, %0, 16		\n"
		"3:	and.f %3,%3,0xf			\n"  /* stragglers */
		"	bz 34f				\n"
		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"	ld.ab   %6, [%2,4]		\n"
		"14:	st.ab   %5, [%1,4]		\n"
		"15:	st.ab   %6, [%1,4]		\n"
		"	sub.f   %0, %0, 8		\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"16:	st.ab   %5, [%1,4]		\n"
		"	sub.f   %0, %0, 4		\n"
		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
		"	ldw.ab  %5, [%2,2]		\n"
		"17:	stw.ab  %5, [%1,2]		\n"
		"	sub.f   %0, %0, 2		\n"
		"33:	bbit0   %3,0,34f		\n"
		"	ldb.ab  %5, [%2,1]		\n"  /* 1 byte left */
		"18:	stb.ab  %5, [%1,1]		\n"
		"	sub.f   %0, %0, 1		\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}

static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
{
	long res = n;
	unsigned char *d_char = to;

	__asm__ __volatile__(
	"	bbit0   %0, 0, 1f		\n"
	"75:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"1:	bbit0   %0, 1, 2f		\n"
	"76:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"2:	asr.f   lp_count, %1, 2		\n"
	"	lpnz    3f			\n"
	"77:	st.ab   %2, [%0,4]		\n"
	"	sub %1, %1, 4			\n"
	"3:	bbit0   %1, 1, 4f		\n"
	"78:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"4:	bbit0   %1, 0, 5f		\n"
	"79:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"5:					\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"3:	j   5b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   75b, 3b			\n"
	"	.word   76b, 3b			\n"
	"	.word   77b, 3b			\n"
	"	.word   78b, 3b			\n"
	"	.word   79b, 3b			\n"
	"	.previous			\n"
	: "+r"(d_char), "+r"(res)
	: "i"(0)
	: "lp_count", "lp_start", "lp_end", "memory");

	return res;
}

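/*
 * A minimal usage sketch, assuming a hypothetical __user range @ubuf of
 * @len bytes: __arc_clear_user() returns the number of bytes NOT
 * cleared, so any non-zero result maps to -EFAULT in the caller.
 *
 *	if (__clear_user(ubuf, len))
 *		return -EFAULT;
 */
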
static inline long
__arc_strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = 0;
	char val;

	if (count == 0)
		return 0;

	__asm__ __volatile__(
	"	lp	3f			\n"
	"1:	ldb.ab  %3, [%2, 1]		\n"
	"	breq.d	%3, 0, 3f		\n"
	"	stb.ab  %3, [%1, 1]		\n"
	"	add	%0, %0, 1	# Num of NON NULL bytes copied	\n"
	"3:								\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"4:	mov %0, %4		# sets @res as -EFAULT	\n"
	"	j   3b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   1b, 4b			\n"
	"	.previous			\n"
	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
	: "g"(-EFAULT), "l"(count)
	: "memory");

	return res;
}

static inline long __arc_strnlen_user(const char __user *s, long n)
{
	long res, tmp1, cnt;
	char val;

	__asm__ __volatile__(
	"	mov %2, %1			\n"
	"1:	ldb.ab  %3, [%0, 1]		\n"
	"	breq.d  %3, 0, 2f		\n"
	"	sub.f   %2, %2, 1		\n"
	"	bnz 1b				\n"
	"	sub %2, %2, 1			\n"
	"2:	sub %0, %1, %2			\n"
	"3:	;nop				\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"4:	mov %0, 0			\n"
	"	j   3b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word 1b, 4b			\n"
	"	.previous			\n"
	: "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
	: "0"(s), "1"(n)
	: "memory");

	return res;
}

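/*
 * A minimal usage sketch, assuming a hypothetical user string @u_name:
 * per the fixup comments above, __strncpy_from_user() returns the number
 * of non-NUL bytes copied, or -EFAULT on fault, while __strnlen_user()
 * returns the string length including the NUL, or 0 on fault.
 *
 *	char name[32];
 *	long n = __strncpy_from_user(name, u_name, sizeof(name));
 *	if (n < 0)
 *		return -EFAULT;
 */
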
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#define __copy_from_user(t, f, n)	__arc_copy_from_user(t, f, n)
#define __copy_to_user(t, f, n)		__arc_copy_to_user(t, f, n)
#define __clear_user(d, n)		__arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n)		__arc_strnlen_user(s, n)
#else
extern long arc_copy_from_user_noinline(void *to, const void __user *from,
					unsigned long n);
extern long arc_copy_to_user_noinline(void __user *to, const void *from,
				      unsigned long n);
extern unsigned long arc_clear_user_noinline(void __user *to,
					     unsigned long n);
extern long arc_strncpy_from_user_noinline(char *dst, const char __user *src,
					   long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n);

#define __copy_from_user(t, f, n)	arc_copy_from_user_noinline(t, f, n)
#define __copy_to_user(t, f, n)		arc_copy_to_user_noinline(t, f, n)
#define __clear_user(d, n)		arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n)	arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n)		arc_strnlen_user_noinline(s, n)

#endif

#include <asm-generic/uaccess.h>

extern int fixup_exception(struct pt_regs *regs);

#endif