Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
5210d1e6 VG |
2 | /* |
3 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | |
5210d1e6 VG |
4 | */ |
5 | ||
6 | /* If dst and src are 4 byte aligned, copy 8 bytes at a time. | |
7 | If the src is 4, but not 8 byte aligned, we first read 4 bytes to get | |
8 | it 8 byte aligned. Thus, we can do a little read-ahead, without | |
9 | dereferencing a cache line that we should not touch. | |
10 | Note that short and long instructions have been scheduled to avoid | |
11 | branch stalls. | |
12 | The beq_s to r3z could be made unaligned & long to avoid a stall | |
13 | there, but it is not likely to be taken often, and it | |
14 | would also be likely to cost an unaligned mispredict at the next call. */ | |
15 | ||
ec7ac6af | 16 | #include <linux/linkage.h> |
5210d1e6 | 17 | |
86effd0d | 18 | ENTRY_CFI(strcpy) |
5210d1e6 VG |
19 | or r2,r0,r1 ; r2 = dst OR src: check alignment of both at once |
20 | bmsk_s r2,r2,1 ; keep low 2 bits; nonzero => a pointer is not 4-byte aligned | |
21 | brne.d r2,0,charloop ; unaligned: fall back to byte-at-a-time copy | |
22 | mov_s r10,r0 ; (delay slot) r10 = working dst; r0 untouched as return value | |
23 | ld_s r3,[r1,0] ; read first src word (src is 4-byte aligned here) | |
24 | mov r8,0x01010101 ; magic for zero-byte test: (x - 0x01010101) & ~x & 0x80808080 | |
25 | bbit0.d r1,2,loop_start ; src bit 2 clear => 8-byte aligned: skip the alignment word | |
26 | ror r12,r8 ; (delay slot) r12 = 0x01010101 ror 1 = 0x80808080 mask | |
27 | sub r2,r3,r8 ; zero-byte test on first word: r3 - 0x01010101 ... | |
28 | bic_s r2,r2,r3 ; ... & ~r3 | |
29 | tst_s r2,r12 ; any 0x80 bit set => first word contains a NUL byte | |
30 | bne r3z ; NUL in first word: finish byte-wise | |
31 | mov_s r4,r3 ; treat first word as the read-ahead word so loop stores it | |
32 | .balign 4 | |
33 | loop: ; main loop: two words in flight (r3 tested, r4 read ahead) | |
34 | ld.a r3,[r1,4] ; pre-increment load of next src word | |
35 | st.ab r4,[r10,4] ; store previous NUL-free word, post-increment dst | |
36 | loop_start: | |
37 | ld.a r4,[r1,4] ; read-ahead second word | |
38 | sub r2,r3,r8 ; zero-byte test on r3 (same magic as above) | |
39 | bic_s r2,r2,r3 | |
40 | tst_s r2,r12 | |
41 | bne_s r3z ; NUL in r3: emit its bytes one at a time | |
42 | st.ab r3,[r10,4] ; r3 is NUL-free, store it | |
43 | sub r2,r4,r8 ; zero-byte test on the read-ahead word r4 | |
44 | bic r2,r2,r4 | |
45 | tst r2,r12 | |
46 | beq loop ; no NUL in r4 either: keep streaming words | |
47 | mov_s r3,r4 ; NUL is in r4: move it to r3 for the tail code | |
48 | #ifdef __LITTLE_ENDIAN__ | |
49 | r3z: bmsk.f r1,r3,7 ; r1 = lowest byte of r3; sets Z flag if it is the NUL | |
50 | lsr_s r3,r3,8 ; shift next byte down into position | |
51 | #else | |
52 | r3z: lsr.f r1,r3,24 ; big-endian: take highest byte first; Z set if NUL | |
53 | asl_s r3,r3,8 | |
54 | #endif | |
55 | bne.d r3z ; loop while the byte was nonzero ... | |
56 | stb.ab r1,[r10,1] ; (delay slot) store byte - runs even on exit, writing the NUL | |
57 | j_s [blink] | |
58 | ||
59 | .balign 4 | |
60 | charloop: ; fallback for unaligned dst/src: plain byte copy | |
61 | ldb.ab r3,[r1,1] | |
62 | ||
63 | ||
64 | brne.d r3,0,charloop ; loop until the NUL byte is seen | |
65 | stb.ab r3,[r10,1] ; (delay slot) store runs on fall-through too => NUL is copied | |
66 | j [blink] | |
86effd0d | 67 | END_CFI(strcpy) |