//
// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//

// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
//     Erdinc Ozturk <erdinc.ozturk@intel.com>
//     Vinodh Gopal <vinodh.gopal@intel.com>
//     James Guilford <james.guilford@intel.com>
//     Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses.  You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the
//   distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Reference paper titled "Fast CRC Computation for Generic
//     Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
//  /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//

#include <linux/linkage.h>
#include <asm/assembler.h>

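// CPU_LE() wraps instructions that are only needed on little-endian builds:
// the vrev64.8 byte reversals used below to bring the data into polynomial
// coefficient order are elided on BE8 kernels, where 64-bit element loads
// already produce that byte order.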
#ifdef CONFIG_CPU_ENDIAN_BE8
#define CPU_LE(code...)
#else
#define CPU_LE(code...)         code
#endif

        .text
        .arch           armv8-a
        .fpu            crypto-neon-fp-armv8

        init_crc        .req    r0
        buf             .req    r1
        len             .req    r2

        fold_consts_ptr .req    ip

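        // vmull.p64 and vtbl operate on 64-bit d registers, so give the low
        // and high halves of each q register 'l'/'h' aliases for use below.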
        q0l             .req    d0
        q0h             .req    d1
        q1l             .req    d2
        q1h             .req    d3
        q2l             .req    d4
        q2h             .req    d5
        q3l             .req    d6
        q3h             .req    d7
        q4l             .req    d8
        q4h             .req    d9
        q5l             .req    d10
        q5h             .req    d11
        q6l             .req    d12
        q6h             .req    d13
        q7l             .req    d14
        q7h             .req    d15
        q8l             .req    d16
        q8h             .req    d17
        q9l             .req    d18
        q9h             .req    d19
        q10l            .req    d20
        q10h            .req    d21
        q11l            .req    d22
        q11h            .req    d23
        q12l            .req    d24
        q12h            .req    d25

        FOLD_CONSTS     .req    q10
        FOLD_CONST_L    .req    q10l
        FOLD_CONST_H    .req    q10h

// Fold reg1, reg2 into the next 32 data bytes, storing the result back into
// reg1, reg2.
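// (Folding relies on the GF(2) identity from the Intel paper cited above: for
// a 128-bit value A = A_hi*x^64 + A_lo and a fold distance of n bits,
// A*x^n = A_hi*(x^(n+64) mod G(x)) + A_lo*(x^n mod G(x))  (mod G(x)),
// so each accumulator advances past n bits of new data with just two
// 64x64-bit carryless multiplies and two XORs.)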
        .macro          fold_32_bytes, reg1, reg2
        vld1.64         {q11-q12}, [buf]!

        vmull.p64       q8, \reg1\()h, FOLD_CONST_H
        vmull.p64       \reg1, \reg1\()l, FOLD_CONST_L
        vmull.p64       q9, \reg2\()h, FOLD_CONST_H
        vmull.p64       \reg2, \reg2\()l, FOLD_CONST_L

CPU_LE( vrev64.8        q11, q11        )
CPU_LE( vrev64.8        q12, q12        )
        vswp            q11l, q11h
        vswp            q12l, q12h

        veor.8          \reg1, \reg1, q8
        veor.8          \reg2, \reg2, q9
        veor.8          \reg1, \reg1, q11
        veor.8          \reg2, \reg2, q12
        .endm

// Fold src_reg into dst_reg, optionally loading the next fold constants.
        .macro          fold_16_bytes, src_reg, dst_reg, load_next_consts
        vmull.p64       q8, \src_reg\()l, FOLD_CONST_L
        vmull.p64       \src_reg, \src_reg\()h, FOLD_CONST_H
        .ifnb           \load_next_consts
        vld1.64         {FOLD_CONSTS}, [fold_consts_ptr, :128]!
        .endif
        veor.8          \dst_reg, \dst_reg, q8
        veor.8          \dst_reg, \dst_reg, \src_reg
        .endm

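// Load the 32-bit address of \sym into \out with a movw/movt pair, so the
// symbol can sit anywhere in the address space (unlike adr's limited range).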
        .macro          __adrl, out, sym
        movw            \out, #:lower16:\sym
        movt            \out, #:upper16:\sym
        .endm

//
// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
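// Clobbers q0-q12, r3 and ip.  The caller is responsible for making the NEON
// unit usable, e.g. via kernel_neon_begin(), before calling in.
//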
ENTRY(crc_t10dif_pmull)

        // For sizes less than 256 bytes, we can't fold 128 bytes at a time.
        cmp             len, #256
        blt             .Lless_than_256_bytes

        __adrl          fold_consts_ptr, .Lfold_across_128_bytes_consts

        // Load the first 128 data bytes.  Byte swapping is necessary to make
        // the bit order match the polynomial coefficient order.
        vld1.64         {q0-q1}, [buf]!
        vld1.64         {q2-q3}, [buf]!
        vld1.64         {q4-q5}, [buf]!
        vld1.64         {q6-q7}, [buf]!
CPU_LE( vrev64.8        q0, q0  )
CPU_LE( vrev64.8        q1, q1  )
CPU_LE( vrev64.8        q2, q2  )
CPU_LE( vrev64.8        q3, q3  )
CPU_LE( vrev64.8        q4, q4  )
CPU_LE( vrev64.8        q5, q5  )
CPU_LE( vrev64.8        q6, q6  )
CPU_LE( vrev64.8        q7, q7  )
        vswp            q0l, q0h
        vswp            q1l, q1h
        vswp            q2l, q2h
        vswp            q3l, q3h
        vswp            q4l, q4h
        vswp            q5l, q5h
        vswp            q6l, q6h
        vswp            q7l, q7h

        // XOR the first 16 data *bits* with the initial CRC value.
        vmov.i8         q8h, #0
        vmov.u16        q8h[3], init_crc
        veor            q0h, q0h, q8h
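        // (CRCs are linear over GF(2), so XORing the running CRC onto the 16
        // highest-order coefficients of the message accounts for all data
        // that has already been checksummed.)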

        // Load the constants for folding across 128 bytes.
        vld1.64         {FOLD_CONSTS}, [fold_consts_ptr, :128]!

        // Subtract 128 for the 128 data bytes just consumed.  Subtract
        // another 128 to simplify the termination condition of the
        // following loop.
        sub             len, len, #256

        // While >= 128 data bytes remain (not counting q0-q7), fold the 128
        // bytes q0-q7 into them, storing the result back into q0-q7.
.Lfold_128_bytes_loop:
        fold_32_bytes   q0, q1
        fold_32_bytes   q2, q3
        fold_32_bytes   q4, q5
        fold_32_bytes   q6, q7
        subs            len, len, #128
        bge             .Lfold_128_bytes_loop

        // Now fold the 112 bytes in q0-q6 into the 16 bytes in q7.

        // Fold across 64 bytes.
        vld1.64         {FOLD_CONSTS}, [fold_consts_ptr, :128]!
        fold_16_bytes   q0, q4
        fold_16_bytes   q1, q5
        fold_16_bytes   q2, q6
        fold_16_bytes   q3, q7, 1
        // Fold across 32 bytes.
        fold_16_bytes   q4, q6
        fold_16_bytes   q5, q7, 1
        // Fold across 16 bytes.
        fold_16_bytes   q6, q7
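        // (Together these folds form a reduction tree: q0-q3 fold into
        // q4-q7, then q4-q5 into q6-q7, then q6 into q7.)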

        // Add 128 to get the correct number of data bytes remaining in
        // 0...127 (not counting q7), following the previous extra
        // subtraction by 128.  Then subtract 16 to simplify the termination
        // condition of the following loop.
        adds            len, len, #(128-16)

        // While >= 16 data bytes remain (not counting q7), fold the 16
        // bytes q7 into them, storing the result back into q7.
        blt             .Lfold_16_bytes_loop_done
.Lfold_16_bytes_loop:
        vmull.p64       q8, q7l, FOLD_CONST_L
        vmull.p64       q7, q7h, FOLD_CONST_H
        veor.8          q7, q7, q8
        vld1.64         {q0}, [buf]!
CPU_LE( vrev64.8        q0, q0  )
        vswp            q0l, q0h
        veor.8          q7, q7, q0
        subs            len, len, #16
        bge             .Lfold_16_bytes_loop

.Lfold_16_bytes_loop_done:
        // Add 16 to get the correct number of data bytes remaining in
        // 0...15 (not counting q7), following the previous extra
        // subtraction by 16.
        adds            len, len, #16
        beq             .Lreduce_final_16_bytes

.Lhandle_partial_segment:
        // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the
        // first 16 bytes are in q7 and the rest are the remaining data in
        // 'buf'.  To do this without needing a fold constant for each
        // possible 'len', redivide the bytes into a first chunk of 'len'
        // bytes and a second chunk of 16 bytes, then fold the first chunk
        // into the second.
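        // (For example, if len = 2 the first chunk is the 2 highest-order
        // bytes of q7, and the second chunk is the remaining 14 bytes of q7
        // followed by the 2 data bytes still in 'buf'.)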

        // q0 = last 16 original data bytes
        add             buf, buf, len
        sub             buf, buf, #16
        vld1.64         {q0}, [buf]
CPU_LE( vrev64.8        q0, q0  )
        vswp            q0l, q0h

        // q1 = high order part of second chunk: q7 left-shifted by 'len'
        // bytes.
        __adrl          r3, .Lbyteshift_table + 16
        sub             r3, r3, len
        vld1.8          {q2}, [r3]
        vtbl.8          q1l, {q7l-q7h}, q2l
        vtbl.8          q1h, {q7l-q7h}, q2h

        // q3 = first chunk: q7 right-shifted by '16-len' bytes.
        vmov.i8         q3, #0x80
        veor.8          q2, q2, q3
        vtbl.8          q3l, {q7l-q7h}, q2l
        vtbl.8          q3h, {q7l-q7h}, q2h

        // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff
        // bytes.
        vshr.s8         q2, q2, #7

        // q2 = second chunk: 'len' bytes from q0 (low-order bytes), then
        // '16-len' bytes from q1 (high-order bytes).
        vbsl.8          q2, q1, q0

        // Fold the first chunk into the second chunk, storing the result in
        // q7.
        vmull.p64       q0, q3l, FOLD_CONST_L
        vmull.p64       q7, q3h, FOLD_CONST_H
        veor.8          q7, q7, q0
        veor.8          q7, q7, q2

.Lreduce_final_16_bytes:
        // Reduce the 128-bit value M(x), stored in q7, to the final 16-bit
        // CRC.

        // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
        vld1.64         {FOLD_CONSTS}, [fold_consts_ptr, :128]!

        // Fold the high 64 bits into the low 64 bits, while also
        // multiplying by x^64.  This produces a 128-bit value congruent to
        // x^64 * M(x) and whose low 48 bits are 0.
        vmull.p64       q0, q7h, FOLD_CONST_H   // high bits * x^48 * (x^80 mod G(x))
        veor.8          q0h, q0h, q7l           // + low bits * x^64

        // Fold the high 32 bits into the low 96 bits.  This produces a
        // 96-bit value congruent to x^64 * M(x) and whose low 48 bits are 0.
        vmov.i8         q1, #0
        vmov            s4, s3                  // extract high 32 bits
        vmov            s3, s5                  // zero high 32 bits
        vmull.p64       q1, q1l, FOLD_CONST_L   // high 32 bits * x^48 * (x^48 mod G(x))
        veor.8          q0, q0, q1              // + low bits

        // Load G(x) and floor(x^48 / G(x)).
        vld1.64         {FOLD_CONSTS}, [fold_consts_ptr, :128]

        // Use Barrett reduction to compute the final CRC value.
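        // (Sketch: over GF(2), V mod G(x) = V + G(x)*floor(V / G(x)), and
        // the quotient is obtained cheaply by multiplying by the precomputed
        // constant floor(x^48 / G(x)) and shifting, instead of dividing.)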
        vmull.p64       q1, q0h, FOLD_CONST_H   // high 32 bits * floor(x^48 / G(x))
        vshr.u64        q1l, q1l, #32           // /= x^32
        vmull.p64       q1, q1l, FOLD_CONST_L   // *= G(x)
        vshr.u64        q0l, q0l, #48
        veor.8          q0l, q0l, q1l           // + low 16 nonzero bits
        // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0.

        vmov.u16        r0, q0l[0]
        bx              lr

.Lless_than_256_bytes:
        // Checksumming a buffer of length 16...255 bytes

        __adrl          fold_consts_ptr, .Lfold_across_16_bytes_consts

        // Load the first 16 data bytes.
        vld1.64         {q7}, [buf]!
CPU_LE( vrev64.8        q7, q7  )
        vswp            q7l, q7h

        // XOR the first 16 data *bits* with the initial CRC value.
        vmov.i8         q0h, #0
        vmov.u16        q0h[3], init_crc
        veor.8          q7h, q7h, q0h

        // Load the fold-across-16-bytes constants.
        vld1.64         {FOLD_CONSTS}, [fold_consts_ptr, :128]!

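        // Dispatch on the buffer length: exactly 16 bytes reduces q7
        // directly; 17-31 bytes takes the partial-segment path; 32 or more
        // enters the 16-byte folding loop with len biased by -32 (16 bytes
        // already consumed into q7, plus the loop's own -16 bias).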
        cmp             len, #16
        beq             .Lreduce_final_16_bytes         // len == 16
        subs            len, len, #32
        addlt           len, len, #16
        blt             .Lhandle_partial_segment        // 17 <= len <= 31
        b               .Lfold_16_bytes_loop            // 32 <= len <= 255
ENDPROC(crc_t10dif_pmull)

        .section        ".rodata", "a"
        .align          4

// Fold constants precomputed from the polynomial 0x18bb7
// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
.Lfold_across_128_bytes_consts:
        .quad           0x0000000000006123      // x^(8*128)    mod G(x)
        .quad           0x0000000000002295      // x^(8*128+64) mod G(x)
// .Lfold_across_64_bytes_consts:
        .quad           0x0000000000001069      // x^(4*128)    mod G(x)
        .quad           0x000000000000dd31      // x^(4*128+64) mod G(x)
// .Lfold_across_32_bytes_consts:
        .quad           0x000000000000857d      // x^(2*128)    mod G(x)
        .quad           0x0000000000007acc      // x^(2*128+64) mod G(x)
.Lfold_across_16_bytes_consts:
        .quad           0x000000000000a010      // x^(1*128)    mod G(x)
        .quad           0x0000000000001faa      // x^(1*128+64) mod G(x)
// .Lfinal_fold_consts:
        .quad           0x1368000000000000      // x^48 * (x^48 mod G(x))
        .quad           0x2d56000000000000      // x^48 * (x^80 mod G(x))
// .Lbarrett_reduction_consts:
        .quad           0x0000000000018bb7      // G(x)
        .quad           0x00000001f65a57f8      // floor(x^48 / G(x))
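// (Each x^k mod G(x) entry can be reproduced by long division of x^k by G(x)
// over GF(2); e.g. x^128 mod G(x) = 0xa010, the fold-across-16-bytes constant
// above.)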

// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
.Lbyteshift_table:
        .byte            0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
        .byte           0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
        .byte            0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
        .byte            0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0