// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
 *		Fixed some nasty bugs, causing some horrible crashes.
 *		A: At some points, the sum (%0) was used as
 *		length-counter instead of the length counter
 *		(%1). Thanks to Roman Hodek for pointing this out.
 *		B: GCC seems to mess up if one uses too many
 *		data-registers to hold input values and one tries to
 *		specify d0 and d1 as scratch registers. Letting gcc
 *		choose these registers itself solves the problem.
 *
 * 1998/8/31	Andreas Schwab:
 *		Zero out rest of buffer on exception in
 *		csum_partial_copy_from_user.
 */

#include <linux/module.h>
#include <net/checksum.h>

34 | /* | |
35 | * computes a partial checksum, e.g. for TCP/UDP fragments | |
36 | */ | |
37 | ||
38 | __wsum csum_partial(const void *buff, int len, __wsum sum) | |
39 | { | |
40 | unsigned long tmp1, tmp2; | |
41 | /* | |
42 | * Experiments with ethernet and slip connections show that buff | |
43 | * is aligned on either a 2-byte or 4-byte boundary. | |
44 | */ | |
45 | __asm__("movel %2,%3\n\t" | |
46 | "btst #1,%3\n\t" /* Check alignment */ | |
47 | "jeq 2f\n\t" | |
48 | "subql #2,%1\n\t" /* buff%4==2: treat first word */ | |
49 | "jgt 1f\n\t" | |
50 | "addql #2,%1\n\t" /* len was == 2, treat only rest */ | |
51 | "jra 4f\n" | |
52 | "1:\t" | |
53 | "addw %2@+,%0\n\t" /* add first word to sum */ | |
54 | "clrl %3\n\t" | |
55 | "addxl %3,%0\n" /* add X bit */ | |
56 | "2:\t" | |
57 | /* unrolled loop for the main part: do 8 longs at once */ | |
58 | "movel %1,%3\n\t" /* save len in tmp1 */ | |
59 | "lsrl #5,%1\n\t" /* len/32 */ | |
60 | "jeq 2f\n\t" /* not enough... */ | |
61 | "subql #1,%1\n" | |
62 | "1:\t" | |
63 | "movel %2@+,%4\n\t" | |
64 | "addxl %4,%0\n\t" | |
65 | "movel %2@+,%4\n\t" | |
66 | "addxl %4,%0\n\t" | |
67 | "movel %2@+,%4\n\t" | |
68 | "addxl %4,%0\n\t" | |
69 | "movel %2@+,%4\n\t" | |
70 | "addxl %4,%0\n\t" | |
71 | "movel %2@+,%4\n\t" | |
72 | "addxl %4,%0\n\t" | |
73 | "movel %2@+,%4\n\t" | |
74 | "addxl %4,%0\n\t" | |
75 | "movel %2@+,%4\n\t" | |
76 | "addxl %4,%0\n\t" | |
77 | "movel %2@+,%4\n\t" | |
78 | "addxl %4,%0\n\t" | |
79 | "dbra %1,1b\n\t" | |
80 | "clrl %4\n\t" | |
81 | "addxl %4,%0\n\t" /* add X bit */ | |
82 | "clrw %1\n\t" | |
83 | "subql #1,%1\n\t" | |
84 | "jcc 1b\n" | |
85 | "2:\t" | |
86 | "movel %3,%1\n\t" /* restore len from tmp1 */ | |
87 | "andw #0x1c,%3\n\t" /* number of rest longs */ | |
88 | "jeq 4f\n\t" | |
89 | "lsrw #2,%3\n\t" | |
90 | "subqw #1,%3\n" | |
91 | "3:\t" | |
92 | /* loop for rest longs */ | |
93 | "movel %2@+,%4\n\t" | |
94 | "addxl %4,%0\n\t" | |
95 | "dbra %3,3b\n\t" | |
96 | "clrl %4\n\t" | |
97 | "addxl %4,%0\n" /* add X bit */ | |
98 | "4:\t" | |
99 | /* now check for rest bytes that do not fit into longs */ | |
100 | "andw #3,%1\n\t" | |
101 | "jeq 7f\n\t" | |
102 | "clrl %4\n\t" /* clear tmp2 for rest bytes */ | |
103 | "subqw #2,%1\n\t" | |
104 | "jlt 5f\n\t" | |
105 | "movew %2@+,%4\n\t" /* have rest >= 2: get word */ | |
106 | "swap %4\n\t" /* into bits 16..31 */ | |
107 | "tstw %1\n\t" /* another byte? */ | |
108 | "jeq 6f\n" | |
109 | "5:\t" | |
110 | "moveb %2@,%4\n\t" /* have odd rest: get byte */ | |
111 | "lslw #8,%4\n\t" /* into bits 8..15; 16..31 untouched */ | |
112 | "6:\t" | |
113 | "addl %4,%0\n\t" /* now add rest long to sum */ | |
114 | "clrl %4\n\t" | |
115 | "addxl %4,%0\n" /* add X bit */ | |
116 | "7:\t" | |
117 | : "=d" (sum), "=d" (len), "=a" (buff), | |
118 | "=&d" (tmp1), "=&d" (tmp2) | |
119 | : "0" (sum), "1" (len), "2" (buff) | |
120 | ); | |
121 | return(sum); | |
122 | } | |
123 | ||
124 | EXPORT_SYMBOL(csum_partial); | |
125 | ||
126 | ||
127 | /* | |
128 | * copy from user space while checksumming, with exception handling. | |
129 | */ | |
130 | ||
131 | __wsum | |
132 | csum_partial_copy_from_user(const void __user *src, void *dst, | |
133 | int len, __wsum sum, int *csum_err) | |
134 | { | |
135 | /* | |
136 | * GCC doesn't like more than 10 operands for the asm | |
137 | * statements so we have to use tmp2 for the error | |
138 | * code. | |
139 | */ | |
140 | unsigned long tmp1, tmp2; | |
141 | ||
142 | __asm__("movel %2,%4\n\t" | |
143 | "btst #1,%4\n\t" /* Check alignment */ | |
144 | "jeq 2f\n\t" | |
145 | "subql #2,%1\n\t" /* buff%4==2: treat first word */ | |
146 | "jgt 1f\n\t" | |
147 | "addql #2,%1\n\t" /* len was == 2, treat only rest */ | |
148 | "jra 4f\n" | |
149 | "1:\n" | |
150 | "10:\t" | |
151 | "movesw %2@+,%4\n\t" /* add first word to sum */ | |
152 | "addw %4,%0\n\t" | |
153 | "movew %4,%3@+\n\t" | |
154 | "clrl %4\n\t" | |
155 | "addxl %4,%0\n" /* add X bit */ | |
156 | "2:\t" | |
157 | /* unrolled loop for the main part: do 8 longs at once */ | |
158 | "movel %1,%4\n\t" /* save len in tmp1 */ | |
159 | "lsrl #5,%1\n\t" /* len/32 */ | |
160 | "jeq 2f\n\t" /* not enough... */ | |
161 | "subql #1,%1\n" | |
162 | "1:\n" | |
163 | "11:\t" | |
164 | "movesl %2@+,%5\n\t" | |
165 | "addxl %5,%0\n\t" | |
166 | "movel %5,%3@+\n\t" | |
167 | "12:\t" | |
168 | "movesl %2@+,%5\n\t" | |
169 | "addxl %5,%0\n\t" | |
170 | "movel %5,%3@+\n\t" | |
171 | "13:\t" | |
172 | "movesl %2@+,%5\n\t" | |
173 | "addxl %5,%0\n\t" | |
174 | "movel %5,%3@+\n\t" | |
175 | "14:\t" | |
176 | "movesl %2@+,%5\n\t" | |
177 | "addxl %5,%0\n\t" | |
178 | "movel %5,%3@+\n\t" | |
179 | "15:\t" | |
180 | "movesl %2@+,%5\n\t" | |
181 | "addxl %5,%0\n\t" | |
182 | "movel %5,%3@+\n\t" | |
183 | "16:\t" | |
184 | "movesl %2@+,%5\n\t" | |
185 | "addxl %5,%0\n\t" | |
186 | "movel %5,%3@+\n\t" | |
187 | "17:\t" | |
188 | "movesl %2@+,%5\n\t" | |
189 | "addxl %5,%0\n\t" | |
190 | "movel %5,%3@+\n\t" | |
191 | "18:\t" | |
192 | "movesl %2@+,%5\n\t" | |
193 | "addxl %5,%0\n\t" | |
194 | "movel %5,%3@+\n\t" | |
195 | "dbra %1,1b\n\t" | |
196 | "clrl %5\n\t" | |
197 | "addxl %5,%0\n\t" /* add X bit */ | |
198 | "clrw %1\n\t" | |
199 | "subql #1,%1\n\t" | |
200 | "jcc 1b\n" | |
201 | "2:\t" | |
202 | "movel %4,%1\n\t" /* restore len from tmp1 */ | |
203 | "andw #0x1c,%4\n\t" /* number of rest longs */ | |
204 | "jeq 4f\n\t" | |
205 | "lsrw #2,%4\n\t" | |
206 | "subqw #1,%4\n" | |
207 | "3:\n" | |
208 | /* loop for rest longs */ | |
209 | "19:\t" | |
210 | "movesl %2@+,%5\n\t" | |
211 | "addxl %5,%0\n\t" | |
212 | "movel %5,%3@+\n\t" | |
213 | "dbra %4,3b\n\t" | |
214 | "clrl %5\n\t" | |
215 | "addxl %5,%0\n" /* add X bit */ | |
216 | "4:\t" | |
217 | /* now check for rest bytes that do not fit into longs */ | |
218 | "andw #3,%1\n\t" | |
219 | "jeq 7f\n\t" | |
220 | "clrl %5\n\t" /* clear tmp2 for rest bytes */ | |
221 | "subqw #2,%1\n\t" | |
222 | "jlt 5f\n\t" | |
223 | "20:\t" | |
224 | "movesw %2@+,%5\n\t" /* have rest >= 2: get word */ | |
225 | "movew %5,%3@+\n\t" | |
226 | "swap %5\n\t" /* into bits 16..31 */ | |
227 | "tstw %1\n\t" /* another byte? */ | |
228 | "jeq 6f\n" | |
229 | "5:\n" | |
230 | "21:\t" | |
231 | "movesb %2@,%5\n\t" /* have odd rest: get byte */ | |
232 | "moveb %5,%3@+\n\t" | |
233 | "lslw #8,%5\n\t" /* into bits 8..15; 16..31 untouched */ | |
234 | "6:\t" | |
235 | "addl %5,%0\n\t" /* now add rest long to sum */ | |
236 | "clrl %5\n\t" | |
237 | "addxl %5,%0\n\t" /* add X bit */ | |
238 | "7:\t" | |
239 | "clrl %5\n" /* no error - clear return value */ | |
240 | "8:\n" | |
241 | ".section .fixup,\"ax\"\n" | |
242 | ".even\n" | |
243 | /* If any exception occurs zero out the rest. | |
244 | Similarities with the code above are intentional :-) */ | |
245 | "90:\t" | |
246 | "clrw %3@+\n\t" | |
247 | "movel %1,%4\n\t" | |
248 | "lsrl #5,%1\n\t" | |
249 | "jeq 1f\n\t" | |
250 | "subql #1,%1\n" | |
251 | "91:\t" | |
252 | "clrl %3@+\n" | |
253 | "92:\t" | |
254 | "clrl %3@+\n" | |
255 | "93:\t" | |
256 | "clrl %3@+\n" | |
257 | "94:\t" | |
258 | "clrl %3@+\n" | |
259 | "95:\t" | |
260 | "clrl %3@+\n" | |
261 | "96:\t" | |
262 | "clrl %3@+\n" | |
263 | "97:\t" | |
264 | "clrl %3@+\n" | |
265 | "98:\t" | |
266 | "clrl %3@+\n\t" | |
267 | "dbra %1,91b\n\t" | |
268 | "clrw %1\n\t" | |
269 | "subql #1,%1\n\t" | |
270 | "jcc 91b\n" | |
271 | "1:\t" | |
272 | "movel %4,%1\n\t" | |
273 | "andw #0x1c,%4\n\t" | |
274 | "jeq 1f\n\t" | |
275 | "lsrw #2,%4\n\t" | |
276 | "subqw #1,%4\n" | |
277 | "99:\t" | |
278 | "clrl %3@+\n\t" | |
279 | "dbra %4,99b\n\t" | |
280 | "1:\t" | |
281 | "andw #3,%1\n\t" | |
282 | "jeq 9f\n" | |
283 | "100:\t" | |
284 | "clrw %3@+\n\t" | |
285 | "tstw %1\n\t" | |
286 | "jeq 9f\n" | |
287 | "101:\t" | |
288 | "clrb %3@+\n" | |
289 | "9:\t" | |
290 | #define STR(X) STR1(X) | |
291 | #define STR1(X) #X | |
292 | "moveq #-" STR(EFAULT) ",%5\n\t" | |
293 | "jra 8b\n" | |
294 | ".previous\n" | |
295 | ".section __ex_table,\"a\"\n" | |
296 | ".long 10b,90b\n" | |
297 | ".long 11b,91b\n" | |
298 | ".long 12b,92b\n" | |
299 | ".long 13b,93b\n" | |
300 | ".long 14b,94b\n" | |
301 | ".long 15b,95b\n" | |
302 | ".long 16b,96b\n" | |
303 | ".long 17b,97b\n" | |
304 | ".long 18b,98b\n" | |
305 | ".long 19b,99b\n" | |
306 | ".long 20b,100b\n" | |
307 | ".long 21b,101b\n" | |
308 | ".previous" | |
309 | : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst), | |
310 | "=&d" (tmp1), "=d" (tmp2) | |
311 | : "0" (sum), "1" (len), "2" (src), "3" (dst) | |
312 | ); | |
313 | ||
314 | *csum_err = tmp2; | |
315 | ||
316 | return(sum); | |
317 | } | |
318 | ||
319 | EXPORT_SYMBOL(csum_partial_copy_from_user); | |
320 | ||
321 | ||
322 | /* | |
323 | * copy from kernel space while checksumming, otherwise like csum_partial | |
324 | */ | |
325 | ||
326 | __wsum | |
327 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) | |
328 | { | |
329 | unsigned long tmp1, tmp2; | |
330 | __asm__("movel %2,%4\n\t" | |
331 | "btst #1,%4\n\t" /* Check alignment */ | |
332 | "jeq 2f\n\t" | |
333 | "subql #2,%1\n\t" /* buff%4==2: treat first word */ | |
334 | "jgt 1f\n\t" | |
335 | "addql #2,%1\n\t" /* len was == 2, treat only rest */ | |
336 | "jra 4f\n" | |
337 | "1:\t" | |
338 | "movew %2@+,%4\n\t" /* add first word to sum */ | |
339 | "addw %4,%0\n\t" | |
340 | "movew %4,%3@+\n\t" | |
341 | "clrl %4\n\t" | |
342 | "addxl %4,%0\n" /* add X bit */ | |
343 | "2:\t" | |
344 | /* unrolled loop for the main part: do 8 longs at once */ | |
345 | "movel %1,%4\n\t" /* save len in tmp1 */ | |
346 | "lsrl #5,%1\n\t" /* len/32 */ | |
347 | "jeq 2f\n\t" /* not enough... */ | |
348 | "subql #1,%1\n" | |
349 | "1:\t" | |
350 | "movel %2@+,%5\n\t" | |
351 | "addxl %5,%0\n\t" | |
352 | "movel %5,%3@+\n\t" | |
353 | "movel %2@+,%5\n\t" | |
354 | "addxl %5,%0\n\t" | |
355 | "movel %5,%3@+\n\t" | |
356 | "movel %2@+,%5\n\t" | |
357 | "addxl %5,%0\n\t" | |
358 | "movel %5,%3@+\n\t" | |
359 | "movel %2@+,%5\n\t" | |
360 | "addxl %5,%0\n\t" | |
361 | "movel %5,%3@+\n\t" | |
362 | "movel %2@+,%5\n\t" | |
363 | "addxl %5,%0\n\t" | |
364 | "movel %5,%3@+\n\t" | |
365 | "movel %2@+,%5\n\t" | |
366 | "addxl %5,%0\n\t" | |
367 | "movel %5,%3@+\n\t" | |
368 | "movel %2@+,%5\n\t" | |
369 | "addxl %5,%0\n\t" | |
370 | "movel %5,%3@+\n\t" | |
371 | "movel %2@+,%5\n\t" | |
372 | "addxl %5,%0\n\t" | |
373 | "movel %5,%3@+\n\t" | |
374 | "dbra %1,1b\n\t" | |
375 | "clrl %5\n\t" | |
376 | "addxl %5,%0\n\t" /* add X bit */ | |
377 | "clrw %1\n\t" | |
378 | "subql #1,%1\n\t" | |
379 | "jcc 1b\n" | |
380 | "2:\t" | |
381 | "movel %4,%1\n\t" /* restore len from tmp1 */ | |
382 | "andw #0x1c,%4\n\t" /* number of rest longs */ | |
383 | "jeq 4f\n\t" | |
384 | "lsrw #2,%4\n\t" | |
385 | "subqw #1,%4\n" | |
386 | "3:\t" | |
387 | /* loop for rest longs */ | |
388 | "movel %2@+,%5\n\t" | |
389 | "addxl %5,%0\n\t" | |
390 | "movel %5,%3@+\n\t" | |
391 | "dbra %4,3b\n\t" | |
392 | "clrl %5\n\t" | |
393 | "addxl %5,%0\n" /* add X bit */ | |
394 | "4:\t" | |
395 | /* now check for rest bytes that do not fit into longs */ | |
396 | "andw #3,%1\n\t" | |
397 | "jeq 7f\n\t" | |
398 | "clrl %5\n\t" /* clear tmp2 for rest bytes */ | |
399 | "subqw #2,%1\n\t" | |
400 | "jlt 5f\n\t" | |
401 | "movew %2@+,%5\n\t" /* have rest >= 2: get word */ | |
402 | "movew %5,%3@+\n\t" | |
403 | "swap %5\n\t" /* into bits 16..31 */ | |
404 | "tstw %1\n\t" /* another byte? */ | |
405 | "jeq 6f\n" | |
406 | "5:\t" | |
407 | "moveb %2@,%5\n\t" /* have odd rest: get byte */ | |
408 | "moveb %5,%3@+\n\t" | |
409 | "lslw #8,%5\n" /* into bits 8..15; 16..31 untouched */ | |
410 | "6:\t" | |
411 | "addl %5,%0\n\t" /* now add rest long to sum */ | |
412 | "clrl %5\n\t" | |
413 | "addxl %5,%0\n" /* add X bit */ | |
414 | "7:\t" | |
415 | : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst), | |
416 | "=&d" (tmp1), "=&d" (tmp2) | |
417 | : "0" (sum), "1" (len), "2" (src), "3" (dst) | |
418 | ); | |
419 | return(sum); | |
420 | } | |
421 | EXPORT_SYMBOL(csum_partial_copy_nocheck); |