/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
 *		Fixed some nasty bugs, causing some horrible crashes.
 *		A: At some points, the sum (%0) was used as
 *		length-counter instead of the length counter
 *		(%1). Thanks to Roman Hodek for pointing this out.
 *		B: GCC seems to mess up if one uses too many
 *		data-registers to hold input values and one tries to
 *		specify d0 and d1 as scratch registers. Letting gcc
 *		choose these registers itself solves the problem.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * 1998/8/31	Andreas Schwab:
 *		Zero out rest of buffer on exception in
 *		csum_partial_copy_from_user.
 */

#include <linux/module.h>
#include <net/checksum.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */

__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	unsigned long tmp1, tmp2;
	/*
	 * Experiments with ethernet and slip connections show that buff
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	__asm__("movel %2,%3\n\t"
		"btst #1,%3\n\t"	/* Check alignment */
		"jeq 2f\n\t"
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"jgt 1f\n\t"
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"jra 4f\n"
		"1:\t"
		"addw %2@+,%0\n\t"	/* add first word to sum */
		"clrl %3\n\t"
		"addxl %3,%0\n"		/* add X bit */
		"2:\t"
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%3\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"subql #1,%1\n"
		"1:\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"dbra %1,1b\n\t"
		"clrl %4\n\t"
		"addxl %4,%0\n\t"	/* add X bit */
		"clrw %1\n\t"
		"subql #1,%1\n\t"
		"jcc 1b\n"
		"2:\t"
		"movel %3,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%3\n\t"	/* number of rest longs */
		"jeq 4f\n\t"
		"lsrw #2,%3\n\t"
		"subqw #1,%3\n"
		"3:\t"
		/* loop for rest longs */
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"dbra %3,3b\n\t"
		"clrl %4\n\t"
		"addxl %4,%0\n"		/* add X bit */
		"4:\t"
		/* now check for rest bytes that do not fit into longs */
		"andw #3,%1\n\t"
		"jeq 7f\n\t"
		"clrl %4\n\t"		/* clear tmp2 for rest bytes */
		"subqw #2,%1\n\t"
		"jlt 5f\n\t"
		"movew %2@+,%4\n\t"	/* have rest >= 2: get word */
		"swap %4\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"jeq 6f\n"
		"5:\t"
		"moveb %2@,%4\n\t"	/* have odd rest: get byte */
		"lslw #8,%4\n\t"	/* into bits 8..15; 16..31 untouched */
		"6:\t"
		"addl %4,%0\n\t"	/* now add rest long to sum */
		"clrl %4\n\t"
		"addxl %4,%0\n"		/* add X bit */
		"7:\t"
		: "=d" (sum), "=d" (len), "=a" (buff),
		  "=&d" (tmp1), "=&d" (tmp2)
		: "0" (sum), "1" (len), "2" (buff)
		);
	return sum;
}

EXPORT_SYMBOL(csum_partial);
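
/*
 * For reference, a minimal portable sketch of what the assembly above
 * computes: a 32-bit ones'-complement accumulation over the buffer.
 * The helper below is illustrative only and not part of this file; it
 * yields the same 16-bit checksum as the assembly once both results are
 * folded down with csum_fold(), although the intermediate 32-bit sums
 * may differ (the assembly adds 32-bit longs, this sketch adds 16-bit
 * big-endian words).
 *
 *	static unsigned int csum_partial_ref(const unsigned char *buff,
 *					     int len, unsigned int sum)
 *	{
 *		unsigned long long acc = sum;
 *
 *		while (len > 1) {	// add big-endian 16-bit words
 *			acc += (buff[0] << 8) | buff[1];
 *			buff += 2;
 *			len -= 2;
 *		}
 *		if (len)		// odd trailing byte fills bits 8..15
 *			acc += buff[0] << 8;
 *		while (acc >> 32)	// fold carries back into the low long
 *			acc = (acc & 0xffffffffULL) + (acc >> 32);
 *		return (unsigned int)acc;
 *	}
 */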

/*
 * copy from user space while checksumming, with exception handling.
 */

__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum sum, int *csum_err)
{
	/*
	 * GCC doesn't like more than 10 operands for the asm
	 * statements so we have to use tmp2 for the error
	 * code.
	 */
	unsigned long tmp1, tmp2;

	__asm__("movel %2,%4\n\t"
		"btst #1,%4\n\t"	/* Check alignment */
		"jeq 2f\n\t"
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"jgt 1f\n\t"
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"jra 4f\n"
		"1:\n"
		"10:\t"
		"movesw %2@+,%4\n\t"	/* add first word to sum */
		"addw %4,%0\n\t"
		"movew %4,%3@+\n\t"
		"clrl %4\n\t"
		"addxl %4,%0\n"		/* add X bit */
		"2:\t"
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%4\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"subql #1,%1\n"
		"1:\n"
		"11:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"12:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"13:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"14:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"15:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"16:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"17:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"18:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"dbra %1,1b\n\t"
		"clrl %5\n\t"
		"addxl %5,%0\n\t"	/* add X bit */
		"clrw %1\n\t"
		"subql #1,%1\n\t"
		"jcc 1b\n"
		"2:\t"
		"movel %4,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%4\n\t"	/* number of rest longs */
		"jeq 4f\n\t"
		"lsrw #2,%4\n\t"
		"subqw #1,%4\n"
		"3:\n"
		/* loop for rest longs */
		"19:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"dbra %4,3b\n\t"
		"clrl %5\n\t"
		"addxl %5,%0\n"		/* add X bit */
		"4:\t"
		/* now check for rest bytes that do not fit into longs */
		"andw #3,%1\n\t"
		"jeq 7f\n\t"
		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
		"subqw #2,%1\n\t"
		"jlt 5f\n\t"
		"20:\t"
		"movesw %2@+,%5\n\t"	/* have rest >= 2: get word */
		"movew %5,%3@+\n\t"
		"swap %5\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"jeq 6f\n"
		"5:\n"
		"21:\t"
		"movesb %2@,%5\n\t"	/* have odd rest: get byte */
		"moveb %5,%3@+\n\t"
		"lslw #8,%5\n\t"	/* into bits 8..15; 16..31 untouched */
		"6:\t"
		"addl %5,%0\n\t"	/* now add rest long to sum */
		"clrl %5\n\t"
		"addxl %5,%0\n\t"	/* add X bit */
		"7:\t"
		"clrl %5\n"		/* no error - clear return value */
		"8:\n"
		".section .fixup,\"ax\"\n"
		".even\n"
		/* If any exception occurs zero out the rest.
		   Similarities with the code above are intentional :-) */
		"90:\t"
		"clrw %3@+\n\t"
		"movel %1,%4\n\t"
		"lsrl #5,%1\n\t"
		"jeq 1f\n\t"
		"subql #1,%1\n"
		"91:\t"
		"clrl %3@+\n"
		"92:\t"
		"clrl %3@+\n"
		"93:\t"
		"clrl %3@+\n"
		"94:\t"
		"clrl %3@+\n"
		"95:\t"
		"clrl %3@+\n"
		"96:\t"
		"clrl %3@+\n"
		"97:\t"
		"clrl %3@+\n"
		"98:\t"
		"clrl %3@+\n\t"
		"dbra %1,91b\n\t"
		"clrw %1\n\t"
		"subql #1,%1\n\t"
		"jcc 91b\n"
		"1:\t"
		"movel %4,%1\n\t"
		"andw #0x1c,%4\n\t"
		"jeq 1f\n\t"
		"lsrw #2,%4\n\t"
		"subqw #1,%4\n"
		"99:\t"
		"clrl %3@+\n\t"
		"dbra %4,99b\n\t"
		"1:\t"
		"andw #3,%1\n\t"
		"jeq 9f\n"
		"100:\t"
		"clrw %3@+\n\t"
		"tstw %1\n\t"
		"jeq 9f\n"
		"101:\t"
		"clrb %3@+\n"
		"9:\t"
#define STR(X) STR1(X)
#define STR1(X) #X
		"moveq #-" STR(EFAULT) ",%5\n\t"
		"jra 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		".long 10b,90b\n"
		".long 11b,91b\n"
		".long 12b,92b\n"
		".long 13b,93b\n"
		".long 14b,94b\n"
		".long 15b,95b\n"
		".long 16b,96b\n"
		".long 17b,97b\n"
		".long 18b,98b\n"
		".long 19b,99b\n"
		".long 20b,100b\n"
		".long 21b,101b\n"
		".previous"
		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
		  "=&d" (tmp1), "=d" (tmp2)
		: "0" (sum), "1" (len), "2" (src), "3" (dst)
		);

	*csum_err = tmp2;

	return sum;
}

EXPORT_SYMBOL(csum_partial_copy_from_user);
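
/*
 * A sketch of how a caller might consume the error convention above:
 * on success *csum_err is cleared to 0, and on a faulting user access
 * it is set to -EFAULT with the remainder of the destination zeroed
 * out. The wrapper name below is hypothetical, for illustration only.
 *
 *	static int copy_and_csum(const void __user *usrc, void *kdst,
 *				 int len, __wsum *csump)
 *	{
 *		int err;
 *
 *		*csump = csum_partial_copy_from_user(usrc, kdst, len,
 *						     0, &err);
 *		return err;	// 0 on success, -EFAULT on a fault
 *	}
 */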

/*
 * copy from kernel space while checksumming, otherwise like csum_partial
 */

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
	unsigned long tmp1, tmp2;
	__asm__("movel %2,%4\n\t"
		"btst #1,%4\n\t"	/* Check alignment */
		"jeq 2f\n\t"
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"jgt 1f\n\t"
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"jra 4f\n"
		"1:\t"
		"movew %2@+,%4\n\t"	/* add first word to sum */
		"addw %4,%0\n\t"
		"movew %4,%3@+\n\t"
		"clrl %4\n\t"
		"addxl %4,%0\n"		/* add X bit */
		"2:\t"
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%4\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"subql #1,%1\n"
		"1:\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"dbra %1,1b\n\t"
		"clrl %5\n\t"
		"addxl %5,%0\n\t"	/* add X bit */
		"clrw %1\n\t"
		"subql #1,%1\n\t"
		"jcc 1b\n"
		"2:\t"
		"movel %4,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%4\n\t"	/* number of rest longs */
		"jeq 4f\n\t"
		"lsrw #2,%4\n\t"
		"subqw #1,%4\n"
		"3:\t"
		/* loop for rest longs */
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"dbra %4,3b\n\t"
		"clrl %5\n\t"
		"addxl %5,%0\n"		/* add X bit */
		"4:\t"
		/* now check for rest bytes that do not fit into longs */
		"andw #3,%1\n\t"
		"jeq 7f\n\t"
		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
		"subqw #2,%1\n\t"
		"jlt 5f\n\t"
		"movew %2@+,%5\n\t"	/* have rest >= 2: get word */
		"movew %5,%3@+\n\t"
		"swap %5\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"jeq 6f\n"
		"5:\t"
		"moveb %2@,%5\n\t"	/* have odd rest: get byte */
		"moveb %5,%3@+\n\t"
		"lslw #8,%5\n"		/* into bits 8..15; 16..31 untouched */
		"6:\t"
		"addl %5,%0\n\t"	/* now add rest long to sum */
		"clrl %5\n\t"
		"addxl %5,%0\n"		/* add X bit */
		"7:\t"
		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
		  "=&d" (tmp1), "=&d" (tmp2)
		: "0" (sum), "1" (len), "2" (src), "3" (dst)
		);
	return sum;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
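
/*
 * All three routines return a 32-bit partial sum; the final 16-bit
 * Internet checksum is obtained by folding that sum with csum_fold()
 * (available via <net/checksum.h>). A minimal sketch, with the
 * function name below hypothetical:
 *
 *	static __sum16 example_csum(const void *buf, int len)
 *	{
 *		// fold the 32-bit partial sum to the on-wire 16 bits
 *		return csum_fold(csum_partial(buf, len, 0));
 *	}
 */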