/*
2 *
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * IP/TCP/UDP checksumming routines
8 *
9 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
10 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
11 * Tom May, <ftom@netcom.com>
12 * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
13 * Lots of code moved from tcp.c and ip.c; see those files
14 * for more names.
15 *
16 * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
17 * Fixed some nasty bugs, causing some horrible crashes.
18 * A: At some points, the sum (%0) was used as
19 * length-counter instead of the length counter
20 * (%1). Thanks to Roman Hodek for pointing this out.
21 * B: GCC seems to mess up if one uses too many
22 * data-registers to hold input values and one tries to
23 * specify d0 and d1 as scratch registers. Letting gcc
24 * choose these registers itself solves the problem.
25 *
26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License
28 * as published by the Free Software Foundation; either version
29 * 2 of the License, or (at your option) any later version.
30 */
31
32/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
33 kills, so most of the assembly has to go. */
34
Paul Gortmaker8bc3bcc2011-11-16 21:29:17 -050035#include <linux/export.h>
Arnd Bergmann26a28fa2009-05-13 22:56:38 +000036#include <net/checksum.h>
37
38#include <asm/byteorder.h>
39
Arnd Bergmann20c1f642009-06-23 21:37:26 +020040#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
	/*
	 * Fold a 32-bit one's-complement sum into 16 bits with
	 * end-around carry.  The first fold may itself carry into
	 * bit 16, so fold a second time; after that the value is
	 * guaranteed to fit in 16 bits.
	 */
	unsigned int folded = (x >> 16) + (x & 0xffff);

	folded = (folded >> 16) + (folded & 0xffff);
	return folded;
}
49
karl beldan150ae0e2015-01-28 10:58:11 +010050static inline u32 from64to32(u64 x)
51{
52 /* add up 32-bit and 32-bit for 32+c bit */
53 x = (x & 0xffffffff) + (x >> 32);
54 /* add up carry.. */
55 x = (x & 0xffffffff) + (x >> 32);
56 return (u32)x;
57}
58
/*
 * Compute the 16-bit one's-complement sum of @buff over @len bytes,
 * returned in the low 16 bits of the result.  All 16- and 32-bit
 * memory accesses are made on naturally aligned addresses only (see
 * the note at the top of the file: unaligned access is fatal on some
 * targets, e.g. m68knommu).
 */
static unsigned int do_csum(const unsigned char *buff, int len)
{
	int odd;
	unsigned int result = 0;

	if (len <= 0)
		goto out;
	/*
	 * If the buffer starts on an odd address, consume one byte so
	 * the remainder can be summed a word at a time.  Which byte
	 * lane the leading byte lands in depends on endianness; the
	 * byte swap at the end compensates for the odd start.
	 */
	odd = 1 & (unsigned long) buff;
	if (odd) {
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		/* One 16-bit add to reach a 4-byte boundary. */
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			/*
			 * Main loop: sum aligned 32-bit words.  The carry
			 * out of each 32-bit addition is tracked separately
			 * and added back in on the next iteration (and once
			 * more after the loop).
			 */
			const unsigned char *end = buff + ((unsigned)len & ~3);
			unsigned int carry = 0;
			do {
				unsigned int w = *(unsigned int *) buff;
				buff += 4;
				result += carry;
				result += w;
				/* unsigned wrap => a carry was generated */
				carry = (w > result);
			} while (buff < end);
			result += carry;
			/*
			 * Partial fold so the 16-bit adds below cannot
			 * overflow the 32-bit accumulator.
			 */
			result = (result & 0xffff) + (result >> 16);
		}
		/* Trailing 16-bit word, if any. */
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	/* Trailing byte goes in the opposite lane to the leading one. */
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	/*
	 * A one's-complement sum is endian-agnostic up to a byte swap;
	 * starting one byte off shifted every byte into the wrong lane,
	 * so swap the two halves of the 16-bit result to undo it.
	 */
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}
Arnd Bergmann20c1f642009-06-23 21:37:26 +0200112#endif
Arnd Bergmann26a28fa2009-05-13 22:56:38 +0000113
Vineet Gupta64e69072013-01-18 15:12:16 +0530114#ifndef ip_fast_csum
Arnd Bergmann26a28fa2009-05-13 22:56:38 +0000115/*
116 * This is a version of ip_compute_csum() optimized for IP headers,
117 * which always checksum on 4 octet boundaries.
118 */
119__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
120{
121 return (__force __sum16)~do_csum(iph, ihl*4);
122}
123EXPORT_SYMBOL(ip_fast_csum);
Vineet Gupta64e69072013-01-18 15:12:16 +0530124#endif
Arnd Bergmann26a28fa2009-05-13 22:56:38 +0000125
126/*
127 * computes the checksum of a memory block at buff, length len,
128 * and adds in "sum" (32-bit)
129 *
130 * returns a 32-bit number suitable for feeding into itself
131 * or csum_tcpudp_magic
132 *
133 * this function must be called with even lengths, except
134 * for the last fragment, which may be odd
135 *
136 * it's best to have buff aligned on a 32-bit boundary
137 */
138__wsum csum_partial(const void *buff, int len, __wsum wsum)
139{
140 unsigned int sum = (__force unsigned int)wsum;
141 unsigned int result = do_csum(buff, len);
142
143 /* add in old sum, and carry.. */
144 result += sum;
145 if (sum > result)
146 result += 1;
147 return (__force __wsum)result;
148}
149EXPORT_SYMBOL(csum_partial);
150
151/*
152 * this routine is used for miscellaneous IP-like checksums, mainly
153 * in icmp.c
154 */
155__sum16 ip_compute_csum(const void *buff, int len)
156{
157 return (__force __sum16)~do_csum(buff, len);
158}
159EXPORT_SYMBOL(ip_compute_csum);
160
161/*
162 * copy from fs while checksumming, otherwise like csum_partial
163 */
164__wsum
165csum_partial_copy_from_user(const void __user *src, void *dst, int len,
166 __wsum sum, int *csum_err)
167{
168 int missing;
169
170 missing = __copy_from_user(dst, src, len);
171 if (missing) {
172 memset(dst + len - missing, 0, missing);
173 *csum_err = -EFAULT;
174 } else
175 *csum_err = 0;
176
177 return csum_partial(dst, len, sum);
178}
179EXPORT_SYMBOL(csum_partial_copy_from_user);
180
181/*
182 * copy from ds while checksumming, otherwise like csum_partial
183 */
184__wsum
185csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
186{
187 memcpy(dst, src, len);
188 return csum_partial(dst, len, sum);
189}
190EXPORT_SYMBOL(csum_partial_copy);
191
192#ifndef csum_tcpudp_nofold
193__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
194 unsigned short len,
195 unsigned short proto,
196 __wsum sum)
197{
198 unsigned long long s = (__force u32)sum;
199
200 s += (__force u32)saddr;
201 s += (__force u32)daddr;
202#ifdef __BIG_ENDIAN
203 s += proto + len;
204#else
205 s += (proto + len) << 8;
206#endif
karl beldan150ae0e2015-01-28 10:58:11 +0100207 return (__force __wsum)from64to32(s);
Arnd Bergmann26a28fa2009-05-13 22:56:38 +0000208}
209EXPORT_SYMBOL(csum_tcpudp_nofold);
210#endif