#ifndef __UM_CHECKSUM_H
#define __UM_CHECKSUM_H

#include <linux/string.h>
#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
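
/*
 * Usage sketch (illustrative only, not part of this header): accumulating a
 * running checksum over two fragments, as the comment above describes. The
 * fragment pointers, lengths and helper name are hypothetical; the unfolded
 * result would typically be fed into csum_fold() or csum_tcpudp_magic()
 * below.
 */
static inline __wsum example_csum_two_fragments(const void *frag1, int len1,
						const void *frag2, int len2)
{
	__wsum sum;

	sum = csum_partial(frag1, len1, 0);	/* first fragment, even length */
	return csum_partial(frag2, len2, sum);	/* last fragment may be odd */
}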

/*
 * Note: when you get a NULL pointer exception here this means someone
 * passed in an incorrect kernel address to one of these functions.
 *
 * If you use these functions directly please don't forget the
 * access_ok().
 */

static __inline__
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, sum);
}

/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit (or
 * even better, a 64-bit) boundary
 */

static __inline__
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
				   int len, __wsum sum, int *err_ptr)
{
	if (copy_from_user(dst, src, len)) {
		*err_ptr = -EFAULT;
		return (__force __wsum)-1;
	}

	return csum_partial(dst, len, sum);
}
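
/*
 * Usage sketch (illustrative only, not part of this header): one way a
 * caller might consume the err_ptr contract above. The helper name is
 * hypothetical, and a real caller would also have performed the access_ok()
 * check mentioned in the note above before touching the user pointer.
 */
static inline __wsum example_copy_and_csum_from_user(const void __user *src,
						     void *dst, int len)
{
	int err = 0;
	__wsum sum = csum_partial_copy_from_user(src, dst, len, 0, &err);

	if (err)		/* -EFAULT: faulting user address */
		return 0;	/* sketch: report an empty checksum on error */
	return sum;
}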

/**
 * csum_fold - Fold and invert a 32bit checksum.
 * @sum: 32bit unfolded sum
 *
 * Fold a 32bit running checksum to 16bit and invert it. This is usually
 * the last step before putting a checksum into a packet.
 * Make sure not to mix with 64bit checksums.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	__asm__(
		" addl %1,%0\n"
		" adcl $0xffff,%0"
		: "=r" (sum)
		: "r" ((__force u32)sum << 16),
		  "0" ((__force u32)sum & 0xffff0000)
	);
	return (__force __sum16)(~(__force u32)sum >> 16);
}
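
/*
 * Worked example (illustrative only) of the fold performed above, assuming
 * the unfolded sum 0x00012345: the high and low 16-bit halves add to
 * 0x0001 + 0x2345 = 0x2346, and the one's-complement inversion yields the
 * 16-bit result 0xdcb9 that would go on the wire.
 */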

/**
 * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
 * @saddr: source address
 * @daddr: destination address
 * @len: length of packet
 * @proto: ip protocol of packet
 * @sum: initial sum to be added in (32bit unfolded)
 *
 * Returns the pseudo header checksum of the input data. Result is
 * 32bit unfolded.
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		   unsigned short proto, __wsum sum)
{
	asm(" addl %1, %0\n"
	    " adcl %2, %0\n"
	    " adcl %3, %0\n"
	    " adcl $0, %0\n"
	    : "=r" (sum)
	    : "g" (daddr), "g" (saddr), "g" ((len + proto) << 8), "0" (sum));
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
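
/*
 * Usage sketch (illustrative only, not part of this header): computing a
 * complemented L4 checksum for a linear buffer. saddr, daddr, len, proto and
 * the helper name are hypothetical caller inputs (proto would be IPPROTO_UDP
 * for UDP, for instance), and the buffer's own checksum field is assumed to
 * have been zeroed before summing.
 */
static inline __sum16 example_l4_checksum(__be32 saddr, __be32 daddr,
					  const void *l4hdr,
					  unsigned short len,
					  unsigned short proto)
{
	__wsum sum = csum_partial(l4hdr, len, 0);	/* L4 header + payload */

	return csum_tcpudp_magic(saddr, daddr, len, proto, sum);
}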

/**
 * ip_fast_csum - Compute the IPv4 header checksum efficiently.
 * @iph: ipv4 header
 * @ihl: length of header / 4
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;

	asm(	" movl (%1), %0\n"
		" subl $4, %2\n"
		" jbe 2f\n"
		" addl 4(%1), %0\n"
		" adcl 8(%1), %0\n"
		" adcl 12(%1), %0\n"
		"1: adcl 16(%1), %0\n"
		" lea 4(%1), %1\n"
		" decl %2\n"
		" jne 1b\n"
		" adcl $0, %0\n"
		" movl %0, %2\n"
		" shrl $16, %0\n"
		" addw %w2, %w0\n"
		" adcl $0, %0\n"
		" notl %0\n"
		"2:"
	/* Since the input registers which are loaded with iph and ihl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
		: "=r" (sum), "=r" (iph), "=r" (ihl)
		: "1" (iph), "2" (ihl)
		: "memory");
	return (__force __sum16)sum;
}
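
/*
 * Usage sketch (illustrative only, not part of this header): on receive, a
 * header that still carries a correct checksum sums to zero, so validity can
 * be tested directly. iph points at the header and ihl is its length in
 * 32-bit words (the iphdr "ihl" field); the helper name is hypothetical.
 */
static inline int example_ip_header_ok(const void *iph, unsigned int ihl)
{
	return ip_fast_csum(iph, ihl) == 0;	/* 0 means the header is intact */
}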

#ifdef CONFIG_X86_32
# include "checksum_32.h"
#else
# include "checksum_64.h"
#endif

#endif