#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
 */

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
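
/*
 * Example (illustrative sketch, not part of the original header): the
 * running 32-bit sum lets a buffer be checksummed in pieces, e.g. two
 * hypothetical fragments frag0/frag1, folding only at the very end:
 *
 *	__wsum s = csum_partial(frag0, len0, 0);
 *	s = csum_partial(frag1, len1, s);	// len0 must be even
 *	__sum16 csum = csum_fold(s);		// csum_fold() is defined below
 */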

/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit (or,
 * even better, 64-bit) boundary
 */

asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
					    int len, __wsum sum,
					    int *src_err_ptr, int *dst_err_ptr);

/*
 * Note: if you get a NULL pointer exception here, it means someone
 * passed an incorrect kernel address to one of these functions.
 *
 * If you use these functions directly please don't forget the
 * access_ok().
 */
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum)
{
	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}

static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
				   int len, __wsum sum, int *err_ptr)
{
	return csum_partial_copy_generic((__force const void *)src, dst,
					 len, sum, err_ptr, NULL);
}
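
/*
 * Example (sketch; usrc/kdst are hypothetical caller buffers): since
 * csum_partial_copy_from_user() does not call access_ok() itself, the
 * caller is expected to check the range first and test err_ptr afterwards:
 *
 *	int err = 0;
 *	if (!access_ok(VERIFY_READ, usrc, len))
 *		return -EFAULT;
 *	sum = csum_partial_copy_from_user(usrc, kdst, len, sum, &err);
 *	if (err)
 *		return err;	// set to -EFAULT if the copy faulted
 */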

/*
 * Fold a partial checksum
 */

static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;
	__asm__("swap.w %0, %1\n\t"
		"extu.w %0, %0\n\t"
		"extu.w %1, %1\n\t"
		"add	%1, %0\n\t"
		"swap.w %0, %1\n\t"
		"add	%1, %0\n\t"
		"not	%0, %0\n\t"
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum)
		: "t");
	return (__force __sum16)sum;
}
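
/*
 * Worked example (added for illustration): folding adds the two 16-bit
 * halves with end-around carry, then complements.  For sum = 0xFFFF0001:
 * 0xFFFF + 0x0001 = 0x10000; the carry wraps around: 0x0000 + 1 = 0x0001;
 * the complement yields the final 16-bit checksum 0xFFFE.
 */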

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
 * for linux by Arnt Gulbrandsen.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, __dummy0, __dummy1;

	__asm__ __volatile__(
		"mov.l	@%1+, %0\n\t"
		"mov.l	@%1+, %3\n\t"
		"add	#-2, %2\n\t"
		"clrt\n\t"
		"1:\t"
		"addc	%3, %0\n\t"
		"movt	%4\n\t"
		"mov.l	@%1+, %3\n\t"
		"dt	%2\n\t"
		"bf/s	1b\n\t"
		" cmp/eq #1, %4\n\t"
		"addc	%3, %0\n\t"
		"addc	%2, %0"	    /* Here %2 is 0, add carry-bit */
	/* Since the input registers which are loaded with iph and ihl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
	: "1" (iph), "2" (ihl)
	: "t");

	return csum_fold(sum);
}
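
/*
 * Example (sketch; struct iphdr and ip_hdr() come from <linux/ip.h>, which
 * this header does not itself include): a valid received IPv4 header, with
 * its checksum field in place, must sum to zero:
 *
 *	struct iphdr *iph = ip_hdr(skb);
 *	if (ip_fast_csum(iph, iph->ihl) != 0)
 *		goto drop;	// corrupt header
 */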

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
#ifdef __LITTLE_ENDIAN__
	unsigned long len_proto = (proto + len) << 8;
#else
	unsigned long len_proto = proto + len;
#endif
	__asm__("clrt\n\t"
		"addc	%0, %1\n\t"
		"addc	%2, %1\n\t"
		"addc	%3, %1\n\t"
		"movt	%0\n\t"
		"add	%1, %0"
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
		: "t");

	return sum;
}
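
/*
 * Note on len_proto (comment added for illustration): the pseudo-header
 * contributes htons(len) and htons(proto) to the one's complement sum.  On
 * little-endian SH, shifting (proto + len) left by 8 folds to the same
 * value as summing the two byte-swapped words.  E.g. for len = 0xABCD,
 * proto = 0x11: (0x11 + 0xABCD) << 8 = 0xABDE00, which folds to
 * 0x00AB + 0xDE00 = 0xDEAB, exactly 0xCDAB + 0x1100.
 */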

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
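
/*
 * Example (sketch, assuming a UDP transmit path where the payload has
 * already been summed into "csum" via csum_partial()): the pseudo-header
 * is folded in last to produce the value stored in the UDP header:
 *
 *	uh->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP, csum);
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;	// 0 means "no checksum" in UDP
 */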

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, unsigned short proto,
				      __wsum sum)
{
	unsigned int __dummy;
	__asm__("clrt\n\t"
		"mov.l	@(0,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(4,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(8,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(12,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(0,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(4,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(8,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(12,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"addc	%4, %0\n\t"
		"addc	%5, %0\n\t"
		"movt	%1\n\t"
		"add	%1, %0\n"
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "t");

	return csum_fold(sum);
}
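
/*
 * Example (sketch; struct ipv6hdr comes from <linux/ipv6.h>): the IPv6
 * pseudo-header covers both 128-bit addresses plus length and next-header,
 * here folded on top of an already computed payload sum "csum":
 *
 *	__sum16 check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
 *					payload_len, IPPROTO_TCP, csum);
 */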

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
					   void __user *dst,
					   int len, __wsum sum,
					   int *err_ptr)
{
	if (access_ok(VERIFY_WRITE, dst, len))
		return csum_partial_copy_generic((__force const void *)src,
						 dst, len, sum, NULL, err_ptr);

	if (len)
		*err_ptr = -EFAULT;

	return (__force __wsum)-1; /* invalid checksum */
}
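
/*
 * Example (sketch; ksrc/udst are hypothetical kernel/user buffers): the
 * caller distinguishes a fault from a valid sum via err_ptr, since -1 is
 * itself a representable checksum value:
 *
 *	int err = 0;
 *	sum = csum_and_copy_to_user(ksrc, udst, len, sum, &err);
 *	if (err)
 *		return -EFAULT;
 */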
#endif /* __ASM_SH_CHECKSUM_H */