/*
 *		INET		An implementation of the TCP/IP protocol suite for the LINUX
 *				operating system.  INET is implemented using the BSD Socket
 *				interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Version:	$Id: ip_fragment.c,v 1.59 2002/01/12 07:54:56 davem Exp $
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <Alan.Cox@linux.org>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

/* Fragment cache limits. We will commit 256K at one time. Should we
 * cross that limit we will prune down to 192K. This should cope with
 * even the most extreme cases without allowing an attacker to measurably
 * harm machine performance.
 */
int sysctl_ipfrag_high_thresh __read_mostly = 256*1024;
int sysctl_ipfrag_low_thresh __read_mostly = 192*1024;

int sysctl_ipfrag_max_dist __read_mostly = 64;

/* Important NOTE! Fragment queue must be destroyed before MSL expires.
 * RFC 791 is wrong in proposing to prolong the timer by the TTL on each
 * fragment arrival.
 */
int sysctl_ipfrag_time __read_mostly = IP_FRAG_TIME;
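
/*
 * These knobs are exported through the ipv4 sysctl table; on a stock kernel
 * they normally show up as /proc/sys/net/ipv4/ipfrag_high_thresh,
 * ipfrag_low_thresh, ipfrag_time, ipfrag_max_dist and ipfrag_secret_interval
 * (the registration itself lives in sysctl_net_ipv4.c, not in this file).
 */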

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb*)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static struct inet_frags ip4_frags;

int ip_frag_nqueues(void)
{
	return ip4_frags.nqueues;
}

int ip_frag_mem(void)
{
	return atomic_read(&ip4_frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

static __inline__ void __ipq_unlink(struct ipq *qp)
{
	hlist_del(&qp->q.list);
	list_del(&qp->q.lru_list);
	ip4_frags.nqueues--;
}

static __inline__ void ipq_unlink(struct ipq *ipq)
{
	write_lock(&ip4_frags.lock);
	__ipq_unlink(ipq);
	write_unlock(&ip4_frags.lock);
}

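/*
 * Hash a fragment queue key -- (id, saddr, daddr, protocol) -- together with
 * the random value ip4_frags.rnd into one of the INETFRAGS_HASHSZ buckets.
 * The random salt (periodically refreshed below) makes it hard for a remote
 * sender to deliberately collide many queues into one chain.
 */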
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

int sysctl_ipfrag_secret_interval __read_mostly = 10 * 60 * HZ;

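/*
 * Every ipfrag_secret_interval jiffies, pick a new random salt and rechain
 * every queue into its new hash bucket, so that the hash layout cannot be
 * learned and attacked over time.
 */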
static void ipfrag_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;
	int i;

	write_lock(&ip4_frags.lock);
	get_random_bytes(&ip4_frags.rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct ipq *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &ip4_frags.hash[i], q.list) {
			unsigned int hval = ipqhashfn(q->id, q->saddr,
						      q->daddr, q->protocol);

			if (hval != i) {
				hlist_del(&q->q.list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->q.list, &ip4_frags.hash[hval]);
			}
		}
	}
	write_unlock(&ip4_frags.lock);

	mod_timer(&ip4_frags.secret_timer, now + sysctl_ipfrag_secret_interval);
}

/* Memory Tracking Functions. */
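/* The optional "work" argument lets the evictor track how many bytes it has
 * released so far; passing NULL just updates the global ip4_frags.mem count.
 */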
static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &ip4_frags.mem);
	kfree_skb(skb);
}

static __inline__ void frag_free_queue(struct ipq *qp, int *work)
{
	if (work)
		*work -= sizeof(struct ipq);
	atomic_sub(sizeof(struct ipq), &ip4_frags.mem);
	kfree(qp);
}

static __inline__ struct ipq *frag_alloc_queue(void)
{
	struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);

	if (!qp)
		return NULL;
	atomic_add(sizeof(struct ipq), &ip4_frags.mem);
	return qp;
}


/* Destruction primitives. */

/* Complete destruction of ipq. */
static void ip_frag_destroy(struct ipq *qp, int *work)
{
	struct sk_buff *fp;

	BUG_TRAP(qp->q.last_in&COMPLETE);
	BUG_TRAP(del_timer(&qp->q.timer) == 0);

	if (qp->peer)
		inet_putpeer(qp->peer);

	/* Release all fragment data. */
	fp = qp->q.fragments;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(fp, work);
		fp = xp;
	}

	/* Finally, release the queue descriptor itself. */
	frag_free_queue(qp, work);
}

static __inline__ void ipq_put(struct ipq *ipq, int *work)
{
	if (atomic_dec_and_test(&ipq->q.refcnt))
		ip_frag_destroy(ipq, work);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference to it.
 */
static void ipq_kill(struct ipq *ipq)
{
	if (del_timer(&ipq->q.timer))
		atomic_dec(&ipq->q.refcnt);

	if (!(ipq->q.last_in & COMPLETE)) {
		ipq_unlink(ipq);
		atomic_dec(&ipq->q.refcnt);
		ipq->q.last_in |= COMPLETE;
	}
}

/* Memory limiting on fragments. Evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
static void ip_evictor(void)
{
	struct ipq *qp;
	struct list_head *tmp;
	int work;

	work = atomic_read(&ip4_frags.mem) - sysctl_ipfrag_low_thresh;
	if (work <= 0)
		return;

	while (work > 0) {
		read_lock(&ip4_frags.lock);
		if (list_empty(&ip4_frags.lru_list)) {
			read_unlock(&ip4_frags.lock);
			return;
		}
		tmp = ip4_frags.lru_list.next;
		qp = list_entry(tmp, struct ipq, q.lru_list);
		atomic_inc(&qp->q.refcnt);
		read_unlock(&ip4_frags.lock);

		spin_lock(&qp->q.lock);
		if (!(qp->q.last_in&COMPLETE))
			ipq_kill(qp);
		spin_unlock(&qp->q.lock);

		ipq_put(qp, &work);
		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	}
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp = (struct ipq *) arg;

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in&FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;
		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) {
			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
			dev_put(head->dev);
		}
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp, NULL);
}

/* Creation primitives. */

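/*
 * Insert a freshly allocated queue into the hash table. Under SMP another
 * CPU may have created an identical queue while we only held the read lock
 * in ip_find(), so re-check the chain under the write lock and, if a match
 * exists, drop our new queue and return the existing one instead.
 */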
static struct ipq *ip_frag_intern(struct ipq *qp_in)
{
	struct ipq *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&ip4_frags.lock);
	hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr,
			 qp_in->protocol);
#ifdef CONFIG_SMP
	/* With SMP another cpu may have created an identical entry in the
	 * hash table while we were trading the read lock for the write
	 * lock, so recheck the chain here.
	 */
	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
		if (qp->id == qp_in->id		&&
		    qp->saddr == qp_in->saddr	&&
		    qp->daddr == qp_in->daddr	&&
		    qp->protocol == qp_in->protocol &&
		    qp->user == qp_in->user) {
			atomic_inc(&qp->q.refcnt);
			write_unlock(&ip4_frags.lock);
			qp_in->q.last_in |= COMPLETE;
			ipq_put(qp_in, NULL);
			return qp;
		}
	}
#endif
	qp = qp_in;

	if (!mod_timer(&qp->q.timer, jiffies + sysctl_ipfrag_time))
		atomic_inc(&qp->q.refcnt);

	atomic_inc(&qp->q.refcnt);
	hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]);
	INIT_LIST_HEAD(&qp->q.lru_list);
	list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list);
	ip4_frags.nqueues++;
	write_unlock(&ip4_frags.lock);
	return qp;
}

/* Add an entry to the 'ipq' queue for a newly received IP datagram. */
static struct ipq *ip_frag_create(struct iphdr *iph, u32 user)
{
	struct ipq *qp;

	if ((qp = frag_alloc_queue()) == NULL)
		goto out_nomem;

	qp->protocol = iph->protocol;
	qp->q.last_in = 0;
	qp->id = iph->id;
	qp->saddr = iph->saddr;
	qp->daddr = iph->daddr;
	qp->user = user;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;
	qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL;

	/* Initialize a timer for this entry. */
	init_timer(&qp->q.timer);
	qp->q.timer.data = (unsigned long) qp;	/* pointer to queue	*/
	qp->q.timer.function = ip_expire;	/* expire function	*/
	spin_lock_init(&qp->q.lock);
	atomic_set(&qp->q.refcnt, 1);

	return ip_frag_intern(qp);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
	return NULL;
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
	__be16 id = iph->id;
	__be32 saddr = iph->saddr;
	__be32 daddr = iph->daddr;
	__u8 protocol = iph->protocol;
	unsigned int hash;
	struct ipq *qp;
	struct hlist_node *n;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(id, saddr, daddr, protocol);
	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
		if (qp->id == id		&&
		    qp->saddr == saddr		&&
		    qp->daddr == daddr		&&
		    qp->protocol == protocol	&&
		    qp->user == user) {
			atomic_inc(&qp->q.refcnt);
			read_unlock(&ip4_frags.lock);
			return qp;
		}
	}
	read_unlock(&ip4_frags.lock);

	return ip_frag_create(iph, user);
}

/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

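/*
 * Flush and reset a queue so it can be reused when ip_frag_too_far() has
 * decided the sender raced too far ahead: drop all fragments collected so
 * far and re-arm the expiry timer. Fails with -ETIMEDOUT if the timer could
 * not be re-armed because it had already expired.
 */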
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + sysctl_ipfrag_time)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(fp, NULL);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;

	return 0;
}

/* Add new segment to existing queue. */
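/* Returns 0 once this fragment completes the datagram (reassembly happens
 * inline via ip_frag_reasm()), -EINPROGRESS while more fragments are still
 * expected, or a negative error; the skb is consumed in all cases.
 */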
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;

	if (qp->q.last_in & COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= LAST_IN;
		qp->q.len = end;
	} else {
		if (end&7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * the new one, so drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	atomic_add(skb->truesize, &ip4_frags.mem);
	if (offset == 0)
		qp->q.last_in |= FIRST_IN;

	if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

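/*
 * Note: the fragment that has just arrived ("prev->next") is morphed into
 * the head of the list below, so the reassembled datagram ends up in the
 * very skb the caller handed to ip_defrag() and no new pointer has to be
 * passed back.
 */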
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG_CB(head)->offset == 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	err = -ENOMEM;
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip4_frags.mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &ip4_frags.mem);

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip4_frags.mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			      "queue %p\n", qp);
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO
			"Oversized IP packet from %d.%d.%d.%d.\n",
			NIPQUAD(qp->saddr));
out_fail:
	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
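/*
 * A typical caller pattern (a sketch; ip_local_deliver() uses the
 * IP_DEFRAG_LOCAL_DELIVER "user" value and looks roughly like this):
 *
 *	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
 *		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
 *			return 0;
 *	}
 *
 * A non-zero return means the skb was consumed (queued while waiting for
 * more fragments, or dropped on error); zero means skb now holds the fully
 * reassembled datagram and processing can continue.
 */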
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;

	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&ip4_frags.mem) > sysctl_ipfrag_high_thresh)
		ip_evictor();

	/* Lookup (or create) queue header */
	if ((qp = ip_find(ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp, NULL);
		return ret;
	}

	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}

void __init ipfrag_init(void)
{
	init_timer(&ip4_frags.secret_timer);
	ip4_frags.secret_timer.function = ipfrag_secret_rebuild;
	ip4_frags.secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval;
	add_timer(&ip4_frags.secret_timer);

	inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);