/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
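/* Upper bound on the "reassembly distance": how many datagrams from the
 * same peer may arrive between two fragments of one datagram before the
 * queue is considered stale (see ip_frag_too_far() below). 0 disables
 * the check; tunable via the ipfrag_max_dist sysctl.
 */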
static int sysctl_ipfrag_max_dist __read_mostly = 64;

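/* Per-fragment state, overlaid on skb->cb: the inet control block must
 * stay first, followed by the byte offset of this fragment within the
 * original datagram.
 */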
struct ipfrag_skb_cb
{
        struct inet_skb_parm    h;
        int                     offset;
};

#define FRAG_CB(skb)    ((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
        struct inet_frag_queue q;

        u32             user;
        __be32          saddr;
        __be32          daddr;
        __be16          id;
        u8              protocol;
        u8              ecn; /* RFC3168 support */
        int             iif;
        unsigned int    rid;
        struct inet_peer *peer;
};

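/* Turn the two ECN bits of the ToS byte into a one-hot bit, so qp->ecn
 * can accumulate the set of ECN codepoints seen across all fragments.
 */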
static inline u8 ip4_frag_ecn(u8 tos)
{
        return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
        return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
        return sum_frag_mem_limit(&net->ipv4.frags);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev);

struct ip4_create_arg {
        struct iphdr *iph;
        u32 user;
};

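/* Hash the (id, saddr, daddr, protocol) tuple into one of the
 * INETFRAGS_HASHSZ buckets; the lazily seeded random value makes the
 * bucket placement hard for off-path attackers to predict.
 */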
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
        net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
        return jhash_3words((__force u32)id << 16 | prot,
                            (__force u32)saddr, (__force u32)daddr,
                            ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
        struct ipq *ipq;

        ipq = container_of(q, struct ipq, q);
        return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
{
        struct ipq *qp;
        struct ip4_create_arg *arg = a;

        qp = container_of(q, struct ipq, q);
        return  qp->id == arg->iph->id &&
                qp->saddr == arg->iph->saddr &&
                qp->daddr == arg->iph->daddr &&
                qp->protocol == arg->iph->protocol &&
                qp->user == arg->user;
}

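/* Constructor hook (ip4_frags.constructor): copy the match keys out of
 * the IP header into the freshly allocated queue and, if the
 * ipfrag_max_dist check is enabled, take a reference on the sender's
 * inet_peer entry.
 */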
static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
        struct ipq *qp = container_of(q, struct ipq, q);
        struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
                                               frags);
        struct net *net = container_of(ipv4, struct net, ipv4);

        struct ip4_create_arg *arg = a;

        qp->protocol = arg->iph->protocol;
        qp->id = arg->iph->id;
        qp->ecn = ip4_frag_ecn(arg->iph->tos);
        qp->saddr = arg->iph->saddr;
        qp->daddr = arg->iph->daddr;
        qp->user = arg->user;
        qp->peer = sysctl_ipfrag_max_dist ?
                inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
        struct ipq *qp;

        qp = container_of(q, struct ipq, q);
        if (qp->peer)
                inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
        inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately, because the caller
 * (and possibly others) still holds a reference to it.
 */
static void ipq_kill(struct ipq *ipq)
{
        inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments.  Evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
static void ip_evictor(struct net *net)
{
        int evicted;

        evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
        if (evicted)
                IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
        struct ipq *qp;
        struct net *net;

        qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
        net = container_of(qp->q.net, struct net, ipv4.frags);

        spin_lock(&qp->q.lock);

        if (qp->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        ipq_kill(qp);

        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

        if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
                struct sk_buff *head = qp->q.fragments;
                const struct iphdr *iph;
                int err;

                rcu_read_lock();
                head->dev = dev_get_by_index_rcu(net, qp->iif);
                if (!head->dev)
                        goto out_rcu_unlock;

                /* skb has no dst, perform route lookup again */
                iph = ip_hdr(head);
                err = ip_route_input_noref(head, iph->daddr, iph->saddr,
                                           iph->tos, head->dev);
                if (err)
                        goto out_rcu_unlock;

                /*
                 * Only an end host needs to send an ICMP
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (qp->user == IP_DEFRAG_AF_PACKET ||
                    (qp->user == IP_DEFRAG_CONNTRACK_IN &&
                     skb_rtable(head)->rt_type != RTN_LOCAL))
                        goto out_rcu_unlock;

                /* Send an ICMP "Fragment Reassembly Timeout" message. */
                icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
                rcu_read_unlock();
        }
out:
        spin_unlock(&qp->q.lock);
        ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
        struct inet_frag_queue *q;
        struct ip4_create_arg arg;
        unsigned int hash;

        arg.iph = iph;
        arg.user = user;

        read_lock(&ip4_frags.lock);
        hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

        q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
        if (IS_ERR_OR_NULL(q)) {
                inet_frag_maybe_warn_overflow(q, pr_fmt());
                return NULL;
        }
        return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
        struct inet_peer *peer = qp->peer;
        unsigned int max = sysctl_ipfrag_max_dist;
        unsigned int start, end;

        int rc;

        if (!peer || !max)
                return 0;

        start = qp->rid;
        end = atomic_inc_return(&peer->rid);
        qp->rid = end;

        rc = qp->q.fragments && (end - start) > max;

        if (rc) {
                struct net *net;

                net = container_of(qp->q.net, struct net, ipv4.frags);
                IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        }

        return rc;
}

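/* Flush all fragments received so far and re-arm the queue's timer. If
 * the timer has already expired (mod_timer() returns 0 for an inactive
 * timer), give up with -ETIMEDOUT; ip_expire() owns the queue then.
 */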
static int ip_frag_reinit(struct ipq *qp)
{
        struct sk_buff *fp;
        unsigned int sum_truesize = 0;

        if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
                atomic_inc(&qp->q.refcnt);
                return -ETIMEDOUT;
        }

        fp = qp->q.fragments;
        do {
                struct sk_buff *xp = fp->next;

                sum_truesize += fp->truesize;
                kfree_skb(fp);
                fp = xp;
        } while (fp);
        sub_frag_mem_limit(&qp->q, sum_truesize);

        qp->q.last_in = 0;
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
        qp->iif = 0;
        qp->ecn = 0;

        return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int flags, offset;
        int ihl, end;
        int err = -ENOENT;
        u8 ecn;

        if (qp->q.last_in & INET_FRAG_COMPLETE)
                goto err;

        if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
            unlikely(ip_frag_too_far(qp)) &&
            unlikely(err = ip_frag_reinit(qp))) {
                ipq_kill(qp);
                goto err;
        }

        ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
        offset = ntohs(ip_hdr(skb)->frag_off);
        flags = offset & ~IP_OFFSET;
        offset &= IP_OFFSET;
        offset <<= 3;           /* offset is in 8-byte chunks */
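        /* Example: a raw frag_off of 0x2003 has IP_MF set and an offset
         * field of 3, i.e. this fragment starts 3 * 8 = 24 bytes into
         * the original payload.
         */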
        ihl = ip_hdrlen(skb);

        /* Determine the position of this fragment. */
        end = offset + skb->len - ihl;
        err = -EINVAL;

        /* Is this the final fragment? */
        if ((flags & IP_MF) == 0) {
                /* If we already have some bits beyond end
                 * or have a different end, the segment is corrupted.
                 */
                if (end < qp->q.len ||
                    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
                        goto err;
                qp->q.last_in |= INET_FRAG_LAST_IN;
                qp->q.len = end;
        } else {
                if (end & 7) {
                        end &= ~7;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
                if (end > qp->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (qp->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        qp->q.len = end;
                }
        }
        if (end == offset)
                goto err;

        err = -ENOMEM;
        if (pskb_pull(skb, ihl) == NULL)
                goto err;

        err = pskb_trim_rcsum(skb, end - offset);
        if (err)
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = qp->q.fragments_tail;
        if (!prev || FRAG_CB(prev)->offset < offset) {
                next = NULL;
                goto found;
        }
        prev = NULL;
        for (next = qp->q.fragments; next != NULL; next = next->next) {
                if (FRAG_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

found:
        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        err = -EINVAL;
                        if (end <= offset)
                                goto err;
                        err = -ENOMEM;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        err = -ENOMEM;

        while (next && FRAG_CB(next)->offset < end) {
                int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG_CB(next)->offset += i;
                        qp->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* Old fragment is completely overridden by the
                         * new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                qp->q.fragments = next;

                        qp->q.meat -= free_it->len;
                        sub_frag_mem_limit(&qp->q, free_it->truesize);
                        kfree_skb(free_it);
                }
        }

        FRAG_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (!next)
                qp->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
                qp->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                qp->iif = dev->ifindex;
                skb->dev = NULL;
        }
        qp->q.stamp = skb->tstamp;
        qp->q.meat += skb->len;
        qp->ecn |= ecn;
        add_frag_mem_limit(&qp->q, skb->truesize);
        if (offset == 0)
                qp->q.last_in |= INET_FRAG_FIRST_IN;

        if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
            skb->len + ihl > qp->q.max_size)
                qp->q.max_size = skb->len + ihl;

        if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len) {
                unsigned long orefdst = skb->_skb_refdst;

                skb->_skb_refdst = 0UL;
                err = ip_frag_reasm(qp, prev, dev);
                skb->_skb_refdst = orefdst;
                return err;
        }

        skb_dst_drop(skb);
        inet_frag_lru_move(&qp->q);
        return -EINPROGRESS;

err:
        kfree_skb(skb);
        return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev)
{
        struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
        struct iphdr *iph;
        struct sk_buff *fp, *head = qp->q.fragments;
        int len;
        int ihlen;
        int err;
        int sum_truesize;
        u8 ecn;

        ipq_kill(qp);

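        /* qp->ecn holds one bit per ECN codepoint seen among the
         * fragments; ip_frag_ecn_table maps that set either to the ECN
         * bits for the reassembled header or to 0xff for a combination
         * that must be dropped.
         */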
        ecn = ip_frag_ecn_table[qp->ecn];
        if (unlikely(ecn == 0xff)) {
                err = -EINVAL;
                goto out_fail;
        }
        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);
                if (!fp)
                        goto out_nomem;

                fp->next = head->next;
                if (!fp->next)
                        qp->q.fragments_tail = fp;
                prev->next = fp;

                skb_morph(head, qp->q.fragments);
                head->next = qp->q.fragments->next;

                consume_skb(qp->q.fragments);
                qp->q.fragments = head;
        }

        WARN_ON(head == NULL);
        WARN_ON(FRAG_CB(head)->offset != 0);

        /* Allocate a new buffer for the datagram. */
        ihlen = ip_hdrlen(head);
        len = ihlen + qp->q.len;

        err = -E2BIG;
        if (len > 65535)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_nomem;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with the data and paged part,
         * and the second holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_nomem;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                add_frag_mem_limit(&qp->q, clone->truesize);
        }

        skb_push(head, head->data - skb_network_header(head));

        sum_truesize = head->truesize;
        for (fp = head->next; fp;) {
                bool headstolen;
                int delta;
                struct sk_buff *next = fp->next;

                sum_truesize += fp->truesize;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);

                if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
                        kfree_skb_partial(fp, headstolen);
                } else {
                        if (!skb_shinfo(head)->frag_list)
                                skb_shinfo(head)->frag_list = fp;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        head->truesize += fp->truesize;
                }
                fp = next;
        }
        sub_frag_mem_limit(&qp->q, sum_truesize);

        head->next = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;
        IPCB(head)->frag_max_size = qp->q.max_size;

        iph = ip_hdr(head);
        /* max_size != 0 implies at least one fragment had IP_DF set */
        iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
        iph->tot_len = htons(len);
        iph->tos |= ecn;
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
        return 0;

out_nomem:
        LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
                       qp);
        err = -ENOMEM;
        goto out_fail;
out_oversize:
        net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
        struct ipq *qp;
        struct net *net;

        net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

        /* Start by cleaning up the memory. */
        ip_evictor(net);

        /* Lookup (or create) queue header */
        if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
                int ret;

                spin_lock(&qp->q.lock);

                ret = ip_frag_queue(qp, skb);

                spin_unlock(&qp->q.lock);
                ipq_put(qp);
                return ret;
        }

        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);
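
/* Helper for taps such as packet sockets: peek at the IP header via
 * skb_copy_bits() so a shared skb is never written before
 * skb_share_check(), and run the defragmenter only when the packet
 * really is a fragment. Returns NULL while reassembly is in progress.
 */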
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
        struct iphdr iph;
        u32 len;

        if (skb->protocol != htons(ETH_P_IP))
                return skb;

        if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
                return skb;

        if (iph.ihl < 5 || iph.version != 4)
                return skb;

        len = ntohs(iph.tot_len);
        if (skb->len < len || len < (iph.ihl * 4))
                return skb;

        if (ip_is_fragment(&iph)) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (skb) {
                        if (!pskb_may_pull(skb, iph.ihl*4))
                                return skb;
                        if (pskb_trim_rcsum(skb, len))
                                return skb;
                        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                        if (ip_defrag(skb, user))
                                return NULL;
                        skb->rxhash = 0;
                }
        }
        return skb;
}
EXPORT_SYMBOL(ip_check_defrag);
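
/* Illustrative usage (an assumption, not code from this file): a tap's
 * receive path could call
 *
 *	skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
 *	if (!skb)
 *		return;		(fragment consumed, reassembly pending)
 *
 * and treat a non-NULL result as either a non-fragment or a fully
 * reassembled datagram.
 */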

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
        {
                .procname       = "ipfrag_high_thresh",
                .data           = &init_net.ipv4.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ipfrag_low_thresh",
                .data           = &init_net.ipv4.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "ipfrag_time",
                .data           = &init_net.ipv4.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_table ip4_frags_ctl_table[] = {
        {
                .procname       = "ipfrag_secret_interval",
                .data           = &ip4_frags.secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "ipfrag_max_dist",
                .data           = &sysctl_ipfrag_max_dist,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero
        },
        { }
};

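/* Register the per-namespace sysctls. Every namespace other than
 * init_net gets its own copy of the table so the .data pointers can be
 * re-aimed at that namespace's counters.
 */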
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip4_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ipv4.frags.high_thresh;
                table[1].data = &net->ipv4.frags.low_thresh;
                table[2].data = &net->ipv4.frags.timeout;

                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        table[0].procname = NULL;
        }

        hdr = register_net_sysctl(net, "net/ipv4", table);
        if (hdr == NULL)
                goto err_reg;

        net->ipv4.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv4.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.frags_hdr);
        kfree(table);
}

static void ip4_frags_ctl_register(void)
{
        register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
        return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
        /* Fragment cache limits.
         *
         * The fragment memory accounting code (tries to) account for
         * the real memory usage, by measuring both the size of the frag
         * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
         * and the SKB's truesize.
         *
         * A 64K fragment consumes 129736 bytes (44*2944)+200
         * (1500 truesize == 2944, sizeof(struct ipq) == 200)
         *
         * We will commit 4MB at one time. Should we cross that limit
         * we will prune down to 3MB, making room for approx 8 big 64K
         * fragments 8x128k.
         */
        net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
        net->ipv4.frags.low_thresh = 3 * 1024 * 1024;
        /*
         * Important NOTE! The fragment queue must be destroyed before the
         * MSL expires. RFC 791 is wrong in proposing to prolong the timer
         * on each fragment arrival by the TTL.
         */
        net->ipv4.frags.timeout = IP_FRAG_TIME;

        inet_frags_init_net(&net->ipv4.frags);

        return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
        ip4_frags_ns_ctl_unregister(net);
        inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
        .init = ipv4_frags_init_net,
        .exit = ipv4_frags_exit_net,
};

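/* Called once at boot (from inet_init(), as far as this file is
 * concerned an assumption): hook the IPv4-specific callbacks into the
 * shared inet_frags machinery and register the sysctls and the
 * per-namespace init/exit handlers.
 */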
void __init ipfrag_init(void)
{
        ip4_frags_ctl_register();
        register_pernet_subsys(&ip4_frags_ops);
        ip4_frags.hashfn = ip4_hashfn;
        ip4_frags.constructor = ip4_frag_init;
        ip4_frags.destructor = ip4_frag_free;
        ip4_frags.skb_free = NULL;
        ip4_frags.qsize = sizeof(struct ipq);
        ip4_frags.match = ip4_frag_match;
        ip4_frags.frag_expire = ip_expire;
        ip4_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&ip4_frags);
}