/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller	:	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy	:	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

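/* Per-fragment reassembly state (the offset of this fragment within the
 * original datagram) is kept in the skb control buffer and is accessed
 * through FRAG_CB() while the skb sits on an ipq fragment list.
 */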
#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	u16		max_df_size; /* largest frag with DF set seen */
	int		iif;
	int		vif;   /* L3 master device index */
	unsigned int	rid;
	struct inet_peer *peer;
};

static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

int ip_frag_mem(struct net *net)
{
	return sum_frag_mem_limit(&net->ipv4.frags);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
	int vif;
};

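/* Hash the lookup key (IP id, protocol, source and destination address)
 * together with a per-boot random seed; the seed is initialised lazily on
 * first use so that remote senders cannot predict bucket placement.
 */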
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd);
}

static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
{
	const struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

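/* Fragments belong to the same queue only when the IP id, protocol, source
 * and destination address all match and they were submitted by the same
 * defragmentation user (@user) on the same L3 master device (@vif), so e.g.
 * conntrack and VRF contexts never share partially reassembled datagrams.
 */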
static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct ipq *qp;
	const struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user &&
		qp->vif == arg->vif;
}

static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->vif = arg->vif;
	qp->user = arg->user;
	qp->peer = q->net->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
		NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}

/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill an ipq entry. It is not destroyed immediately, because the caller
 * (and possibly others) still holds a reference to it.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);

	if (!inet_frag_evicting(&qp->q)) {
		struct sk_buff *clone, *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
			goto out;

		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out;

		/* skb has no dst, perform route lookup again */
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out;

		/* Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (frag_expire_skip_icmp(qp->user) &&
		    (skb_rtable(head)->rt_type != RTN_LOCAL))
			goto out;

		clone = skb_clone(head, GFP_ATOMIC);

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if (clone) {
			spin_unlock(&qp->q.lock);
			icmp_send(clone, ICMP_TIME_EXCEEDED,
				  ICMP_EXC_FRAGTIME, 0);
			consume_skb(clone);
			goto out_rcu_unlock;
		}
	}
out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;
	arg.vif = vif;

	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

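/* The sender has emitted more than ipfrag_max_dist other fragmented datagrams
 * since this queue last made progress, so its contents are assumed stale:
 * drop every queued fragment, return the accounted memory and reset the
 * queue so reassembly can restart with the fragment currently in hand.
 */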
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue. */
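/* Returns 0 once this fragment completes the datagram (ip_frag_reasm() then
 * leaves the reassembled packet in the skb the caller passed in),
 * -EINPROGRESS while more fragments are still expected, or a negative error.
 * On any non-zero return the queue or the error path owns the skb.
 */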
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	unsigned int fragsize;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* The old fragment is completely overridden by the
			 * new one, so drop it.
			 */
| 465 | next = next->next; |
| 466 | |
| 467 | if (prev) |
| 468 | prev->next = next; |
| 469 | else |
Pavel Emelyanov | 5ab11c9 | 2007-10-15 02:24:19 -0700 | [diff] [blame] | 470 | qp->q.fragments = next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 471 | |
Pavel Emelyanov | 5ab11c9 | 2007-10-15 02:24:19 -0700 | [diff] [blame] | 472 | qp->q.meat -= free_it->len; |
Florian Westphal | 0e60d24 | 2015-07-23 12:05:38 +0200 | [diff] [blame] | 473 | sub_frag_mem_limit(qp->q.net, free_it->truesize); |
Jesper Dangaard Brouer | d433673 | 2013-01-28 23:45:12 +0000 | [diff] [blame] | 474 | kfree_skb(free_it); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 475 | } |
| 476 | } |
| 477 | |
| 478 | FRAG_CB(skb)->offset = offset; |
| 479 | |
| 480 | /* Insert this fragment in the chain of fragments. */ |
| 481 | skb->next = next; |
Changli Gao | d6bebca | 2010-06-29 04:39:37 +0000 | [diff] [blame] | 482 | if (!next) |
| 483 | qp->q.fragments_tail = skb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 484 | if (prev) |
| 485 | prev->next = skb; |
| 486 | else |
Pavel Emelyanov | 5ab11c9 | 2007-10-15 02:24:19 -0700 | [diff] [blame] | 487 | qp->q.fragments = skb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 488 | |
Herbert Xu | 1706d58 | 2007-10-14 00:38:15 -0700 | [diff] [blame] | 489 | dev = skb->dev; |
| 490 | if (dev) { |
| 491 | qp->iif = dev->ifindex; |
| 492 | skb->dev = NULL; |
| 493 | } |
Pavel Emelyanov | 5ab11c9 | 2007-10-15 02:24:19 -0700 | [diff] [blame] | 494 | qp->q.stamp = skb->tstamp; |
| 495 | qp->q.meat += skb->len; |
Eric Dumazet | 6623e3b | 2011-01-05 07:52:55 +0000 | [diff] [blame] | 496 | qp->ecn |= ecn; |
Florian Westphal | 0e60d24 | 2015-07-23 12:05:38 +0200 | [diff] [blame] | 497 | add_frag_mem_limit(qp->q.net, skb->truesize); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 498 | if (offset == 0) |
Nikolay Aleksandrov | 06aa8b8 | 2014-08-01 12:29:44 +0200 | [diff] [blame] | 499 | qp->q.flags |= INET_FRAG_FIRST_IN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 500 | |
Florian Westphal | d6b915e | 2015-05-22 16:32:51 +0200 | [diff] [blame] | 501 | fragsize = skb->len + ihl; |
| 502 | |
| 503 | if (fragsize > qp->q.max_size) |
| 504 | qp->q.max_size = fragsize; |
| 505 | |
Patrick McHardy | 5f2d04f | 2012-08-26 19:13:55 +0200 | [diff] [blame] | 506 | if (ip_hdr(skb)->frag_off & htons(IP_DF) && |
Florian Westphal | d6b915e | 2015-05-22 16:32:51 +0200 | [diff] [blame] | 507 | fragsize > qp->max_df_size) |
| 508 | qp->max_df_size = fragsize; |
Patrick McHardy | 5f2d04f | 2012-08-26 19:13:55 +0200 | [diff] [blame] | 509 | |
Nikolay Aleksandrov | 06aa8b8 | 2014-08-01 12:29:44 +0200 | [diff] [blame] | 510 | if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && |
Eric Dumazet | 97599dc | 2013-04-16 12:55:41 +0000 | [diff] [blame] | 511 | qp->q.meat == qp->q.len) { |
| 512 | unsigned long orefdst = skb->_skb_refdst; |
Herbert Xu | 1706d58 | 2007-10-14 00:38:15 -0700 | [diff] [blame] | 513 | |
Eric Dumazet | 97599dc | 2013-04-16 12:55:41 +0000 | [diff] [blame] | 514 | skb->_skb_refdst = 0UL; |
| 515 | err = ip_frag_reasm(qp, prev, dev); |
| 516 | skb->_skb_refdst = orefdst; |
| 517 | return err; |
| 518 | } |
| 519 | |
| 520 | skb_dst_drop(skb); |
Herbert Xu | 1706d58 | 2007-10-14 00:38:15 -0700 | [diff] [blame] | 521 | return -EINPROGRESS; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 522 | |
| 523 | err: |
| 524 | kfree_skb(skb); |
Herbert Xu | 1706d58 | 2007-10-14 00:38:15 -0700 | [diff] [blame] | 525 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 526 | } |
| 527 | |
| 528 | |
| 529 | /* Build a new IP datagram from all its fragments. */ |
| 530 | |
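/* @prev is the fragment queued just before the one that completed the
 * datagram. When it is non-NULL, the triggering skb is morphed into the
 * queue head below, so that the reassembled packet ends up in the skb the
 * caller of ip_defrag() is currently processing.
 */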
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with the data and paged part,
	 * and the second holding only the frag list.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * the original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such a DF fragment was the
	 * largest frag seen, to avoid sending tiny DF-fragments in case
	 * the skb was built from one very small df-fragment and one large
	 * non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
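/* Returns 0 once @skb holds the fully reassembled datagram; a negative
 * return (typically -EINPROGRESS) means the fragment was absorbed into a
 * reassembly queue or dropped, and the caller must not touch @skb again.
 */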
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

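/* Helper for callers such as packet sockets that may see IP fragments before
 * ip_rcv(): it peeks at the header via skb_copy_bits() (the header may not
 * be linear yet), and only when the packet really is a fragment does it take
 * an unshared copy and feed it to ip_defrag(). It returns NULL while the
 * datagram is still incomplete, otherwise the (possibly reassembled) skb.
 */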
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
				return skb;
			if (pskb_trim_rcsum(skb, netoff + len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

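/* Per-netns sysctls under /proc/sys/net/ipv4: ipfrag_high_thresh and
 * ipfrag_low_thresh bound the memory used by incomplete fragments (and are
 * cross-checked against each other), ipfrag_time is the reassembly timeout,
 * and ipfrag_max_dist bounds fragment reordering per peer. The global
 * ipfrag_secret_interval entry is kept only for compatibility and no longer
 * does anything.
 */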
#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &init_net.ipv4.frags.max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[0].extra2 = &init_net.ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;
		table[3].data = &net->ipv4.frags.max_dist;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	int res;

	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code (tries to) account for
	 * the real memory usage, by measuring both the size of the frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragmented datagram consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC791 is wrong in proposing to prolong the timer on each fragment
	 * arrival by the TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	net->ipv4.frags.max_dist = 64;

	res = inet_frags_init_net(&net->ipv4.frags);
	if (res)
		return res;
	res = ip4_frags_ns_ctl_register(net);
	if (res)
		inet_frags_uninit_net(&net->ipv4.frags);
	return res;
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

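/* Boot-time setup: register the global sysctls and the per-netns init/exit
 * hooks, then wire the IPv4 callbacks (hash, match, constructor, destructor,
 * expiry) into the shared inet_frag framework; failure to create the
 * underlying slab cache is fatal.
 */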
void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
}