/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS 128
#define INETFRAGS_EVICT_MAX	512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *	   0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

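/* Sketch of the intended use by a reassembler (the q->ecn field below is
 * illustrative; each caller keeps its own accumulator): OR the IPFRAG_ECN_*
 * value of every arriving fragment into q->ecn, then, once the datagram is
 * complete:
 *
 *	u8 ecn = ip_frag_ecn_table[q->ecn];
 *
 *	if (ecn == 0xff)
 *		goto drop;		(invalid combination, drop frame)
 *	iph->tos |= ecn;		(fold CE, if any, into the final tos)
 */
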
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

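/* Re-key the hash with a fresh random secret and move every queue into the
 * bucket the new secret maps it to.  Writes to f->rnd are serialized by
 * rnd_seqlock; lookups racing with a rebuild observe the seqlock change and
 * retry against the rebuilt table (see get_frag_bucket_locked()).
 */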
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  Rebuilds never run concurrently, so
				 * we cannot deadlock on the hb_dest lock
				 * below: if it is already locked, its holder
				 * will release it soon, since no other
				 * caller can be waiting for the hb lock we
				 * took above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	if (!hlist_unhashed(&q->list_evictor))
		return false;

	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

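/* Evict every eligible queue from one hash bucket.  Candidates are first
 * collected on a private list under the chain lock, then expired with the
 * lock dropped: f->frag_expire() ends up in inet_frag_kill(), which takes
 * the chain lock itself, so it must not be called while we still hold it.
 */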
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire(&fq->timer);

	return evicted;
}

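/* Eviction work item.  Each run scans at most INETFRAGS_EVICT_BUCKETS
 * buckets, resuming at f->next_bucket where the previous run left off, and
 * bails out once more than INETFRAGS_EVICT_MAX queues have been evicted,
 * so a single run stays bounded.  A pending secret rebuild is also carried
 * out here, off the packet processing path.
 */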
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = READ_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

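/* Set up one protocol's reassembly state: the hash buckets, the eviction
 * work item, the seqlock protecting the hash secret, and the kmem cache
 * the per-datagram queues are allocated from.  The protocol must have
 * filled in frags_cache_name, qsize and its callbacks before calling this.
 */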
int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

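/* Drop all queues belonging to a namespace that is going away.  Setting
 * low_thresh to 0 makes every queue of this namespace evictable; sweep all
 * buckets and retry until nothing is accounted to the namespace any more
 * and no secret rebuild has raced with the sweep.
 */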
void inet_frags_exit_net(struct netns_frags *nf)
{
	struct inet_frags *f = nf->f;
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    sum_frag_mem_limit(nf))
		goto evict_again;
}
EXPORT_SYMBOL(inet_frags_exit_net);

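/* Find and lock the bucket a queue currently hashes to.  The seqlock read
 * section detects a concurrent secret rebuild: if f->rnd changed between
 * computing the hash and taking the chain lock, the locked bucket may be
 * stale, so drop it and retry.
 */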
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

 restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, fq->net->f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

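/* Take a queue out of service: stop its timer and unlink it from its hash
 * bucket, dropping the reference each of them held.  The caller keeps its
 * own reference, so the queue itself is freed later, when the final
 * inet_frag_put() runs inet_frag_destroy().
 */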
void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq);
		refcount_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

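/* Final teardown once the last reference is gone: free every fragment skb
 * still queued, run the protocol's destructor, return the queue object to
 * the kmem cache and subtract the whole footprint (skb truesizes plus the
 * queue object) from the namespace memory accounting.
 */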
void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	f = nf->f;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

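/* Link a freshly allocated queue into the hash.  If another CPU inserted a
 * matching queue first, back out: mark ours complete, drop it and return
 * the existing queue instead.  On success the queue ends up with three
 * references: one for the hash chain, one for the armed timer and the one
 * handed back to the caller.
 */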
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* On SMP we must recheck the hash chain, because an equal entry
	 * may have been created on another cpu before we acquired the
	 * hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			refcount_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		refcount_inc(&qp->refcnt);

	refcount_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

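/* Allocate and initialize a new queue.  When the namespace is already over
 * its high_thresh limit (or the limit is 0), refuse the allocation and
 * kick the eviction worker instead; the caller sees NULL and gives up.
 */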
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

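/* Look up the queue matching @key, creating one if the chain is still
 * short enough.  A chain longer than INETFRAGS_MAXDEPTH suggests memory
 * pressure or a hash collision attack, so return -ENOBUFS and request a
 * secret rebuild instead of growing the chain further.  The caller passes
 * in the full hash value; only the bucket index is derived here.
 */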
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			refcount_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);