/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

/* Given the OR of the ECN values of all received fragments, apply the
 * RFC 3168 section 5.3 requirements.
 * Value:	0xff if the frame should be dropped.
 *		0 or INET_ECN_CE, to be ORed into the final iph->tos field.
 */
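/* For example, fragments seen with ECT(0) plus at least one with CE OR
 * to the index (IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0), which the table maps
 * to INET_ECN_CE, so the reassembled header is marked CE. Any mix of
 * Not-ECT with an ECN-capable codepoint falls in the invalid group below
 * and maps to 0xff (drop).
 */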
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

static int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f,
			     bool force);

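/* Fold the protocol-specific hash of a queue down to one of the
 * INETFRAGS_HASHSZ buckets. The underlying hashfn mixes in f->rnd,
 * which is why queues must be rehashed whenever the secret changes.
 */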
static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

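/* Timer callback: choose a new secret and relink every queue whose
 * bucket changed as a result. The table write lock excludes all readers,
 * so the per-bucket chain locks are not needed while relinking.
 */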
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock(&f->lock);

	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

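/* One-time setup of a protocol's fragment table: empty hash buckets and
 * their chain locks, the table rwlock, and the periodic secret-rebuild
 * timer. f->secret_interval (and the protocol callbacks) are expected to
 * be set by the caller before this runs.
 */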
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}
	rwlock_init(&f->lock);

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

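/* Per-namespace setup: queue count, memory accounting and the LRU list
 * that the evictor walks.
 */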
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	init_frag_mem_limit(nf);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

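/* Per-namespace teardown: dropping low_thresh to 0 makes the forced
 * evictor pass reclaim every remaining queue before the percpu memory
 * counter is destroyed.
 */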
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

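/* Unlink a queue from its hash chain and from the LRU list. The read
 * lock keeps the hash stable against a concurrent secret rebuild; the
 * per-bucket lock protects the chain itself.
 */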
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
	inet_frag_lru_del(fq);
}

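/* Take a queue out of service: stop its expiry timer, unlink it and drop
 * the references held by the timer and the hash chain. The queue itself
 * is freed once the last reference is put.
 */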
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

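/* Free a completed queue: release every fragment skb, return the
 * accounted memory (the fragments' truesize plus f->qsize for the queue
 * itself) and call the protocol destructor. If @work is non-NULL, the
 * freed amount is also subtracted from it so the evictor can track
 * progress.
 */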
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
		       int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

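/* Reclaim queues from the LRU list, oldest first, until memory usage
 * falls below the low threshold or, if @force is set, until the list is
 * empty. Returns the number of queues evicted.
 */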
static int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f,
			     bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0 || force) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		/* Remove q from list to avoid several CPUs grabbing it */
		list_del_init(&q->lru_list);

		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}

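/* Publish a freshly allocated queue in its hash chain. If another CPU
 * raced us and inserted a matching queue first, return that one and
 * complete and release ours. Otherwise take references for the expiry
 * timer and the hash chain on top of the allocation reference.
 */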
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we were without the lock, another CPU could have
	 * updated the rnd seed, so we need to re-calculate the
	 * hash chain. Fortunately, qp_in can be used to get one.
	 */
	hash = inet_frag_hashfn(f, qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With an SMP race we have to recheck the hash table, because
	 * such an entry could have been created on another cpu while
	 * we were not yet holding the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);
	inet_frag_lru_add(nf, qp);
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	return qp;
}

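/* Allocate and initialize a new queue, charging f->qsize against the
 * per-namespace memory limit. Returns NULL when already over the high
 * threshold or when the GFP_ATOMIC allocation fails; the protocol
 * constructor fills in the match key from @arg.
 */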
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh)
		return NULL;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	INIT_LIST_HEAD(&q->lru_list);

	return q;
}

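/* Allocate a queue for @arg and insert it into the hash, yielding to a
 * concurrently created matching entry if one appears.
 */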
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

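/* Look up the queue matching @key, taking a reference, or create one on
 * a miss. Called with f->lock held for reading (the caller computed
 * @hash under it); the lock is released on every return path. Chains
 * longer than INETFRAGS_MAXDEPTH yield ERR_PTR(-ENOBUFS), presumably to
 * bound the cost of walking a maliciously collided bucket.
 */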
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->high_thresh)
		inet_frag_evictor(nf, f, false);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

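/* Ratelimited warning for the ERR_PTR(-ENOBUFS) case above, tagged with
 * the protocol-supplied @prefix.
 */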
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);