/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;
		struct inet6_skb_parm	h6;
	};
	struct sk_buff		*next_frag;
	int			frag_run_len;
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
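
/* For example, if fragments covering bytes [0, 1000), [1000, 2000) and
 * [2000, 3000) arrive in that order, they form a single run: only the
 * first skb is linked into the rb-tree, its frag_run_len ends up as 3000,
 * and the second and third skbs hang off it through next_frag.
 */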

static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
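
/* The IPv4/IPv6 reassembly paths OR the IPFRAG_ECN_* value of each queued
 * fragment into a per-queue accumulator and index this table with the
 * result once the datagram is complete.
 */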

int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{
	/* We must wait until all inet_frag_destroy_rcu() callbacks have completed. */
	rcu_barrier();

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;

	/* If we cannot cancel the timer, this frag_queue is already
	 * disappearing and there is nothing to do.
	 * Otherwise, we own a refcount until the end of this function.
	 */
	if (!del_timer(&fq->timer))
		return;

	spin_lock_bh(&fq->lock);
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		atomic_dec(&fq->refcnt);
	}
	spin_unlock_bh(&fq->lock);

	inet_frag_put(fq);
}

void inet_frags_exit_net(struct netns_frags *nf)
{
	nf->high_thresh = 0; /* prevent creation of new frags */

	rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
}
EXPORT_SYMBOL(inet_frags_exit_net);

void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct netns_frags *nf = fq->net;

		fq->flags |= INET_FRAG_COMPLETE;
		rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
		atomic_dec(&fq->refcnt);
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->net->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

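/* Free every skb held in the rb-tree, including the skbs chained through
 * next_frag inside each run. Returns the total truesize released so the
 * caller can uncharge it from the fragment memory accounting.
 */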
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb(skb);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	f = nf->f;
	if (fp) {
		do {
			struct sk_buff *xp = fp->next;

			sum_truesize += fp->truesize;
			kfree_skb(fp);
			fp = xp;
		} while (fp);
	} else {
		sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
	}
	sum = sum_truesize + f->qsize;

	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
		return NULL;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
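	/* Refcount of 3: one reference for the expiration timer, one for the
	 * hash table insertion done by inet_frag_create(), one for the caller.
	 */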
	atomic_set(&q->refcnt, 3);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = nf->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + nf->timeout);

	*prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q);
		inet_frag_destroy(q);
		return NULL;
	}
	return q;
}
EXPORT_SYMBOL(inet_frag_create);

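/* Look up the reassembly queue matching @key, creating one if none exists.
 * On success the returned queue carries a reference for the caller;
 * returns NULL under memory pressure or if the queue is already going away.
 */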
/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
{
	struct inet_frag_queue *fq = NULL, *prev;

	rcu_read_lock();
	prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(nf, key, &prev);
	if (prev && !IS_ERR(prev)) {
		fq = prev;
		if (!atomic_inc_not_zero(&fq->refcnt))
			fq = NULL;
	}
	rcu_read_unlock();
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);

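/* Insert skb, covering bytes [offset, end) of the original datagram, into
 * the queue's rb-tree of fragment runs. Returns IPFRAG_OK on success,
 * IPFRAG_DUP if the data is already fully present (callers are expected to
 * drop just the skb), or IPFRAG_OVERLAP if it partially overlaps queued
 * data (callers are expected to drop the whole queue, per the RFC 5722
 * text quoted below).
 */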
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (last->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < last->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == last->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = curr->ip_defrag_offset +
					FRAG_CB(curr)->frag_run_len;
			if (end <= curr->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= curr->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	skb->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

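/* Prepare the queue for reassembly around skb (usually the fragment that
 * completed the datagram): make skb the head of the queue by morphing it
 * into the first fragment if needed, ensure the head is not cloned, and if
 * the head itself carries a frag_list split it so the remaining fragments
 * can be appended. Returns an opaque cursor to pass to
 * inet_frag_reasm_finish(), or NULL on allocation failure.
 */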
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	struct sk_buff **nextp;
	int delta;

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp)
			return NULL;
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(head->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		return NULL;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->net, delta);

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			return NULL;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->net, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

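/* Walk the remaining runs in offset order, linking every fragment into
 * head's frag_list (continuing from the cursor returned by
 * inet_frag_reasm_prepare()), while accumulating length, truesize and
 * checksum into head and uncharging the queue's memory.
 */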
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data)
{
	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			*nextp = fp;
			nextp = &fp->next;
			fp->prev = NULL;
			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
			fp->sk = NULL;
			head->data_len += fp->len;
			head->len += fp->len;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);
			head->truesize += fp->truesize;
			fp = FRAG_CB(fp)->next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->net, head->truesize);

	*nextp = NULL;
	head->next = NULL;
	head->prev = NULL;
	head->tstamp = q->stamp;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

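/* Detach and return the first fragment of the queue, taken from either the
 * legacy fragments list or the rb-tree, and uncharge its truesize from the
 * fragment memory accounting. Returns NULL if the queue is empty.
 */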
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
	struct sk_buff *head;

	if (q->fragments) {
		head = q->fragments;
		q->fragments = head->next;
	} else {
		struct sk_buff *skb;

		head = skb_rb_first(&q->rb_fragments);
		if (!head)
			return NULL;
		skb = FRAG_CB(head)->next_frag;
		if (skb)
			rb_replace_node(&head->rbnode, &skb->rbnode,
					&q->rb_fragments);
		else
			rb_erase(&head->rbnode, &q->rb_fragments);
		memset(&head->rbnode, 0, sizeof(head->rbnode));
		barrier();
	}
	if (head == q->fragments_tail)
		q->fragments_tail = NULL;

	sub_frag_mem_limit(q->net, head->truesize);

	return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);