/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

static kmem_cache_t *skbuff_head_cache;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_INFO "skput:over: %p:%d put:%d dev:%s\n",
	       here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_INFO "skput:under: %p:%d put:%d dev:%s\n",
	       here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	/* Get the HEAD */
	skb = kmem_cache_alloc(skbuff_head_cache,
			       gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. Size must match skb_add_mtu(). */
	size = SKB_DATA_ALIGN(size);
	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;

	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->tso_size = 0;
	skb_shinfo(skb)->tso_segs = 0;
	skb_shinfo(skb)->frag_list = NULL;
out:
	return skb;
nodata:
	kmem_cache_free(skbuff_head_cache, skb);
	skb = NULL;
	goto out;
}
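
/*
 * Illustrative usage sketch: a typical caller allocates room for headers
 * plus payload, reserves the headroom, then appends data with skb_put().
 * The example_build_packet() name and the 16-byte headroom are made up
 * for this example.
 */
#if 0
static struct sk_buff *example_build_packet(const void *payload,
					    unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + 16, GFP_ATOMIC);	/* payload plus headroom */
	if (!skb)
		return NULL;
	skb_reserve(skb, 16);			/* open up the headroom */
	memcpy(skb_put(skb, len), payload, len);	/* append the payload */
	return skb;
}
#endif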

/**
 *	alloc_skb_from_cache	-	allocate a network buffer
 *	@cp: kmem_cache from which to allocate the data area
 *	     (object size must be big enough for @size bytes + skb overheads)
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
				     unsigned int size, int gfp_mask)
{
	struct sk_buff *skb;
	u8 *data;

	/* Get the HEAD */
	skb = kmem_cache_alloc(skbuff_head_cache,
			       gfp_mask & ~__GFP_DMA);
	if (!skb)
		goto out;

	/* Get the DATA. */
	size = SKB_DATA_ALIGN(size);
	data = kmem_cache_alloc(cp, gfp_mask);
	if (!data)
		goto nodata;

	memset(skb, 0, offsetof(struct sk_buff, truesize));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb->tail = data;
	skb->end  = data + size;

	atomic_set(&(skb_shinfo(skb)->dataref), 1);
	skb_shinfo(skb)->nr_frags = 0;
	skb_shinfo(skb)->tso_size = 0;
	skb_shinfo(skb)->tso_segs = 0;
	skb_shinfo(skb)->frag_list = NULL;
out:
	return skb;
nodata:
	kmem_cache_free(skbuff_head_cache, skb);
	skb = NULL;
	goto out;
}


static void skb_drop_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list = skb_shinfo(skb)->frag_list;

	skb_shinfo(skb)->frag_list = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	skb_release_data(skb);
	kmem_cache_free(skbuff_head_cache, skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	BUG_ON(skb->list != NULL);

	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#ifdef CONFIG_NETFILTER
	nf_conntrack_put(skb->nfct);
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
	skb->tc_classid = 0;
#endif
#endif

	kfree_skbmem(skb);
}

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
	struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);

	if (!n)
		return NULL;

#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->list = NULL;
	n->sk = NULL;
	C(stamp);
	C(dev);
	C(real_dev);
	C(h);
	C(nh);
	C(mac);
	C(dst);
	dst_clone(skb->dst);
	C(sp);
#ifdef CONFIG_INET
	secpath_get(skb->sp);
#endif
	memcpy(n->cb, skb->cb, sizeof(skb->cb));
	C(len);
	C(data_len);
	C(csum);
	C(local_df);
	n->cloned = 1;
	n->nohdr = 0;
	C(pkt_type);
	C(ip_summed);
	C(priority);
	C(protocol);
	C(security);
	n->destructor = NULL;
#ifdef CONFIG_NETFILTER
	C(nfmark);
	C(nfcache);
	C(nfct);
	nf_conntrack_get(skb->nfct);
	C(nfctinfo);
#ifdef CONFIG_NETFILTER_DEBUG
	C(nf_debug);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	C(nf_bridge);
	nf_bridge_get(skb->nf_bridge);
#endif
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_HIPPI)
	C(private);
#endif
#ifdef CONFIG_NET_SCHED
	C(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	n->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
	n->tc_verd = CLR_TC_OK2MUNGE(skb->tc_verd);
	n->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
	C(input_dev);
	C(tc_classid);
#endif

#endif
	C(truesize);
	atomic_set(&n->users, 1);
	C(head);
	C(data);
	C(tail);
	C(end);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
}
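
/*
 * Illustrative usage sketch: hand a clone to a second consumer while keeping
 * the original. Because the clone shares the packet data, neither copy may
 * modify the payload without first obtaining a private copy (see skb_copy()
 * and pskb_copy() below). The example_deliver_copy() name and the callback
 * parameter are made up for this example.
 */
#if 0
static int example_deliver_copy(struct sk_buff *skb,
				void (*consume)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	consume(clone);		/* consumer owns the clone and must free it */
	return 0;
}
#endif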

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;

	new->list	= NULL;
	new->sk		= NULL;
	new->dev	= old->dev;
	new->real_dev	= old->real_dev;
	new->priority	= old->priority;
	new->protocol	= old->protocol;
	new->dst	= dst_clone(old->dst);
#ifdef CONFIG_INET
	new->sp		= secpath_get(old->sp);
#endif
	new->h.raw	= old->h.raw + offset;
	new->nh.raw	= old->nh.raw + offset;
	new->mac.raw	= old->mac.raw + offset;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->local_df	= old->local_df;
	new->pkt_type	= old->pkt_type;
	new->stamp	= old->stamp;
	new->destructor = NULL;
	new->security	= old->security;
#ifdef CONFIG_NETFILTER
	new->nfmark	= old->nfmark;
	new->nfcache	= old->nfcache;
	new->nfct	= old->nfct;
	nf_conntrack_get(old->nfct);
	new->nfctinfo	= old->nfctinfo;
#ifdef CONFIG_NETFILTER_DEBUG
	new->nf_debug	= old->nf_debug;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	new->nf_bridge	= old->nf_bridge;
	nf_bridge_get(old->nf_bridge);
#endif
#endif
#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
	new->tc_index	= old->tc_index;
#endif
	atomic_set(&new->users, 1);
	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
				      gfp_mask);
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);
	n->csum	     = skb->csum;
	n->ip_summed = skb->ip_summed;

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}


/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	memcpy(n->data, skb->data, n->len);
	n->csum	     = skb->csum;
	n->ip_summed = skb->ip_summed;

	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
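
/*
 * Illustrative usage sketch: when only protocol headers need to be rewritten,
 * pskb_copy() is usually preferred over skb_copy() because the (possibly
 * large) paged data stays shared. The example_writable_headers() name is
 * made up for this example.
 */
#if 0
static struct sk_buff *example_writable_headers(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	if (!n)
		return NULL;	/* caller still owns the original skb */
	kfree_skb(skb);		/* drop the reference to the shared original */
	return n;		/* private header, paged fragments still shared */
}
#endif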

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of the skb. The &sk_buff itself is not changed. The
 *	&sk_buff MUST have a reference count of 1. Returns zero on success
 *	or a negative error code if expansion failed. In the latter case,
 *	the &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb->end - skb->head) + ntail;
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->end      = data + size;
	skb->data    += off;
	skb->tail    += off;
	skb->mac.raw += off;
	skb->h.raw   += off;
	skb->nh.raw  += off;
	skb->cloned   = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
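
/*
 * Illustrative usage sketch: an encapsulating path makes sure there is
 * enough writable headroom before pushing its own header. The
 * example_push_header() name and the hdr/hdr_len parameters are made up
 * for this example.
 */
#if 0
static struct sk_buff *example_push_header(struct sk_buff *skb,
					   const void *hdr,
					   unsigned int hdr_len)
{
	if (skb_headroom(skb) < hdr_len || skb_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, hdr_len);

		kfree_skb(skb);		/* drop the old reference */
		if (!nskb)
			return NULL;
		skb = nskb;
	}
	memcpy(skb_push(skb, hdr_len), hdr, hdr_len);	/* prepend the header */
	return skb;
}
#endif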


/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 *
 *	BUG ALERT: ip_summed is not copied. Why does this work? Is it used
 *	only by netfilter in the cases when checksum is recalculated? --ANK
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom, int gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = skb_headroom(skb);
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	return n;
}

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return NULL in out of memory cases.
 */

struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *nskb;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return skb;
	}

	nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
	kfree_skb(skb);
	if (nskb)
		memset(nskb->data + nskb->len, 0, pad);
	return nskb;
}

/* Trims skb to length len. It can change skb pointers if "realloc" is 1.
 * If realloc==0 and trimming is impossible without changing data,
 * it is a BUG().
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
{
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;

	for (i = 0; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;
		if (end > len) {
			if (skb_cloned(skb)) {
				if (!realloc)
					BUG();
				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
					return -ENOMEM;
			}
			if (len <= offset) {
				put_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb)->nr_frags--;
			} else {
				skb_shinfo(skb)->frags[i].size = len - offset;
			}
		}
		offset = end;
	}

	if (offset < len) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		if (len <= skb_headlen(skb)) {
			skb->len      = len;
			skb->data_len = 0;
			skb->tail     = skb->data + len;
			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
				skb_drop_fraglist(skb);
		} else {
			skb->data_len -= skb->len - len;
			skb->len       = len;
		}
	}

	return 0;
}

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or the value of the new tail of the skb on success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb has not enough free space at the tail, get a new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at the tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
		BUG();

	/* Optimization: no fragments, no reason to pre-estimate the
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			if (!list)
				BUG();

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb->tail;
}

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(to, skb->data + offset, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to     += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
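
/*
 * Illustrative usage sketch: pull the first few bytes of a possibly
 * non-linear buffer into a local array for inspection. The
 * example_peek_bytes() name and the 64-byte scratch size are made up
 * for this example.
 */
#if 0
static int example_peek_bytes(const struct sk_buff *skb)
{
	u8 scratch[64];
	unsigned int n = min_t(unsigned int, skb->len, sizeof(scratch));

	/* Works whether the bytes live in the linear head, in page
	 * fragments, or in the frag_list. */
	if (skb_copy_bits(skb, 0, scratch, n))
		return -EFAULT;
	/* ... examine scratch[0..n-1] here ... */
	return 0;
}
#endif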

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy(skb->data + offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

unsigned int skb_checksum(const struct sk_buff *skb, int offset,
			  int len, unsigned int csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				unsigned int csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();

	return csum;
}

/* Both of above in one bottle. */

unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, unsigned int csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to     += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			unsigned int csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to     += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			unsigned int csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to     += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	unsigned int csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_HW)
		csstart = skb->h.raw - skb->data;
	else
		csstart = skb_headlen(skb);

	if (csstart > skb_headlen(skb))
		BUG();

	memcpy(to, skb->data, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_HW) {
		long csstuff = csstart + skb->csum;

		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
	}
}
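
/*
 * Illustrative usage sketch: fold the running 32-bit sum returned by
 * skb_checksum() into a 16-bit Internet checksum over the skb's data
 * starting at a given offset. The example_payload_csum() name is made up
 * for this example.
 */
#if 0
static unsigned short example_payload_csum(const struct sk_buff *skb,
					   int offset)
{
	unsigned int csum = skb_checksum(skb, offset, skb->len - offset, 0);

	return csum_fold(csum);		/* 16-bit one's-complement fold */
}
#endif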

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
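
/*
 * Illustrative usage sketch: a minimal producer/consumer pattern built on
 * the locked queue helpers above. The example_rx_queue name and the
 * init/enqueue/flush helpers are made up for this example.
 */
#if 0
static struct sk_buff_head example_rx_queue;

static void example_rx_init(void)
{
	skb_queue_head_init(&example_rx_queue);	/* empty list plus spinlock */
}

static void example_rx_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&example_rx_queue, skb);	/* producer side */
}

static void example_rx_flush(void)
{
	struct sk_buff *skb;

	/* Consumer side: drain and free everything that is queued. */
	while ((skb = skb_dequeue(&example_rx_queue)) != NULL)
		kfree_skb(skb);
}
#endif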

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *
 *	Remove a packet from a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *
 *	Works even without knowing the list it is sitting on, which can be
 *	handy at times. It also means that THE LIST MUST EXIST when you
 *	unlink. Thus a list must have its contents unlinked before it is
 *	destroyed.
 */
void skb_unlink(struct sk_buff *skb)
{
	struct sk_buff_head *list = skb->list;

	if (list) {
		unsigned long flags;

		spin_lock_irqsave(&list->lock, flags);
		if (skb->list == list)
			__skb_unlink(skb, skb->list);
		spin_unlock_irqrestore(&list->lock, flags);
	}
}


/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */

void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_append(old, newsk);
	spin_unlock_irqrestore(&old->list->lock, flags);
}


/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *
 *	Place a packet before a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */

void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&old->list->lock, flags);
	__skb_insert(newsk, old->prev, old, old->list);
	spin_unlock_irqrestore(&old->list->lock, flags);
}

#if 0
/*
 *	Tune the memory allocator for a new MTU size.
 */
void skb_add_mtu(int mtu)
{
	/* Must match allocation in alloc_skb */
	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);

	kmem_add_cache_size(mtu);
}
#endif

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);

	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		  += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb->tail		   = skb->data + len;
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split the frag accurately. This is
				 *    what is done here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size	= len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!skbuff_head_cache)
		panic("cannot create skbuff cache");
}

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_clone_fraglist);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);