/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

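/*
 * Worked example (illustrative only; exact numbers depend on the
 * architecture): with 64-byte cache lines, SKB_DATA_ALIGN(200) rounds up
 * to 256.  With 4096-byte pages, SKB_WITH_OVERHEAD(4096) is 4096 minus the
 * cache-aligned size of struct skb_shared_info, i.e. the largest linear
 * data area whose buffer, including its shared info block, still fits in
 * one page.  SKB_MAX_ALLOC is the same computation over an order-2
 * (four page) allocation.
 */
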
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol (e.g. IPv6) to your host. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	    hard_start_xmit from skb->csum_start to the end and to record
 *	    the checksum at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	  - it is a clever device, it is able to checksum
 *			    everything.
 *	NETIF_F_NO_CSUM	  - loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	  - device is dumb, it is able to csum only
 *			    TCP/UDP over IPv4. Sigh. Vendors like it this
 *			    way for some unknown reason. Though, see the
 *			    comment above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 *	Any questions? No questions, good.		--ANK
 */

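/*
 * Illustrative sketch (not part of the original header): how these values
 * are typically used.  The surrounding driver code is hypothetical.
 *
 *	On receive, a driver whose hardware validated the L4 checksum would do:
 *
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *
 *	and a driver reporting a full ones'-complement sum of the packet:
 *
 *		skb->ip_summed = CHECKSUM_COMPLETE;
 *		skb->csum      = hw_reported_csum;
 *
 *	On transmit with CHECKSUM_PARTIAL, the stack has already set
 *	skb->csum_start and skb->csum_offset; the hardware (or a software
 *	fallback such as skb_checksum_help()) must store the checksum at
 *	skb->head + skb->csum_start + skb->csum_offset.
 */
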
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
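/*
 * Worked example (illustrative): with the common PAGE_SIZE of 4096 this
 * gives 65536/4096 + 2 = 18 fragment slots, enough for a full 64K frame
 * plus head and tail pages that may not be page aligned.
 */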

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u32 page_offset;
	__u32 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
#ifdef CONFIG_HAS_DMA
	unsigned int	num_dma_maps;
#endif
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
#ifdef CONFIG_HAS_DMA
	dma_addr_t	dma_maps[MAX_SKB_FRAGS + 1];
#endif
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

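/*
 * Illustrative sketch (not part of the original header): how the two
 * halves of dataref are read back, mirroring what skb_header_cloned()
 * below does.
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 *
 * skb_header_release() adds 1 << SKB_DATAREF_SHIFT, marking one existing
 * reference as payload-only without changing the total count.
 */
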

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 * struct sk_buff - socket buffer
 * @next: Next buffer in list
 * @prev: Previous buffer in list
 * @sk: Socket we are owned by
 * @tstamp: Time we arrived
 * @dev: Device we arrived on/are leaving by
 * @transport_header: Transport layer header
 * @network_header: Network layer header
 * @mac_header: Link layer header
 * @dst: destination entry
 * @sp: the security path, used for xfrm
 * @cb: Control buffer. Free for use by every layer. Put private vars here
 * @len: Length of actual data
 * @data_len: Data length
 * @mac_len: Length of link layer header
 * @hdr_len: writable header length of cloned skb
 * @csum: Checksum (must include start/offset pair)
 * @csum_start: Offset from skb->head where checksumming should start
 * @csum_offset: Offset from csum_start where checksum should be stored
 * @local_df: allow local fragmentation
 * @cloned: Head may be cloned (check refcnt to be sure)
 * @nohdr: Payload reference only, must not modify header
 * @pkt_type: Packet class
 * @fclone: skbuff clone status
 * @ip_summed: Driver fed us an IP checksum
 * @priority: Packet queueing priority
 * @users: User count - see {datagram,tcp}.c
 * @protocol: Packet protocol from driver
 * @truesize: Buffer size
 * @head: Head of buffer
 * @data: Data head pointer
 * @tail: Tail pointer
 * @end: End pointer
 * @destructor: Destruct function
 * @mark: Generic packet mark
 * @nfct: Associated connection, if any
 * @ipvs_property: skbuff is owned by ipvs
 * @peeked: this packet has been seen already, so stats have been
 *	done for it, don't do them again
 * @nf_trace: netfilter packet trace flag
 * @nfctinfo: Relationship of this skb to the connection
 * @nfct_reasm: netfilter conntrack re-assembly pointer
 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 * @iif: ifindex of device we arrived on
 * @queue_mapping: Queue mapping for multiqueue devices
 * @tc_index: Traffic control index
 * @tc_verd: traffic control verdict
 * @ndisc_nodetype: router type (from link layer)
 * @do_not_encrypt: set to prevent encryption of this frame
 * @requeue: set to indicate that the wireless core should attempt
 *	a software retry on this frame if we failed to
 *	receive an ACK for it
 * @dma_cookie: a cookie to one of several possible DMA operations
 *	done by skb DMA functions
 * @secmark: security marking
 * @vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	ktime_t			tstamp;
	struct net_device	*dev;

	union {
		struct dst_entry	*dst;
		struct rtable		*rtable;
	};
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			iif;
	__u16			queue_mapping;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
	__u8			do_not_encrypt:1;
	__u8			requeue:1;
#endif
	/* 0/13/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

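/*
 * Illustrative sketch (not part of the original header): a protocol layer
 * typically overlays a private structure on skb->cb, in the style of
 * TCP_SKB_CB() in net/tcp.h.  The names below are hypothetical.
 *
 *	struct example_skb_cb {
 *		__u32 seq;
 *		__u8  flags;
 *	};
 *	#define EXAMPLE_SKB_CB(skb) ((struct example_skb_cb *)&((skb)->cb[0]))
 *
 *	EXAMPLE_SKB_CB(skb)->seq = 1;
 *
 * The structure must fit in the 48-byte cb[] array, and it is only valid
 * while the current layer owns the skb; clone the skb first if the values
 * must survive a handoff to another layer.
 */
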
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

#ifdef CONFIG_HAS_DMA
#include <linux/dma-mapping.h>
extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
		       enum dma_data_direction dir);
extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
			  enum dma_data_direction dir);
#endif

extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}

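/*
 * Illustrative sketch (not part of the original header): the usual
 * allocate/reserve/fill pattern for building a packet from scratch.
 * The sizes and the my_hdr structure are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(payload_len + headroom, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, headroom);                (reserve space for headers)
 *	memcpy(skb_put(skb, payload_len), buf, payload_len);
 *	skb_push(skb, sizeof(struct my_hdr));      (then prepend headers)
 *
 * skb_reserve(), skb_put() and skb_push() are defined further down in
 * this header.
 */
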
Lennert Buytenhek04a4bb52008-10-01 02:33:12 -0700393extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
394
Herbert Xue0053ec2007-10-14 00:37:52 -0700395extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
Victor Fusco86a76ca2005-07-08 14:57:47 -0700396extern struct sk_buff *skb_clone(struct sk_buff *skb,
Al Virodd0fc662005-10-07 07:46:04 +0100397 gfp_t priority);
Victor Fusco86a76ca2005-07-08 14:57:47 -0700398extern struct sk_buff *skb_copy(const struct sk_buff *skb,
Al Virodd0fc662005-10-07 07:46:04 +0100399 gfp_t priority);
Victor Fusco86a76ca2005-07-08 14:57:47 -0700400extern struct sk_buff *pskb_copy(struct sk_buff *skb,
Al Virodd0fc662005-10-07 07:46:04 +0100401 gfp_t gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402extern int pskb_expand_head(struct sk_buff *skb,
Victor Fusco86a76ca2005-07-08 14:57:47 -0700403 int nhead, int ntail,
Al Virodd0fc662005-10-07 07:46:04 +0100404 gfp_t gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
406 unsigned int headroom);
407extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
408 int newheadroom, int newtailroom,
Al Virodd0fc662005-10-07 07:46:04 +0100409 gfp_t priority);
David Howells716ea3a2007-04-02 20:19:53 -0700410extern int skb_to_sgvec(struct sk_buff *skb,
411 struct scatterlist *sg, int offset,
412 int len);
413extern int skb_cow_data(struct sk_buff *skb, int tailbits,
414 struct sk_buff **trailer);
Herbert Xu5b057c62006-06-23 02:06:41 -0700415extern int skb_pad(struct sk_buff *skb, int pad);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416#define dev_kfree_skb(a) kfree_skb(a)
417extern void skb_over_panic(struct sk_buff *skb, int len,
418 void *here);
419extern void skb_under_panic(struct sk_buff *skb, int len,
420 void *here);
421
Ananda Rajue89e9cf2005-10-18 15:46:41 -0700422extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
423 int getfrag(void *from, char *to, int offset,
424 int len,int odd, struct sk_buff *skb),
425 void *from, int length);
426
Thomas Graf677e90e2005-06-23 20:59:51 -0700427struct skb_seq_state
428{
429 __u32 lower_offset;
430 __u32 upper_offset;
431 __u32 frag_idx;
432 __u32 stepped_offset;
433 struct sk_buff *root_skb;
434 struct sk_buff *cur_skb;
435 __u8 *frag_data;
436};
437
438extern void skb_prepare_seq_read(struct sk_buff *skb,
439 unsigned int from, unsigned int to,
440 struct skb_seq_state *st);
441extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
442 struct skb_seq_state *st);
443extern void skb_abort_seq_read(struct skb_seq_state *st);
444
Thomas Graf3fc7e8a2005-06-23 21:00:17 -0700445extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
446 unsigned int to, struct ts_config *config,
447 struct ts_state *state);
448
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -0700449#ifdef NET_SKBUFF_DATA_USES_OFFSET
450static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
451{
452 return skb->head + skb->end;
453}
454#else
455static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
456{
457 return skb->end;
458}
459#endif
460
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461/* Internal */
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -0700462#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463
464/**
465 * skb_queue_empty - check if a queue is empty
466 * @list: queue head
467 *
468 * Returns true if the queue is empty, false otherwise.
469 */
470static inline int skb_queue_empty(const struct sk_buff_head *list)
471{
472 return list->next == (struct sk_buff *)list;
473}
474
475/**
David S. Millerfc7ebb22008-09-23 00:34:07 -0700476 * skb_queue_is_last - check if skb is the last entry in the queue
477 * @list: queue head
478 * @skb: buffer
479 *
480 * Returns true if @skb is the last buffer on the list.
481 */
482static inline bool skb_queue_is_last(const struct sk_buff_head *list,
483 const struct sk_buff *skb)
484{
485 return (skb->next == (struct sk_buff *) list);
486}
487
488/**
Ilpo Järvinen832d11c2008-11-24 21:20:15 -0800489 * skb_queue_is_first - check if skb is the first entry in the queue
490 * @list: queue head
491 * @skb: buffer
492 *
493 * Returns true if @skb is the first buffer on the list.
494 */
495static inline bool skb_queue_is_first(const struct sk_buff_head *list,
496 const struct sk_buff *skb)
497{
498 return (skb->prev == (struct sk_buff *) list);
499}
500
501/**
David S. Miller249c8b42008-09-23 00:44:42 -0700502 * skb_queue_next - return the next packet in the queue
503 * @list: queue head
504 * @skb: current buffer
505 *
506 * Return the next packet in @list after @skb. It is only valid to
507 * call this if skb_queue_is_last() evaluates to false.
508 */
509static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
510 const struct sk_buff *skb)
511{
512 /* This BUG_ON may seem severe, but if we just return then we
513 * are going to dereference garbage.
514 */
515 BUG_ON(skb_queue_is_last(list, skb));
516 return skb->next;
517}
518
519/**
Ilpo Järvinen832d11c2008-11-24 21:20:15 -0800520 * skb_queue_prev - return the prev packet in the queue
521 * @list: queue head
522 * @skb: current buffer
523 *
524 * Return the prev packet in @list before @skb. It is only valid to
525 * call this if skb_queue_is_first() evaluates to false.
526 */
527static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
528 const struct sk_buff *skb)
529{
530 /* This BUG_ON may seem severe, but if we just return then we
531 * are going to dereference garbage.
532 */
533 BUG_ON(skb_queue_is_first(list, skb));
534 return skb->prev;
535}
536
537/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700538 * skb_get - reference buffer
539 * @skb: buffer to reference
540 *
541 * Makes another reference to a socket buffer and returns a pointer
542 * to the buffer.
543 */
544static inline struct sk_buff *skb_get(struct sk_buff *skb)
545{
546 atomic_inc(&skb->users);
547 return skb;
548}
549
/*
 *	If users == 1, we are the only owner and can avoid redundant
 *	atomic changes.
 */
554
555/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 * skb_cloned - is the buffer a clone
557 * @skb: buffer to check
558 *
559 * Returns true if the buffer was generated with skb_clone() and is
560 * one of multiple shared copies of the buffer. Cloned buffers are
561 * shared data so must not be written to under normal circumstances.
562 */
563static inline int skb_cloned(const struct sk_buff *skb)
564{
565 return skb->cloned &&
566 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
567}
568
569/**
570 * skb_header_cloned - is the header a clone
571 * @skb: buffer to check
572 *
573 * Returns true if modifying the header part of the buffer requires
574 * the data to be copied.
575 */
576static inline int skb_header_cloned(const struct sk_buff *skb)
577{
578 int dataref;
579
580 if (!skb->cloned)
581 return 0;
582
583 dataref = atomic_read(&skb_shinfo(skb)->dataref);
584 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
585 return dataref != 1;
586}
587
588/**
589 * skb_header_release - release reference to header
590 * @skb: buffer to operate on
591 *
592 * Drop a reference to the header part of the buffer. This is done
593 * by acquiring a payload reference. You must not read from the header
594 * part of skb->data after this.
595 */
596static inline void skb_header_release(struct sk_buff *skb)
597{
598 BUG_ON(skb->nohdr);
599 skb->nohdr = 1;
600 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
601}
602
603/**
604 * skb_shared - is the buffer shared
605 * @skb: buffer to check
606 *
607 * Returns true if more than one person has a reference to this
608 * buffer.
609 */
610static inline int skb_shared(const struct sk_buff *skb)
611{
612 return atomic_read(&skb->users) != 1;
613}
614
/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
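
/*
 * Illustrative sketch (not part of the original header): a typical caller
 * that may need to modify the skb it was handed, e.g. a receive handler.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	... skb is now exclusively ours (its data may still be cloned;
 *	    see skb_unshare() below for that case) ...
 *
 * Note that on failure the original skb has already been freed, so the
 * caller must not touch it again.
 */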
639
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */
646
647/**
648 * skb_unshare - make a copy of a shared buffer
649 * @skb: buffer to check
650 * @pri: priority for memory allocation
651 *
652 * If the socket buffer is a clone then this function creates a new
653 * copy of the data, drops a reference count on the old copy and returns
654 * the new copy with the reference count at 1. If the buffer is not a clone
655 * the original buffer is returned. When called with a spinlock held or
656 * from interrupt state @pri must be %GFP_ATOMIC
657 *
658 * %NULL is returned on a memory allocation failure.
659 */
Victor Fuscoe2bf5212005-07-18 13:36:38 -0700660static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
Al Virodd0fc662005-10-07 07:46:04 +0100661 gfp_t pri)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662{
663 might_sleep_if(pri & __GFP_WAIT);
664 if (skb_cloned(skb)) {
665 struct sk_buff *nskb = skb_copy(skb, pri);
666 kfree_skb(skb); /* Free our shared copy */
667 skb = nskb;
668 }
669 return skb;
670}
671
672/**
673 * skb_peek
674 * @list_: list to peek at
675 *
676 * Peek an &sk_buff. Unlike most other operations you _MUST_
677 * be careful with this one. A peek leaves the buffer on the
678 * list and someone else may run off with it. You must hold
679 * the appropriate locks or have a private queue to do this.
680 *
681 * Returns %NULL for an empty list or a pointer to the head element.
682 * The reference count is not incremented and the reference is therefore
683 * volatile. Use with caution.
684 */
685static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
686{
687 struct sk_buff *list = ((struct sk_buff *)list_)->next;
688 if (list == (struct sk_buff *)list_)
689 list = NULL;
690 return list;
691}
692
693/**
694 * skb_peek_tail
695 * @list_: list to peek at
696 *
697 * Peek an &sk_buff. Unlike most other operations you _MUST_
698 * be careful with this one. A peek leaves the buffer on the
699 * list and someone else may run off with it. You must hold
700 * the appropriate locks or have a private queue to do this.
701 *
702 * Returns %NULL for an empty list or a pointer to the tail element.
703 * The reference count is not incremented and the reference is therefore
704 * volatile. Use with caution.
705 */
706static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
707{
708 struct sk_buff *list = ((struct sk_buff *)list_)->prev;
709 if (list == (struct sk_buff *)list_)
710 list = NULL;
711 return list;
712}
713
714/**
715 * skb_queue_len - get queue length
716 * @list_: list to measure
717 *
718 * Return the length of an &sk_buff queue.
719 */
720static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
721{
722 return list_->qlen;
723}
724
David S. Miller67fed452008-09-21 22:36:24 -0700725/**
726 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
727 * @list: queue to initialize
728 *
729 * This initializes only the list and queue length aspects of
730 * an sk_buff_head object. This allows to initialize the list
731 * aspects of an sk_buff_head without reinitializing things like
732 * the spinlock. It can also be used for on-stack sk_buff_head
733 * objects where the spinlock is known to not be used.
734 */
735static inline void __skb_queue_head_init(struct sk_buff_head *list)
736{
737 list->prev = list->next = (struct sk_buff *)list;
738 list->qlen = 0;
739}
740
Arjan van de Ven76f10ad2006-08-02 14:06:55 -0700741/*
742 * This function creates a split out lock class for each invocation;
743 * this is needed for now since a whole lot of users of the skb-queue
744 * infrastructure in drivers have different locking usage (in hardirq)
745 * than the networking core (in softirq only). In the long run either the
746 * network layer or drivers should need annotation to consolidate the
747 * main types of usage into 3 classes.
748 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749static inline void skb_queue_head_init(struct sk_buff_head *list)
750{
751 spin_lock_init(&list->lock);
David S. Miller67fed452008-09-21 22:36:24 -0700752 __skb_queue_head_init(list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753}
754
Pavel Emelianovc2ecba72007-04-17 12:45:31 -0700755static inline void skb_queue_head_init_class(struct sk_buff_head *list,
756 struct lock_class_key *class)
757{
758 skb_queue_head_init(list);
759 lockdep_set_class(&list->lock, class);
760}
761
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762/*
Gerrit Renkerbf299272008-04-14 00:04:51 -0700763 * Insert an sk_buff on a list.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700764 *
765 * The "__skb_xxxx()" functions are the non-atomic ones that
766 * can only be called with interrupts disabled.
767 */
Gerrit Renkerbf299272008-04-14 00:04:51 -0700768extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
769static inline void __skb_insert(struct sk_buff *newsk,
770 struct sk_buff *prev, struct sk_buff *next,
771 struct sk_buff_head *list)
772{
773 newsk->next = next;
774 newsk->prev = prev;
775 next->prev = prev->next = newsk;
776 list->qlen++;
777}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778
David S. Miller67fed452008-09-21 22:36:24 -0700779static inline void __skb_queue_splice(const struct sk_buff_head *list,
780 struct sk_buff *prev,
781 struct sk_buff *next)
782{
783 struct sk_buff *first = list->next;
784 struct sk_buff *last = list->prev;
785
786 first->prev = prev;
787 prev->next = first;
788
789 last->next = next;
790 next->prev = last;
791}
792
793/**
794 * skb_queue_splice - join two skb lists, this is designed for stacks
795 * @list: the new list to add
796 * @head: the place to add it in the first list
797 */
798static inline void skb_queue_splice(const struct sk_buff_head *list,
799 struct sk_buff_head *head)
800{
801 if (!skb_queue_empty(list)) {
802 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
David S. Miller1d4a31d2008-09-22 21:57:21 -0700803 head->qlen += list->qlen;
David S. Miller67fed452008-09-21 22:36:24 -0700804 }
805}
806
/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
814static inline void skb_queue_splice_init(struct sk_buff_head *list,
815 struct sk_buff_head *head)
816{
817 if (!skb_queue_empty(list)) {
818 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
David S. Miller1d4a31d2008-09-22 21:57:21 -0700819 head->qlen += list->qlen;
David S. Miller67fed452008-09-21 22:36:24 -0700820 __skb_queue_head_init(list);
821 }
822}
823
824/**
825 * skb_queue_splice_tail - join two skb lists, each list being a queue
826 * @list: the new list to add
827 * @head: the place to add it in the first list
828 */
829static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
830 struct sk_buff_head *head)
831{
832 if (!skb_queue_empty(list)) {
833 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
David S. Miller1d4a31d2008-09-22 21:57:21 -0700834 head->qlen += list->qlen;
David S. Miller67fed452008-09-21 22:36:24 -0700835 }
836}
837
/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
846static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
847 struct sk_buff_head *head)
848{
849 if (!skb_queue_empty(list)) {
850 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
David S. Miller1d4a31d2008-09-22 21:57:21 -0700851 head->qlen += list->qlen;
David S. Miller67fed452008-09-21 22:36:24 -0700852 __skb_queue_head_init(list);
853 }
854}
855
/**
 *	__skb_queue_after - queue a buffer after a given buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}
873
Gerrit Renker7de6c032008-04-14 00:05:09 -0700874extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
875 struct sk_buff_head *list);
876
Gerrit Renkerf5572852008-04-14 00:05:28 -0700877static inline void __skb_queue_before(struct sk_buff_head *list,
878 struct sk_buff *next,
879 struct sk_buff *newsk)
880{
881 __skb_insert(newsk, next->prev, next, list);
882}
883
Stephen Hemminger300ce172005-10-30 13:47:34 -0800884/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885 * __skb_queue_head - queue a buffer at the list head
886 * @list: list to use
887 * @newsk: buffer to queue
888 *
889 * Queue a buffer at the start of a list. This function takes no locks
890 * and you must therefore hold required locks before calling it.
891 *
892 * A buffer cannot be placed on two lists at the same time.
893 */
894extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
895static inline void __skb_queue_head(struct sk_buff_head *list,
896 struct sk_buff *newsk)
897{
Stephen Hemminger300ce172005-10-30 13:47:34 -0800898 __skb_queue_after(list, (struct sk_buff *)list, newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899}
900
901/**
902 * __skb_queue_tail - queue a buffer at the list tail
903 * @list: list to use
904 * @newsk: buffer to queue
905 *
906 * Queue a buffer at the end of a list. This function takes no locks
907 * and you must therefore hold required locks before calling it.
908 *
909 * A buffer cannot be placed on two lists at the same time.
910 */
911extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
912static inline void __skb_queue_tail(struct sk_buff_head *list,
913 struct sk_buff *newsk)
914{
Gerrit Renkerf5572852008-04-14 00:05:28 -0700915 __skb_queue_before(list, (struct sk_buff *)list, newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700916}
917
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919 * remove sk_buff from list. _Must_ be called atomically, and with
920 * the list known..
921 */
David S. Miller8728b832005-08-09 19:25:21 -0700922extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
924{
925 struct sk_buff *next, *prev;
926
927 list->qlen--;
928 next = skb->next;
929 prev = skb->prev;
930 skb->next = skb->prev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931 next->prev = prev;
932 prev->next = next;
933}
934
Gerrit Renkerf525c062008-04-14 00:04:12 -0700935/**
936 * __skb_dequeue - remove from the head of the queue
937 * @list: list to dequeue from
938 *
939 * Remove the head of the list. This function does not take any locks
940 * so must be used with appropriate locks held only. The head item is
941 * returned or %NULL if the list is empty.
942 */
943extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
944static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
945{
946 struct sk_buff *skb = skb_peek(list);
947 if (skb)
948 __skb_unlink(skb, list);
949 return skb;
950}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951
952/**
953 * __skb_dequeue_tail - remove from the tail of the queue
954 * @list: list to dequeue from
955 *
956 * Remove the tail of the list. This function does not take any locks
957 * so must be used with appropriate locks held only. The tail item is
958 * returned or %NULL if the list is empty.
959 */
960extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
961static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
962{
963 struct sk_buff *skb = skb_peek_tail(list);
964 if (skb)
965 __skb_unlink(skb, list);
966 return skb;
967}
968
969
970static inline int skb_is_nonlinear(const struct sk_buff *skb)
971{
972 return skb->data_len;
973}
974
975static inline unsigned int skb_headlen(const struct sk_buff *skb)
976{
977 return skb->len - skb->data_len;
978}
979
980static inline int skb_pagelen(const struct sk_buff *skb)
981{
982 int i, len = 0;
983
984 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
985 len += skb_shinfo(skb)->frags[i].size;
986 return len + skb_headlen(skb);
987}
988
989static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
990 struct page *page, int off, int size)
991{
992 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
993
994 frag->page = page;
995 frag->page_offset = off;
996 frag->size = size;
997 skb_shinfo(skb)->nr_frags = i + 1;
998}
999
Peter Zijlstra654bed12008-10-07 14:22:33 -07001000extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1001 int off, int size);
1002
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1004#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
1005#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1006
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001007#ifdef NET_SKBUFF_DATA_USES_OFFSET
1008static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1009{
1010 return skb->head + skb->tail;
1011}
1012
1013static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1014{
1015 skb->tail = skb->data - skb->head;
1016}
1017
1018static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1019{
1020 skb_reset_tail_pointer(skb);
1021 skb->tail += offset;
1022}
1023#else /* NET_SKBUFF_DATA_USES_OFFSET */
1024static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1025{
1026 return skb->tail;
1027}
1028
1029static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1030{
1031 skb->tail = skb->data;
1032}
1033
1034static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1035{
1036 skb->tail = skb->data + offset;
1037}
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001038
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001039#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1040
/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

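/*
 * Illustrative sketch (not part of the original header): how the data
 * pointer manipulators compose when parsing a received frame.  The header
 * structure name is hypothetical.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct my_proto_hdr)))
 *		goto drop;                         (linear area too short)
 *	hdr = (struct my_proto_hdr *)skb->data;
 *	skb_pull(skb, sizeof(*hdr));               (advance past the header)
 *
 * and, conversely, when building a frame for transmission:
 *
 *	payload = skb_put(skb, payload_len);       (extend tail, get pointer)
 *	hdr = (struct my_proto_hdr *)skb_push(skb, sizeof(*hdr));
 *
 * skb_put() and skb_push() panic via skb_over_panic()/skb_under_panic()
 * on tailroom/headroom overrun, so the caller is responsible for sizing
 * the allocation correctly.
 */
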
1095/**
1096 * skb_headroom - bytes at buffer head
1097 * @skb: buffer to check
1098 *
1099 * Return the number of bytes of free space at the head of an &sk_buff.
1100 */
Chuck Leverc2636b42007-10-23 21:07:32 -07001101static inline unsigned int skb_headroom(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102{
1103 return skb->data - skb->head;
1104}
1105
1106/**
1107 * skb_tailroom - bytes at buffer end
1108 * @skb: buffer to check
1109 *
1110 * Return the number of bytes of free space at the tail of an sk_buff
1111 */
1112static inline int skb_tailroom(const struct sk_buff *skb)
1113{
Arnaldo Carvalho de Melo4305b542007-04-19 20:43:29 -07001114 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115}
1116
1117/**
1118 * skb_reserve - adjust headroom
1119 * @skb: buffer to alter
1120 * @len: bytes to move
1121 *
1122 * Increase the headroom of an empty &sk_buff by reducing the tail
1123 * room. This is only allowed for an empty buffer.
1124 */
David S. Miller8243126c2006-01-17 02:54:21 -08001125static inline void skb_reserve(struct sk_buff *skb, int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126{
1127 skb->data += len;
1128 skb->tail += len;
1129}
1130
Arnaldo Carvalho de Melo2e07fa92007-04-10 21:22:35 -07001131#ifdef NET_SKBUFF_DATA_USES_OFFSET
1132static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1133{
1134 return skb->head + skb->transport_header;
1135}
1136
1137static inline void skb_reset_transport_header(struct sk_buff *skb)
1138{
1139 skb->transport_header = skb->data - skb->head;
1140}
1141
1142static inline void skb_set_transport_header(struct sk_buff *skb,
1143 const int offset)
1144{
1145 skb_reset_transport_header(skb);
1146 skb->transport_header += offset;
1147}
1148
1149static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1150{
1151 return skb->head + skb->network_header;
1152}
1153
1154static inline void skb_reset_network_header(struct sk_buff *skb)
1155{
1156 skb->network_header = skb->data - skb->head;
1157}
1158
1159static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1160{
1161 skb_reset_network_header(skb);
1162 skb->network_header += offset;
1163}
1164
1165static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1166{
1167 return skb->head + skb->mac_header;
1168}
1169
1170static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1171{
1172 return skb->mac_header != ~0U;
1173}
1174
1175static inline void skb_reset_mac_header(struct sk_buff *skb)
1176{
1177 skb->mac_header = skb->data - skb->head;
1178}
1179
1180static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1181{
1182 skb_reset_mac_header(skb);
1183 skb->mac_header += offset;
1184}
1185
1186#else /* NET_SKBUFF_DATA_USES_OFFSET */
1187
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001188static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1189{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001190 return skb->transport_header;
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001191}
1192
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001193static inline void skb_reset_transport_header(struct sk_buff *skb)
1194{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001195 skb->transport_header = skb->data;
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001196}
1197
Arnaldo Carvalho de Melo967b05f2007-03-13 13:51:52 -03001198static inline void skb_set_transport_header(struct sk_buff *skb,
1199 const int offset)
1200{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001201 skb->transport_header = skb->data + offset;
Arnaldo Carvalho de Melo967b05f2007-03-13 13:51:52 -03001202}
1203
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001204static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1205{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001206 return skb->network_header;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001207}
1208
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001209static inline void skb_reset_network_header(struct sk_buff *skb)
1210{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001211 skb->network_header = skb->data;
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001212}
1213
Arnaldo Carvalho de Meloc14d2452007-03-11 22:39:41 -03001214static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1215{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001216 skb->network_header = skb->data + offset;
Arnaldo Carvalho de Meloc14d2452007-03-11 22:39:41 -03001217}
1218
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001219static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1220{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001221 return skb->mac_header;
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001222}
1223
1224static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1225{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001226 return skb->mac_header != NULL;
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001227}
1228
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001229static inline void skb_reset_mac_header(struct sk_buff *skb)
1230{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001231 skb->mac_header = skb->data;
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001232}
1233
Arnaldo Carvalho de Melo48d49d0c2007-03-10 12:30:58 -03001234static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1235{
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001236 skb->mac_header = skb->data + offset;
Arnaldo Carvalho de Melo48d49d0c2007-03-10 12:30:58 -03001237}
Arnaldo Carvalho de Melo2e07fa92007-04-10 21:22:35 -07001238#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

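/*
 * Illustrative sketch (not part of the original header): how a receive
 * path typically records the header positions as it walks the frame
 * (compare eth_type_trans() and the IP input path).  ip_hdrlen here is
 * just a local variable holding the IP header length.
 *
 *	skb_reset_mac_header(skb);         (data points at the L2 header)
 *	skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);     (data now at the IP header)
 *	skb_pull(skb, ip_hdrlen);
 *	skb_reset_transport_header(skb);   (data now at TCP/UDP)
 *
 * The *_offset() and skb_network_header_len() helpers above then resolve
 * these recorded positions relative to the current skb->data.
 */
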
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
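
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * ethernet driver allocating a receive buffer so that the IP header lands
 * on a 4-byte boundary.  rx_buf_len is an assumed driver parameter.
 *
 *	skb = netdev_alloc_skb(dev, rx_buf_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * The hardware then DMAs the 14-byte MAC header to skb->data, leaving the
 * IP header that follows it 4-byte aligned (netdev_alloc_skb() is declared
 * further down in this header).
 */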
1278
Anton Blanchard025be812006-03-31 02:27:06 -08001279/*
1280 * The networking layer reserves some headroom in skb data (via
1281 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1282 * the header has to grow. In the default case, if the header has to grow
1283 * 16 bytes or less we avoid the reallocation.
1284 *
1285 * Unfortunately this headroom changes the DMA alignment of the resulting
1286 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
1287 * on some architectures. An architecture can override this value,
1288 * perhaps setting it to a cacheline in size (since that will maintain
1289 * cacheline alignment of the DMA). It must be a power of 2.
1290 *
1291 * Various parts of the networking layer expect at least 16 bytes of
1292 * headroom, you should not reduce this.
1293 */
1294#ifndef NET_SKB_PAD
1295#define NET_SKB_PAD 16
1296#endif
1297
Herbert Xu3cc0e872006-06-09 16:13:38 -07001298extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299
1300static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1301{
Herbert Xu3cc0e872006-06-09 16:13:38 -07001302 if (unlikely(skb->data_len)) {
1303 WARN_ON(1);
1304 return;
1305 }
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001306 skb->len = len;
1307 skb_set_tail_pointer(skb, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308}
1309
Ilpo Järvinen419ae742008-03-27 17:54:01 -07001310extern void skb_trim(struct sk_buff *skb, unsigned int len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
1312static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1313{
Herbert Xu3cc0e872006-06-09 16:13:38 -07001314 if (skb->data_len)
1315 return ___pskb_trim(skb, len);
1316 __skb_trim(skb, len);
1317 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318}
1319
1320static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1321{
1322 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1323}
1324
1325/**
Herbert Xue9fa4f72006-08-13 20:12:58 -07001326 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1327 * @skb: buffer to alter
1328 * @len: new length
1329 *
1330 * This is identical to pskb_trim except that the caller knows that
1331 * the skb is not cloned so we should never get an error due to out-
1332 * of-memory.
1333 */
1334static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1335{
1336 int err = pskb_trim(skb, len);
1337 BUG_ON(err);
1338}
1339
1340/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 * skb_orphan - orphan a buffer
1342 * @skb: buffer to orphan
1343 *
1344 * If a buffer currently has an owner then we call the owner's
1345 * destructor function and make the @skb unowned. The buffer continues
1346 * to exist but is no longer charged to its former owner.
1347 */
1348static inline void skb_orphan(struct sk_buff *skb)
1349{
1350 if (skb->destructor)
1351 skb->destructor(skb);
1352 skb->destructor = NULL;
1353 skb->sk = NULL;
1354}
1355
1356/**
1357 * __skb_queue_purge - empty a list
1358 * @list: list to empty
1359 *
1360 * Delete all buffers on an &sk_buff list. Each buffer is removed from
1361 * the list and one reference dropped. This function does not take the
1362 * list lock and the caller must hold the relevant locks to use it.
1363 */
1364extern void skb_queue_purge(struct sk_buff_head *list);
1365static inline void __skb_queue_purge(struct sk_buff_head *list)
1366{
1367 struct sk_buff *skb;
1368 while ((skb = __skb_dequeue(list)) != NULL)
1369 kfree_skb(skb);
1370}
1371
1372/**
Christoph Hellwigb4e54de2006-07-24 15:31:14 -07001373 * __dev_alloc_skb - allocate an skbuff for receiving
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 * @length: length to allocate
1375 * @gfp_mask: get_free_pages mask, passed to alloc_skb
1376 *
1377 * Allocate a new &sk_buff and assign it a usage count of one. The
1378 * buffer has unspecified headroom built in. Users should allocate
1379 * the headroom they think they need without accounting for the
1380 * built in space. The built in space is used for optimisations.
1381 *
Christoph Hellwig766ea8c2006-08-07 15:49:53 -07001382 * %NULL is returned if there is no free memory.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
Al Virodd0fc662005-10-07 07:46:04 +01001385 gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386{
Anton Blanchard025be812006-03-31 02:27:06 -08001387 struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 if (likely(skb))
Anton Blanchard025be812006-03-31 02:27:06 -08001389 skb_reserve(skb, NET_SKB_PAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 return skb;
1391}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
Ilpo Järvinenf58518e2008-03-27 17:51:31 -07001393extern struct sk_buff *dev_alloc_skb(unsigned int length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
Christoph Hellwig8af27452006-07-31 22:35:23 -07001395extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
1396 unsigned int length, gfp_t gfp_mask);
1397
1398/**
1399 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
1400 * @dev: network device to receive on
1401 * @length: length to allocate
1402 *
1403 * Allocate a new &sk_buff and assign it a usage count of one. The
1404 * buffer has unspecified headroom built in. Users should allocate
1405 * the headroom they think they need without accounting for the
1406 * built in space. The built in space is used for optimisations.
1407 *
1408 * %NULL is returned if there is no free memory. Although this function
1409 * allocates memory it can be called from an interrupt.
1410 */
1411static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1412 unsigned int length)
1413{
1414 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1415}
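/*
 * Illustrative sketch (hypothetical RX refill helper, applies equally to
 * dev_alloc_skb): the usual pattern is to ask for slightly more than the
 * buffer size and skb_reserve() the extra so that the IP header lands on an
 * aligned boundary.  NET_IP_ALIGN is defined earlier in this header; @bufsz
 * is whatever the hardware needs.
 */
static inline struct sk_buff *skb_example_rx_alloc(struct net_device *dev,
						   unsigned int bufsz)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, bufsz + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}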
1416
Peter Zijlstra654bed12008-10-07 14:22:33 -07001417extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
1418
1419/**
1420 * netdev_alloc_page - allocate a page for ps-rx on a specific device
1421 * @dev: network device to receive on
1422 *
1423 * Allocate a new page, node-local to the specified device.
1424 *
1425 * %NULL is returned if there is no free memory.
1426 */
1427static inline struct page *netdev_alloc_page(struct net_device *dev)
1428{
1429 return __netdev_alloc_page(dev, GFP_ATOMIC);
1430}
1431
1432static inline void netdev_free_page(struct net_device *dev, struct page *page)
1433{
1434 __free_page(page);
1435}
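/*
 * Illustrative sketch (hypothetical helper name): hanging a device-local
 * page off a receive skb as its next paged fragment.  The caller is still
 * responsible for updating skb->len, skb->data_len and skb->truesize to
 * account for the bytes the hardware actually placed in the page.
 */
static inline int skb_example_add_rx_page(struct net_device *dev,
					  struct sk_buff *skb,
					  int frag, unsigned int len)
{
	struct page *page = netdev_alloc_page(dev);

	if (!page)
		return -ENOMEM;
	skb_fill_page_desc(skb, frag, page, 0, len);	/* offset 0, @len bytes */
	return 0;
}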
1436
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437/**
Patrick McHardy334a8132007-06-25 04:35:20 -07001438 * skb_clone_writable - is the header of a clone writable
1439 * @skb: buffer to check
1440 * @len: length up to which to write
1441 *
1442 * Returns true if modifying the header part of the cloned buffer
1443 * does not require the data to be copied.
1444 */
Chuck Leverc2636b42007-10-23 21:07:32 -07001445static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
Patrick McHardy334a8132007-06-25 04:35:20 -07001446{
1447 return !skb_header_cloned(skb) &&
1448 skb_headroom(skb) + len <= skb->hdr_len;
1449}
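/*
 * Illustrative sketch (hypothetical helper name): before rewriting a field
 * inside the first @hdr_end bytes of a possibly-cloned skb, make sure those
 * bytes are private to us; otherwise reallocate the header.  This mirrors
 * the usual skb_cloned()/skb_clone_writable() check.
 */
static inline int skb_example_writable_header(struct sk_buff *skb,
					      unsigned int hdr_end)
{
	if (!skb_cloned(skb) || skb_clone_writable(skb, hdr_end))
		return 0;			/* safe to edit in place */
	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}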
1450
Herbert Xud9cc2042007-09-16 16:21:16 -07001451static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
1452 int cloned)
1453{
1454 int delta = 0;
1455
1456 if (headroom < NET_SKB_PAD)
1457 headroom = NET_SKB_PAD;
1458 if (headroom > skb_headroom(skb))
1459 delta = headroom - skb_headroom(skb);
1460
1461 if (delta || cloned)
1462 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
1463 GFP_ATOMIC);
1464 return 0;
1465}
1466
Patrick McHardy334a8132007-06-25 04:35:20 -07001467/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 * skb_cow - copy header of skb when it is required
1469 * @skb: buffer to cow
1470 * @headroom: needed headroom
1471 *
1472 * If the skb passed lacks sufficient headroom or its data part
1473 * is shared, data is reallocated. If reallocation fails, an error
1474 * is returned and the original skb is not changed.
1475 *
1476 * The result is skb with writable area skb->head...skb->tail
1477 * and at least @headroom of space at head.
1478 */
1479static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1480{
Herbert Xud9cc2042007-09-16 16:21:16 -07001481 return __skb_cow(skb, headroom, skb_cloned(skb));
1482}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
Herbert Xud9cc2042007-09-16 16:21:16 -07001484/**
1485 * skb_cow_head - skb_cow but only making the head writable
1486 * @skb: buffer to cow
1487 * @headroom: needed headroom
1488 *
1489 * This function is identical to skb_cow except that we replace the
1490 * skb_cloned check by skb_header_cloned. It should be used when
1491 * you only need to push on some header and do not need to modify
1492 * the data.
1493 */
1494static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
1495{
1496 return __skb_cow(skb, headroom, skb_header_cloned(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497}
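/*
 * Illustrative sketch (hypothetical encapsulation step): ensure headroom
 * for, and private ownership of, a 4-byte tag pushed in front of the
 * existing headers.  Only the header needs to be writable, so
 * skb_cow_head() suffices and a shared payload is left alone.
 */
static inline int skb_example_push_tag(struct sk_buff *skb, __be32 tag)
{
	int err = skb_cow_head(skb, 4);

	if (err)
		return err;
	memcpy(skb_push(skb, 4), &tag, 4);	/* prepend the tag */
	return 0;
}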
1498
1499/**
1500 * skb_padto - pad an skbuff up to a minimal size
1501 * @skb: buffer to pad
1502 * @len: minimal length
1503 *
1504 * Pads up a buffer to ensure the trailing bytes exist and are
1505 * blanked. If the buffer already contains sufficient data it
Herbert Xu5b057c62006-06-23 02:06:41 -07001506 * is untouched. Otherwise it is extended. Returns zero on
1507 * success. The skb is freed on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 */
1509
Herbert Xu5b057c62006-06-23 02:06:41 -07001510static inline int skb_padto(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511{
1512 unsigned int size = skb->len;
1513 if (likely(size >= len))
Herbert Xu5b057c62006-06-23 02:06:41 -07001514 return 0;
Gerrit Renker987c4022008-08-11 18:17:17 -07001515 return skb_pad(skb, len - size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516}
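/*
 * Illustrative sketch (hypothetical transmit path): hardware that cannot pad
 * short Ethernet frames needs them padded to the 60-byte minimum (ETH_ZLEN
 * in <linux/if_ether.h>) before DMA.  Remember that the skb is already freed
 * when skb_padto() fails.
 */
static inline int skb_example_pad_tx(struct sk_buff *skb)
{
	if (skb_padto(skb, 60))	/* i.e. ETH_ZLEN */
		return -ENOMEM;	/* the skb has been freed for us */
	return 0;
}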
1517
1518static inline int skb_add_data(struct sk_buff *skb,
1519 char __user *from, int copy)
1520{
1521 const int off = skb->len;
1522
1523 if (skb->ip_summed == CHECKSUM_NONE) {
1524 int err = 0;
Al Viro50842052006-11-14 21:36:34 -08001525 __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 copy, 0, &err);
1527 if (!err) {
1528 skb->csum = csum_block_add(skb->csum, csum, off);
1529 return 0;
1530 }
1531 } else if (!copy_from_user(skb_put(skb, copy), from, copy))
1532 return 0;
1533
1534 __skb_trim(skb, off);
1535 return -EFAULT;
1536}
1537
1538static inline int skb_can_coalesce(struct sk_buff *skb, int i,
1539 struct page *page, int off)
1540{
1541 if (i) {
1542 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1543
1544 return page == frag->page &&
1545 off == frag->page_offset + frag->size;
1546 }
1547 return 0;
1548}
1549
Herbert Xu364c6ba2006-06-09 16:10:40 -07001550static inline int __skb_linearize(struct sk_buff *skb)
1551{
1552 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
1553}
1554
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555/**
1556 * skb_linearize - convert paged skb to linear one
1557 * @skb: buffer to linearize
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 *
1559 * If there is no free memory -ENOMEM is returned, otherwise zero
1560 * is returned and the old skb data released.
1561 */
Herbert Xu364c6ba2006-06-09 16:10:40 -07001562static inline int skb_linearize(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563{
Herbert Xu364c6ba2006-06-09 16:10:40 -07001564 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
1565}
1566
1567/**
1568 * skb_linearize_cow - make sure skb is linear and writable
1569 * @skb: buffer to process
1570 *
1571 * If there is no free memory -ENOMEM is returned, otherwise zero
1572 * is returned and the old skb data released.
1573 */
1574static inline int skb_linearize_cow(struct sk_buff *skb)
1575{
1576 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1577 __skb_linearize(skb) : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578}
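/*
 * Illustrative sketch (hypothetical transmit hook): a device that can only
 * DMA from one contiguous buffer flattens any paged skb before handing it
 * to the hardware.
 */
static inline int skb_example_flatten_for_dma(struct sk_buff *skb)
{
	/* A no-op for already-linear buffers; -ENOMEM if the fragments
	 * cannot be pulled into the linear area. */
	return skb_linearize(skb);
}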
1579
1580/**
1581 * skb_postpull_rcsum - update checksum for received skb after pull
1582 * @skb: buffer to update
1583 * @start: start of data before pull
1584 * @len: length of data pulled
1585 *
1586 * After doing a pull on a received packet, you need to call this to
Patrick McHardy84fa7932006-08-29 16:44:56 -07001587 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
1588 * CHECKSUM_NONE so that it can be recomputed from scratch.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 */
1590
1591static inline void skb_postpull_rcsum(struct sk_buff *skb,
Herbert Xucbb042f2006-03-20 22:43:56 -08001592 const void *start, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593{
Patrick McHardy84fa7932006-08-29 16:44:56 -07001594 if (skb->ip_summed == CHECKSUM_COMPLETE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1596}
1597
Herbert Xucbb042f2006-03-20 22:43:56 -08001598unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
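/*
 * Illustrative sketch (hypothetical decapsulation step): after pulling an
 * outer header of @hlen bytes by hand, the CHECKSUM_COMPLETE value must be
 * adjusted for the bytes that are no longer part of the packet.  The
 * skb_pull_rcsum() declared above bundles both steps.
 */
static inline void skb_example_strip_outer(struct sk_buff *skb,
					   unsigned int hlen)
{
	const void *start = skb->data;	/* data before the pull */

	__skb_pull(skb, hlen);
	skb_postpull_rcsum(skb, start, hlen);
}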
1599
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600/**
1601 * pskb_trim_rcsum - trim received skb and update checksum
1602 * @skb: buffer to trim
1603 * @len: new length
1604 *
1605 * This is exactly the same as pskb_trim except that it ensures the
1606 * checksum of a received packet is still valid after the operation.
1607 */
1608
1609static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1610{
Stephen Hemminger0e4e4222005-09-08 12:32:03 -07001611 if (likely(len >= skb->len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 return 0;
Patrick McHardy84fa7932006-08-29 16:44:56 -07001613 if (skb->ip_summed == CHECKSUM_COMPLETE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 skb->ip_summed = CHECKSUM_NONE;
1615 return __pskb_trim(skb, len);
1616}
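/*
 * Illustrative sketch (hypothetical receive path): an Ethernet frame may
 * carry padding beyond the length claimed by the network header; trimming
 * with pskb_trim_rcsum() keeps a CHECKSUM_COMPLETE value consistent.
 * @l3_len is assumed to come from the caller's IP header parsing.
 */
static inline int skb_example_trim_padding(struct sk_buff *skb,
					   unsigned int l3_len)
{
	if (skb->len > l3_len)
		return pskb_trim_rcsum(skb, l3_len);
	return 0;
}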
1617
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618#define skb_queue_walk(queue, skb) \
1619 for (skb = (queue)->next; \
1620 prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
1621 skb = skb->next)
1622
James Chapman46f89142007-04-30 00:07:31 -07001623#define skb_queue_walk_safe(queue, skb, tmp) \
1624 for (skb = (queue)->next, tmp = skb->next; \
1625 skb != (struct sk_buff *)(queue); \
1626 skb = tmp, tmp = skb->next)
1627
David S. Miller1164f522008-09-23 00:49:44 -07001628#define skb_queue_walk_from(queue, skb) \
1629 for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
1630 skb = skb->next)
1631
1632#define skb_queue_walk_from_safe(queue, skb, tmp) \
1633 for (tmp = skb->next; \
1634 skb != (struct sk_buff *)(queue); \
1635 skb = tmp, tmp = skb->next)
1636
Stephen Hemminger300ce172005-10-30 13:47:34 -08001637#define skb_queue_reverse_walk(queue, skb) \
1638 for (skb = (queue)->prev; \
1639 prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
1640 skb = skb->prev)
1641
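/*
 * Illustrative sketch (hypothetical flush helper): dropping every buffer on
 * a private queue that belongs to a given device.  The _safe variant is
 * needed because entries are unlinked mid-walk, and the caller is assumed
 * to hold the queue lock since __skb_unlink() is unlocked.
 */
static inline void skb_example_drop_for_dev(struct sk_buff_head *q,
					    struct net_device *dev)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(q, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, q);
			kfree_skb(skb);
		}
	}
}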
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
Herbert Xua59322b2007-12-05 01:53:40 -08001643extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
1644 int *peeked, int *err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
1646 int noblock, int *err);
1647extern unsigned int datagram_poll(struct file *file, struct socket *sock,
1648 struct poll_table_struct *wait);
1649extern int skb_copy_datagram_iovec(const struct sk_buff *from,
1650 int offset, struct iovec *to,
1651 int size);
Herbert Xufb286bb2005-11-10 13:01:24 -08001652extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 int hlen,
1654 struct iovec *iov);
Rusty Russelldb543c12008-08-15 15:13:53 -07001655extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
1656 int offset,
1657 struct iovec *from,
1658 int len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
Herbert Xu27ab2562007-12-05 01:51:58 -08001660extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
Herbert Xu3305b802005-12-13 23:16:37 -08001661 unsigned int flags);
Al Viro2bbbc862006-11-14 21:37:14 -08001662extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
1663 int len, __wsum csum);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664extern int skb_copy_bits(const struct sk_buff *skb, int offset,
1665 void *to, int len);
Stephen Hemminger0c6fcc82007-04-20 16:40:01 -07001666extern int skb_store_bits(struct sk_buff *skb, int offset,
1667 const void *from, int len);
Al Viro81d77662006-11-14 21:37:33 -08001668extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 int offset, u8 *to, int len,
Al Viro81d77662006-11-14 21:37:33 -08001670 __wsum csum);
Jens Axboe9c55e012007-11-06 23:30:13 -08001671extern int skb_splice_bits(struct sk_buff *skb,
1672 unsigned int offset,
1673 struct pipe_inode_info *pipe,
1674 unsigned int len,
1675 unsigned int flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1677extern void skb_split(struct sk_buff *skb,
1678 struct sk_buff *skb1, const u32 len);
Ilpo Järvinen832d11c2008-11-24 21:20:15 -08001679extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
1680 int shiftlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
Herbert Xu576a30e2006-06-27 13:22:38 -07001682extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
Herbert Xu71d93b32008-12-15 23:42:33 -08001683extern int skb_gro_receive(struct sk_buff **head,
1684 struct sk_buff *skb);
Arnaldo Carvalho de Melo20380732005-08-16 02:18:02 -03001685
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1687 int len, void *buffer)
1688{
1689 int hlen = skb_headlen(skb);
1690
Patrick McHardy55820ee2005-07-05 14:08:10 -07001691 if (hlen - offset >= len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 return skb->data + offset;
1693
1694 if (skb_copy_bits(skb, offset, buffer, len) < 0)
1695 return NULL;
1696
1697 return buffer;
1698}
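/*
 * Illustrative sketch (hypothetical classifier): peek at the first eight
 * bytes of the transport header even when they sit in a paged fragment.
 * @thoff is the transport header offset already computed by the caller;
 * @buf must provide at least eight bytes of scratch space.
 */
static inline const u8 *skb_example_peek_l4(const struct sk_buff *skb,
					    int thoff, u8 *buf)
{
	/* Returns a pointer into the skb when the bytes are linear,
	 * otherwise copies them into @buf; NULL if the skb is too short. */
	return skb_header_pointer(skb, thoff, 8, buf);
}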
1699
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03001700static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
1701 void *to,
1702 const unsigned int len)
1703{
1704 memcpy(to, skb->data, len);
1705}
1706
1707static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
1708 const int offset, void *to,
1709 const unsigned int len)
1710{
1711 memcpy(to, skb->data + offset, len);
1712}
1713
Arnaldo Carvalho de Melo27d7ff42007-03-31 11:55:19 -03001714static inline void skb_copy_to_linear_data(struct sk_buff *skb,
1715 const void *from,
1716 const unsigned int len)
1717{
1718 memcpy(skb->data, from, len);
1719}
1720
1721static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
1722 const int offset,
1723 const void *from,
1724 const unsigned int len)
1725{
1726 memcpy(skb->data + offset, from, len);
1727}
1728
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729extern void skb_init(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001731/**
1732 * skb_get_timestamp - get timestamp from a skb
1733 * @skb: skb to get stamp from
1734 * @stamp: pointer to struct timeval to store stamp in
1735 *
1736 * Timestamps are stored in the skb as offsets to a base timestamp.
1737 * This function converts the offset back to a struct timeval and stores
1738 * it in stamp.
1739 */
Stephen Hemmingerf2c38392005-09-06 15:48:03 -07001740static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001741{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001742 *stamp = ktime_to_timeval(skb->tstamp);
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001743}
1744
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001745static inline void __net_timestamp(struct sk_buff *skb)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001746{
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001747 skb->tstamp = ktime_get_real();
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001748}
1749
Stephen Hemminger164891a2007-04-23 22:26:16 -07001750static inline ktime_t net_timedelta(ktime_t t)
1751{
1752 return ktime_sub(ktime_get_real(), t);
1753}
1754
Ilpo Järvinenb9ce2042007-06-15 15:08:43 -07001755static inline ktime_t net_invalid_timestamp(void)
1756{
1757 return ktime_set(0, 0);
1758}
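/*
 * Illustrative sketch (hypothetical reporting helper): how old is the
 * receive timestamp on this skb?  A buffer that was never stamped carries
 * the net_invalid_timestamp() value, i.e. all zeroes.
 */
static inline ktime_t skb_example_age(const struct sk_buff *skb)
{
	if (skb->tstamp.tv64 == 0)		/* never stamped */
		return net_invalid_timestamp();
	return net_timedelta(skb->tstamp);	/* time since the stamp */
}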
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001759
Herbert Xu759e5d02007-03-25 20:10:56 -07001760extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
Al Virob51655b2006-11-14 21:40:42 -08001761extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
Herbert Xufb286bb2005-11-10 13:01:24 -08001762
Herbert Xu60476372007-04-09 11:59:39 -07001763static inline int skb_csum_unnecessary(const struct sk_buff *skb)
1764{
1765 return skb->ip_summed & CHECKSUM_UNNECESSARY;
1766}
1767
Herbert Xufb286bb2005-11-10 13:01:24 -08001768/**
1769 * skb_checksum_complete - Calculate checksum of an entire packet
1770 * @skb: packet to process
1771 *
1772 * This function calculates the checksum over the entire packet plus
1773 * the value of skb->csum. The latter can be used to supply the
1774 * checksum of a pseudo header as used by TCP/UDP. It returns the
1775 * checksum.
1776 *
1777 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
1778 * this function can be used to verify the checksum of received
1779 * packets. In that case the function should return zero if the
1780 * checksum is correct. In particular, this function will return zero
1781 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
1782 * hardware has already verified the correctness of the checksum.
1783 */
Al Viro4381ca32007-07-15 21:00:11 +01001784static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
Herbert Xufb286bb2005-11-10 13:01:24 -08001785{
Herbert Xu60476372007-04-09 11:59:39 -07001786 return skb_csum_unnecessary(skb) ?
1787 0 : __skb_checksum_complete(skb);
Herbert Xufb286bb2005-11-10 13:01:24 -08001788}
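/*
 * Illustrative sketch (hypothetical datagram receive check): the usual
 * pattern for a protocol whose checksum covers the whole packet.  skb->csum
 * is assumed to already hold the pseudo-header sum when ip_summed is
 * CHECKSUM_COMPLETE.
 */
static inline int skb_example_verify_csum(struct sk_buff *skb)
{
	if (skb_checksum_complete(skb))
		return -EINVAL;	/* bad checksum; caller drops the skb */
	return 0;
}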
1789
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -07001790#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Yasuyuki Kozakaide6e05c2007-03-23 11:17:27 -07001791extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792static inline void nf_conntrack_put(struct nf_conntrack *nfct)
1793{
1794 if (nfct && atomic_dec_and_test(&nfct->use))
Yasuyuki Kozakaide6e05c2007-03-23 11:17:27 -07001795 nf_conntrack_destroy(nfct);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796}
1797static inline void nf_conntrack_get(struct nf_conntrack *nfct)
1798{
1799 if (nfct)
1800 atomic_inc(&nfct->use);
1801}
Yasuyuki Kozakai9fb9cbb2005-11-09 16:38:16 -08001802static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
1803{
1804 if (skb)
1805 atomic_inc(&skb->users);
1806}
1807static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
1808{
1809 if (skb)
1810 kfree_skb(skb);
1811}
1812#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813#ifdef CONFIG_BRIDGE_NETFILTER
1814static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
1815{
1816 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
1817 kfree(nf_bridge);
1818}
1819static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
1820{
1821 if (nf_bridge)
1822 atomic_inc(&nf_bridge->use);
1823}
1824#endif /* CONFIG_BRIDGE_NETFILTER */
Patrick McHardya193a4a2006-03-20 19:23:05 -08001825static inline void nf_reset(struct sk_buff *skb)
1826{
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -07001827#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Patrick McHardya193a4a2006-03-20 19:23:05 -08001828 nf_conntrack_put(skb->nfct);
1829 skb->nfct = NULL;
Patrick McHardya193a4a2006-03-20 19:23:05 -08001830 nf_conntrack_put_reasm(skb->nfct_reasm);
1831 skb->nfct_reasm = NULL;
1832#endif
1833#ifdef CONFIG_BRIDGE_NETFILTER
1834 nf_bridge_put(skb->nf_bridge);
1835 skb->nf_bridge = NULL;
1836#endif
1837}
1838
Yasuyuki Kozakaiedda5532007-03-14 16:43:37 -07001839/* Note: This doesn't put any conntrack and bridge info in dst. */
1840static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
1841{
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -07001842#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Yasuyuki Kozakaiedda5532007-03-14 16:43:37 -07001843 dst->nfct = src->nfct;
1844 nf_conntrack_get(src->nfct);
1845 dst->nfctinfo = src->nfctinfo;
Yasuyuki Kozakaiedda5532007-03-14 16:43:37 -07001846 dst->nfct_reasm = src->nfct_reasm;
1847 nf_conntrack_get_reasm(src->nfct_reasm);
1848#endif
1849#ifdef CONFIG_BRIDGE_NETFILTER
1850 dst->nf_bridge = src->nf_bridge;
1851 nf_bridge_get(src->nf_bridge);
1852#endif
1853}
1854
Yasuyuki Kozakaie7ac05f2007-03-14 16:44:01 -07001855static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
1856{
Yasuyuki Kozakaie7ac05f2007-03-14 16:44:01 -07001857#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
Yasuyuki Kozakai5f79e0f2007-03-23 11:17:07 -07001858 nf_conntrack_put(dst->nfct);
Yasuyuki Kozakaie7ac05f2007-03-14 16:44:01 -07001859 nf_conntrack_put_reasm(dst->nfct_reasm);
1860#endif
1861#ifdef CONFIG_BRIDGE_NETFILTER
1862 nf_bridge_put(dst->nf_bridge);
1863#endif
1864 __nf_copy(dst, src);
1865}
1866
James Morris984bc162006-06-09 00:29:17 -07001867#ifdef CONFIG_NETWORK_SECMARK
1868static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
1869{
1870 to->secmark = from->secmark;
1871}
1872
1873static inline void skb_init_secmark(struct sk_buff *skb)
1874{
1875 skb->secmark = 0;
1876}
1877#else
1878static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
1879{ }
1880
1881static inline void skb_init_secmark(struct sk_buff *skb)
1882{ }
1883#endif
1884
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001885static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
1886{
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001887 skb->queue_mapping = queue_mapping;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001888}
1889
Pavel Emelyanov4e3ab472007-10-21 17:01:29 -07001890static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
1891{
Pavel Emelyanov4e3ab472007-10-21 17:01:29 -07001892 return skb->queue_mapping;
Pavel Emelyanov4e3ab472007-10-21 17:01:29 -07001893}
1894
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001895static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
1896{
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001897 to->queue_mapping = from->queue_mapping;
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001898}
1899
Alexey Dobriyandef8b4f2008-10-28 13:24:06 -07001900#ifdef CONFIG_XFRM
1901static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
1902{
1903 return skb->sp;
1904}
1905#else
1906static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
1907{
1908 return NULL;
1909}
1910#endif
1911
Herbert Xu89114af2006-07-08 13:34:32 -07001912static inline int skb_is_gso(const struct sk_buff *skb)
1913{
1914 return skb_shinfo(skb)->gso_size;
1915}
1916
Brice Goglineabd7e32007-10-13 12:33:32 +02001917static inline int skb_is_gso_v6(const struct sk_buff *skb)
1918{
1919 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
1920}
1921
Ben Hutchings4497b072008-06-19 16:22:28 -07001922extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
1923
1924static inline bool skb_warn_if_lro(const struct sk_buff *skb)
1925{
1926 /* LRO sets gso_size but not gso_type, whereas if GSO is really
1927 * wanted then gso_type will be set. */
1928 struct skb_shared_info *shinfo = skb_shinfo(skb);
1929 if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
1930 __skb_warn_lro_forwarding(skb);
1931 return true;
1932 }
1933 return false;
1934}
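/*
 * Illustrative sketch (hypothetical forwarding hook): LRO-merged packets
 * must not be forwarded because no device could re-segment them; a bridge-
 * or router-like path would make this check before transmitting on another
 * port.
 */
static inline int skb_example_ok_to_forward(const struct sk_buff *skb)
{
	if (skb_warn_if_lro(skb))
		return 0;	/* warning emitted; caller should drop the skb */
	return 1;
}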
1935
Herbert Xu35fc92a2007-03-26 23:22:20 -07001936static inline void skb_forward_csum(struct sk_buff *skb)
1937{
1938 /* Unfortunately we don't support this one. Any brave souls? */
1939 if (skb->ip_summed == CHECKSUM_COMPLETE)
1940 skb->ip_summed = CHECKSUM_NONE;
1941}
1942
Rusty Russellf35d9d82008-02-04 23:49:54 -05001943bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944#endif /* __KERNEL__ */
1945#endif /* _LINUX_SKBUFF_H */