/*
 * Definitions for the 'struct sk_buff' memory handlers.
 *
 * Authors:
 *	Alan Cox, <gw4pts@gw4pts.ampr.org>
 *	Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and claims to have verified
 *		the checksum. skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below. This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host. The packet can
 *	    be treated in the same way as UNNECESSARY except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	    from skb->csum_start to the end and to record the checksum
 *	    at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- device is dumb, it is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 *	Any questions? No questions, good.		--ANK
 */
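
/*
 * Illustrative sketch (not part of this header's API): how a driver
 * receive path might label a buffer under the rules above, where
 * "nic_verified_csum", "hw_csum_available" and "hw_csum" are
 * hypothetical driver variables:
 *
 *	if (nic_verified_csum)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else if (hw_csum_available) {
 *		skb->csum = hw_csum;
 *		skb->ip_summed = CHECKSUM_COMPLETE;
 *	} else
 *		skb->ip_summed = CHECKSUM_NONE;
 */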

struct net_device;
struct scatterlist;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u32 page_offset;
	__u32 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves. The higher 16 bits hold references
 * to the payload part of skb->data. The lower 16 bits hold references to
 * the entire skb->data. A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

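/* Worked example of the convention above (a sketch, derived from the
 * arithmetic in skb_cloned()/skb_header_cloned() below):
 * dataref == (2 << SKB_DATAREF_SHIFT) + 3 describes three references to
 * skb->data in total, of which two are payload-only; 3 - 2 = 1 header
 * user remains, so that single user may still modify the header.
 */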

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@iif: ifindex of device we arrived on
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	ktime_t			tstamp;
	struct net_device	*dev;

	struct dst_entry	*dst;
	struct sec_path		*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				nf_trace:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			iif;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	__u16			queue_mapping;
#endif
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
	/* 2 byte hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}

extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	       skb_over_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	       skb_under_panic(struct sk_buff *skb, int len,
				       void *here);
extern void	       skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	int len = sizeof(struct sk_buff) + skb->len;

	if (unlikely((int)skb->truesize < len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state
{
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

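/*
 * Illustrative sketch of the sequential reader above ("process_block"
 * is a hypothetical consumer). skb_abort_seq_read() is only needed if
 * the walk stops before skb_seq_read() has returned 0:
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, avail;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process_block(data, avail);
 *		consumed += avail;
 *	}
 */
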
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer. This is done
 *	by acquiring a payload reference. You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
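
/*
 * Typical use in a receive handler (sketch): take ownership of a
 * possibly shared buffer before modifying it. On clone failure the
 * reference on the original has already been dropped.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */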

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer after the given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page = page;
	frag->page_offset = off;
	frag->size = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

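/*
 * Sketch: skb_fill_page_desc() only fills the fragment descriptor; the
 * caller is expected to hold a page reference and keep the byte
 * accounting consistent, roughly:
 *
 *	get_page(page);
 *	skb_fill_page_desc(skb, 0, page, off, size);
 *	skb->len      += size;
 *	skb->data_len += size;
 *	skb->truesize += size;
 *
 * (exactly how much to add to truesize is a driver-specific choice).
 */
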
#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

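/*
 * Sketch of typical linear packet construction with the helpers above;
 * "hdr_room", "payload", "payload_len" and "struct my_hdr" are
 * hypothetical:
 *
 *	skb = alloc_skb(hdr_room + payload_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_room);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	hdr = (struct my_hdr *)skb_push(skb, sizeof(*hdr));
 */
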
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

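/*
 * Sketch of defensive header parsing with pskb_may_pull(): make sure
 * the bytes are in the linear area before dereferencing them. Note
 * that a successful pull may reallocate the header, so pointers cached
 * into skb->data must be reloaded afterwards.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = (struct iphdr *)skb->data;
 */
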
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

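/*
 * Sketch: a receive path typically records header positions as
 * skb->data advances, after which the accessors above work with either
 * the offset or the pointer representation:
 *
 *	skb_reset_mac_header(skb);
 *	skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);
 *
 * after which skb_mac_header(skb) still reaches the link-layer header
 * even though skb->data has moved past it.
 */
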
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

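/*
 * Sketch of RX buffer allocation in a driver, combining the helper
 * above with the NET_IP_ALIGN recommendation described earlier:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, NET_IP_ALIGN);
 */
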
/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned. It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

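/*
 * Sketch: ensure writable headroom before pushing an extra header, as
 * an encapsulation path might ("encap_len" is hypothetical):
 *
 *	if (skb_cow_head(skb, encap_len))
 *		goto drop;
 *	skb_push(skb, encap_len);
 */
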
1406/**
1407 * skb_padto - pad an skbuff up to a minimal size
1408 * @skb: buffer to pad
1409 * @len: minimal length
1410 *
1411 * Pads up a buffer to ensure the trailing bytes exist and are
1412 * blanked. If the buffer already contains sufficient data it
Herbert Xu5b057c62006-06-23 02:06:41 -07001413 * is untouched. Otherwise it is extended. Returns zero on
1414 * success. The skb is freed on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 */
1416
Herbert Xu5b057c62006-06-23 02:06:41 -07001417static inline int skb_padto(struct sk_buff *skb, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418{
1419 unsigned int size = skb->len;
1420 if (likely(size >= len))
Herbert Xu5b057c62006-06-23 02:06:41 -07001421 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 return skb_pad(skb, len-size);
1423}
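
/*
 * Illustrative sketch: a transmit routine padding runt ethernet frames
 * up to the 60-byte minimum (ETH_ZLEN, from linux/if_ether.h). Note
 * that on failure the skb has already been freed, so the caller must
 * not touch it again.
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	skb was freed for us
 */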

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}
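
/*
 * Illustrative sketch of the usual caller pattern (sendpage-style
 * code): either grow the last fragment in place or start a new one.
 * "page", "off" and "copy" are assumed to describe the data being
 * appended.
 *
 *	i = skb_shinfo(skb)->nr_frags;
 *	if (skb_can_coalesce(skb, i, page, off)) {
 *		skb_shinfo(skb)->frags[i - 1].size += copy;
 *	} else {
 *		get_page(page);
 *		skb_fill_page_desc(skb, i, page, off, copy);
 *	}
 */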

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
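
/*
 * Illustrative sketch: code that wants to scan the entire packet
 * through skb->data must make the skb linear first, since paged
 * fragments are not directly addressable ("scan" is hypothetical).
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 *	scan(skb->data, skb->len);
 */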

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	Linearizes the skb if it is paged or cloned, so that afterwards
 *	its data is both contiguous and private to the caller. If there
 *	is no free memory -ENOMEM is returned, otherwise zero is returned
 *	and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
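
/*
 * Illustrative sketch: stripping an outer header from a received packet
 * while keeping a CHECKSUM_COMPLETE value consistent ("hlen" is the
 * assumed length of the header being removed).
 *
 *	start = skb->data;
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);
 */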

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets remains valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}
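
/*
 * Illustrative sketch: trimming link-layer padding off a received IPv4
 * packet so that skb->len matches the IP total length, without
 * invalidating any hardware checksum state.
 *
 *	if (pskb_trim_rcsum(skb, ntohs(ip_hdr(skb)->tot_len)))
 *		goto drop;
 */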

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)

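/*
 * Illustrative sketch: skb_queue_walk_safe() must be used when entries
 * may be unlinked during the walk. The caller is assumed to hold the
 * queue's lock or to own the queue outright ("should_drop" is
 * hypothetical).
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&sk->sk_write_queue, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, &sk->sk_write_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */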

extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(struct sk_buff *skb, int offset,
				      const void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
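
/*
 * Illustrative sketch: the canonical skb_header_pointer() pattern pulls
 * a protocol header into an on-stack buffer only when it is not already
 * contiguous in the linear area ("thoff" is an assumed transport-header
 * offset).
 *
 *	struct udphdr _udph, *uh;
 *
 *	uh = skb_header_pointer(skb, thoff, sizeof(_udph), &_udph);
 *	if (uh == NULL)
 *		return -EINVAL;		packet too short
 *	... read uh->source, uh->dest ...
 */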

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as a ktime_t. This function
 *	converts the timestamp to a struct timeval and stores it
 *	in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum. The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP. It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets. In that case the function should return zero if the
 *	checksum is correct. In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
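
/*
 * Illustrative sketch: verifying a received UDP checksum. skb->csum is
 * first primed with the pseudo-header sum so that a zero result means
 * the datagram checksummed correctly ("saddr" and "daddr" are assumed
 * to come from the IP header).
 *
 *	skb->csum = csum_tcpudp_nofold(saddr, daddr, skb->len,
 *				       IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */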

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: this copies the conntrack and bridge info into @dst without
 * releasing any references @dst may already hold; nf_copy() below
 * drops those references first.
 */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	skb->queue_mapping = queue_mapping;
#endif
}

static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return skb->queue_mapping;
#else
	return 0;
#endif
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	to->queue_mapping = from->queue_mapping;
#endif
}

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */