/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */

#define CHECKSUM_NONE 0
#define CHECKSUM_PARTIAL 1
#define CHECKSUM_UNNECESSARY 2
#define CHECKSUM_COMPLETE 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	(((X) - sizeof(struct skb_shared_info)) & \
	 ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and claims to have verified
 *		the checksum. skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied the checksum of _all_
 *	    the packet as seen by netif_rx() in skb->csum.
 *	    NOTE: Even if the device supports only some protocols, as long as
 *	    it is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	hard_start_xmit from skb->transport_header to the end and to record
 *	the checksum at skb->transport_header + skb->csum.
 *
 *	The device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- a clever device, able to checksum everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM - the device is dumb, able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for an unknown reason. Though, see the comment
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */

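/*
 * Illustrative example only (hypothetical driver code, not part of this
 * header): a receive path typically translates the hardware's checksum
 * report into one of the values above before handing the skb to the stack:
 *
 *	if (hw_verified_csum)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else if (hw_gave_full_csum) {
 *		skb->ip_summed = CHECKSUM_COMPLETE;
 *		skb->csum = hw_csum;		(checksum of the whole packet)
 *	} else
 *		skb->ip_summed = CHECKSUM_NONE;	(stack must verify it)
 *
 * On transmit, a device advertising NETIF_F_HW_CSUM may be handed skbs with
 * ip_summed == CHECKSUM_PARTIAL and must then insert the checksum itself.
 * hw_verified_csum, hw_gave_full_csum and hw_csum are made-up names standing
 * in for whatever the hardware actually reports.
 */
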
struct net_device;

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without a frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

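/*
 * Illustrative example only (not part of this header): given the layout
 * described above, the two counts are recovered from dataref like this:
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int all_refs     = dataref & SKB_DATAREF_MASK;
 *
 * skb_header_cloned() below uses exactly this decomposition to decide
 * whether the header part of skb->data is still private to the caller.
 */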

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@iif: ifindex of device we arrived on
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	ktime_t			tstamp;
	struct net_device	*dev;
	int			iif;
	/* 4 byte hole on 64 bit */

	unsigned char		*transport_header;
	unsigned char		*network_header;
	unsigned char		*mac_header;
	struct dst_entry	*dst;
	struct sec_path		*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len;
	union {
		__wsum		csum;
		__u32		csum_offset;
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}

extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	       skb_over_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	       skb_under_panic(struct sk_buff *skb, int len,
				       void *here);
extern void	       skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state
{
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

/* Internal */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

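/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * the usual pattern in a receive handler that may need to modify the skb.
 * If someone else also holds a reference, skb_share_check() returns a
 * private clone (or NULL on allocation failure) and drops this caller's
 * reference on the shared original.
 */
static inline struct sk_buff *example_skb_make_private(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;	/* the original has already been freed */
	/* skb now has a single user; note the data may still be cloned */
	return skb;
}
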
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

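/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * as the comments above warn, skb_peek() leaves the buffer on the list, so
 * a typical caller takes the queue's own lock around the peek to keep the
 * buffer from being unlinked while it is examined.
 */
static inline unsigned int example_peek_head_len(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned long flags;
	unsigned int len = 0;

	spin_lock_irqsave(&list->lock, flags);
	skb = skb_peek(list);
	if (skb)
		len = skb->len;
	spin_unlock_irqrestore(&list->lock, flags);
	return len;
}
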
/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

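/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * a protocol handler normally calls pskb_may_pull() before touching header
 * bytes, so that the bytes it wants are guaranteed to be in the linear area
 * that skb->data points at.
 */
static inline int example_read_first_byte(struct sk_buff *skb, __u8 *val)
{
	if (!pskb_may_pull(skb, 1))
		return -EINVAL;		/* packet too short */
	*val = skb->data[0];		/* now known to be in linear data */
	return 0;
}
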
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

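/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * the usual pattern for building an outgoing packet with the helpers above.
 * Headroom is reserved first, payload is added with skb_put() at the tail,
 * and headers are then prepended with skb_push() back towards skb->head.
 */
static inline struct sk_buff *example_build_packet(unsigned int hdr_len,
						   unsigned int data_len)
{
	struct sk_buff *skb = alloc_skb(hdr_len + data_len, GFP_ATOMIC);
	unsigned char *payload, *header;

	if (!skb)
		return NULL;

	skb_reserve(skb, hdr_len);		/* leave room for headers */
	payload = skb_put(skb, data_len);	/* caller fills the payload */
	header  = skb_push(skb, hdr_len);	/* caller fills the header */
	(void)payload;
	(void)header;
	return skb;
}
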
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb->transport_header - skb->data;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb->network_header - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}

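/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * how the header pointers above are typically set up while a received
 * Ethernet frame is parsed. The value 14 is the Ethernet header length,
 * open-coded here only to keep the example self-contained.
 */
static inline void example_parse_eth_headers(struct sk_buff *skb,
					     unsigned int ip_hdr_len)
{
	skb_reset_mac_header(skb);		/* link layer starts at data */
	skb_pull(skb, 14);			/* step over the Ethernet header */
	skb_reset_network_header(skb);		/* IP header starts here */
	skb_set_transport_header(skb, ip_hdr_len); /* TCP/UDP header offset */
}
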
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies; it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

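/*
 * Illustrative sketch only (hypothetical driver code, not part of this
 * header): reserving NET_IP_ALIGN bytes right after allocation, so that
 * the IP header lands on an aligned address once the 14-byte Ethernet
 * header has been received into the buffer.
 */
static inline struct sk_buff *example_rx_alloc_aligned(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
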
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom; you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len  = len;
	skb->tail = skb->data + len;
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to
 *	out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
			skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + (NET_SKB_PAD - 1)) &
				~(NET_SKB_PAD - 1), 0, GFP_ATOMIC);
	return 0;
}

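/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * a typical "make room, then push a new header" sequence as used by
 * encapsulation paths. skb_cow() reallocates the head if the skb is cloned
 * or lacks @encap_len bytes of headroom; only then is it safe to skb_push().
 */
static inline int example_push_encap_header(struct sk_buff *skb,
					    unsigned int encap_len)
{
	if (skb_cow(skb, encap_len) < 0)
		return -ENOMEM;
	skb_push(skb, encap_len);	/* caller fills in the new header */
	return 0;
}
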
/**
 *	skb_padto - pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)

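/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * skb_queue_walk() takes no lock and must not be used to unlink the current
 * entry, so a read-only walk is normally wrapped in the queue's own lock.
 */
static inline unsigned int example_queue_total_bytes(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned long flags;
	unsigned int bytes = 0;

	spin_lock_irqsave(&list->lock, flags);
	skb_queue_walk(list, skb)
		bytes += skb->len;
	spin_unlock_irqrestore(&list->lock, flags);
	return bytes;
}
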

extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
				      void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

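/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * skb_header_pointer() returns either a pointer into the linear data or a
 * pointer to the caller's buffer that the bytes were copied into, so the
 * returned pointer (not @buffer) is what must be used afterwards.
 */
static inline int example_peek_header_byte(const struct sk_buff *skb,
					   int offset, u8 *value)
{
	u8 buf;
	const u8 *p = skb_header_pointer(skb, offset, sizeof(buf), &buf);

	if (!p)
		return -EINVAL;		/* packet shorter than offset + 1 */
	*value = *p;
	return 0;
}
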
extern void skb_init(void);
extern void skb_add_mtu(int mtu);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}


extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__skb_checksum_complete(skb);
}

#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge  = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
	nf_conntrack_put(dst->nfct);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) {}
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) {}
#endif /* CONFIG_NETFILTER */

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */