/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
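
/*
 * Illustrative sketch (not part of the original file): one common way to
 * satisfy the NOTE above is to take the queue lock with interrupts
 * disabled around the lockless __skb_ helpers, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_tail(list, skb);
 *	spin_unlock_irqrestore(&list->lock, flags);
 *
 * This is essentially what the locked wrapper skb_queue_tail() does.
 */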

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}


/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip,
			bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !gfp_pfmemalloc_allowed(flags))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
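
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to consume the pfmemalloc out-parameter; __alloc_skb() below
 * does exactly this, propagating the flag into skb->pfmemalloc:
 *
 *	bool pfmemalloc;
 *	u8 *data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
 *
 *	if (data)
 *		skb->pfmemalloc = pfmemalloc;
 */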

/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least @size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
		child->pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
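
/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach this through the alloc_skb() wrapper, reserve headroom, then fill
 * the linear area. The sizes, len and payload below are made-up
 * placeholders:
 *
 *	struct sk_buff *skb = alloc_skb(128 + NET_IP_ALIGN, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		memcpy(skb_put(skb, len), payload, len);
 *	}
 */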

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc()
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, the driver allocates only the data buffer where the NIC
 * puts the incoming frame. The driver should add room at the head
 * (NET_SKB_PAD) and MUST add room at the tail
 * (SKB_DATA_ALIGN(skb_shared_info)).
 * After IO, the driver calls build_skb() to allocate the sk_buff and
 * populate it before giving the packet to the stack; see the sketch
 * after this function.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
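
/*
 * Illustrative sketch (not part of the original file) of the RX pattern
 * described in the notes above; 1536 and frame_len are made up for the
 * example:
 *
 *	unsigned int buf_len = NET_SKB_PAD + SKB_DATA_ALIGN(1536) +
 *			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(buf_len);
 *
 *	// ... NIC DMAs the received frame to buf + NET_SKB_PAD ...
 *
 *	struct sk_buff *skb = build_skb(buf, buf_len);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 */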

struct netdev_alloc_cache {
	struct page_frag	frag;
	/* we maintain a pagecount bias, so that we don't dirty cache line
	 * containing page->_count every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc;
	void *data = NULL;
	int order;
	unsigned long flags;

	local_irq_save(flags);
	nc = &__get_cpu_var(netdev_alloc_cache);
	if (unlikely(!nc->frag.page)) {
refill:
		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
			gfp_t gfp = gfp_mask;

			if (order)
				gfp |= __GFP_COMP | __GFP_NOWARN;
			nc->frag.page = alloc_pages(gfp, order);
			if (likely(nc->frag.page))
				break;
			if (--order < 0)
				goto end;
		}
		nc->frag.size = PAGE_SIZE << order;
recycle:
		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
		nc->frag.offset = 0;
	}

	if (nc->frag.offset + fragsz > nc->frag.size) {
		/* avoid unnecessary locked operations if possible */
		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
			goto recycle;
		goto refill;
	}

	data = page_address(nc->frag.page) + nc->frag.offset;
	nc->frag.offset += fragsz;
	nc->pagecnt_bias--;
end:
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
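
/*
 * Illustrative note (not part of the original file): the pagecnt_bias
 * trick above keeps the hot path free of atomic operations; on a fresh
 * page:
 *
 *	page->_count = NETDEV_PAGECNT_MAX_BIAS;	// set once, atomically
 *	nc->pagecnt_bias--;			// per fragment, no atomics
 *
 * When the page fills up, _count == pagecnt_bias means every fragment
 * handed out has already been put by its consumer, so the page can be
 * recycled without a fresh allocation.
 */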

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = __netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
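
/*
 * Illustrative sketch (not part of the original file): a driver RX path
 * typically goes through the netdev_alloc_skb() wrapper; rx_len is a
 * made-up name for the received frame length:
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, rx_len);
 *
 *	if (skb) {
 *		skb_put(skb, rx_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */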

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
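
/*
 * Illustrative sketch (not part of the original file): attaching a page
 * fragment as frag 0 of an skb; page, offset and len are placeholders,
 * and the skb takes over one reference to the page (dropped later by
 * skb_frag_unref() in skb_release_data()):
 *
 *	skb_add_rx_frag(skb, 0, page, offset, len, PAGE_SIZE);
 *
 * Note that truesize (PAGE_SIZE here) accounts for the whole buffer the
 * fragment lives in, not just len.
 */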

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If skb buf is from userspace, we need to notify the caller
		 * the lower device DMA has done;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that the
 *	frame is being dropped after a failure and notes that in a tracepoint;
 *	consume_skb() is for frames that were consumed successfully.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
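
/*
 * Illustrative sketch (not part of the original file): choosing between
 * the two free helpers so drop-monitoring tools only see real drops;
 * process_frame() is a made-up placeholder:
 *
 *	if (process_frame(skb) < 0)
 *		kfree_skb(skb);		// dropped: hits the kfree_skb tracepoint
 *	else
 *		consume_skb(skb);	// consumed normally: not seen as a drop
 */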

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->ooo_okay = old->ooo_okay;
	new->l4_rxhash = old->l4_rxhash;
	new->no_fcs = old->no_fcs;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->pfmemalloc = old->pfmemalloc;
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function. Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel memory and drop the reference
 *	to the userspace pages.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails, the
 *	function returns %NULL; otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
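
/*
 * Illustrative sketch (not part of the original file): cloning lets two
 * paths hold the same payload; the data stays shared, so a clone must not
 * be used to modify packet bytes. deliver_copy() is a made-up consumer:
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone)
 *		deliver_copy(clone);
 *
 * To actually write to the packet, use skb_copy() or pskb_copy() below
 * instead.
 */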

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
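
/*
 * Illustrative sketch (not part of the original file): taking a full
 * private copy before rewriting payload bytes in place (assumes
 * skb->len >= 4; purely illustrative):
 *
 *	struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (copy)
 *		memset(copy->data, 0xff, 4);	// safe: data is now private
 */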
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 927 | |
| 928 | /** |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 929 | * __pskb_copy - create copy of an sk_buff with private head. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 930 | * @skb: buffer to copy |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 931 | * @headroom: headroom of new skb |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 932 | * @gfp_mask: allocation priority |
| 933 | * |
| 934 | * Make a copy of both an &sk_buff and part of its data, located |
| 935 | * in header. Fragmented data remain shared. This is used when |
| 936 | * the caller wishes to modify only header of &sk_buff and needs |
| 937 | * private copy of the header to alter. Returns %NULL on failure |
| 938 | * or the pointer to the buffer on success. |
| 939 | * The returned buffer has a reference count of 1. |
| 940 | */ |
| 941 | |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 942 | struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 | { |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 944 | unsigned int size = skb_headlen(skb) + headroom; |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 945 | struct sk_buff *n = __alloc_skb(size, gfp_mask, |
| 946 | skb_alloc_rx_flag(skb), NUMA_NO_NODE); |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 947 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | if (!n) |
| 949 | goto out; |
| 950 | |
| 951 | /* Set the data pointer */ |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 952 | skb_reserve(n, headroom); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 953 | /* Set the tail pointer and length */ |
| 954 | skb_put(n, skb_headlen(skb)); |
| 955 | /* Copy the bytes */ |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 956 | skb_copy_from_linear_data(skb, n->data, n->len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 | |
Herbert Xu | 25f484a | 2006-11-07 14:57:15 -0800 | [diff] [blame] | 958 | n->truesize += skb->data_len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 | n->data_len = skb->data_len; |
| 960 | n->len = skb->len; |
| 961 | |
| 962 | if (skb_shinfo(skb)->nr_frags) { |
| 963 | int i; |
| 964 | |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 965 | if (skb_orphan_frags(skb, gfp_mask)) { |
| 966 | kfree_skb(n); |
| 967 | n = NULL; |
| 968 | goto out; |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 969 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 970 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 971 | skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 972 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | } |
| 974 | skb_shinfo(n)->nr_frags = i; |
| 975 | } |
| 976 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 977 | if (skb_has_frag_list(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 978 | skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; |
| 979 | skb_clone_fraglist(n); |
| 980 | } |
| 981 | |
| 982 | copy_skb_header(n, skb); |
| 983 | out: |
| 984 | return n; |
| 985 | } |
Eric Dumazet | 117632e | 2011-12-03 21:39:53 +0000 | [diff] [blame] | 986 | EXPORT_SYMBOL(__pskb_copy); |
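/* Illustrative sketch, not part of the original file: a typical use of
 * __pskb_copy() is taking a header-private copy before rewriting protocol
 * headers, while the paged fragments stay shared (their reference counts
 * merely get bumped, as above). Reusing the original headroom is an
 * assumption for this example, not a requirement.
 */
static struct sk_buff * __maybe_unused
example_private_header_copy(struct sk_buff *skb)
{
	struct sk_buff *n = __pskb_copy(skb, skb_headroom(skb), GFP_ATOMIC);

	/* On success, the linear header of 'n' may be modified freely;
	 * fragment pages are still shared with 'skb'.
	 */
	return n;
}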
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | |
| 988 | /** |
| 989 | * pskb_expand_head - reallocate header of &sk_buff |
| 990 | * @skb: buffer to reallocate |
| 991 | * @nhead: room to add at head |
| 992 | * @ntail: room to add at tail |
| 993 | * @gfp_mask: allocation priority |
| 994 | * |
| 995 | * Expands (or creates an identical copy, if &nhead and &ntail are zero) |
| 996 | * the header of the skb. The &sk_buff itself is not changed and MUST |
| 997 | * have a reference count of 1. Returns zero on success, or a negative |
| 998 | * error code if expansion failed; on failure, the &sk_buff is not changed. |
| 999 | * |
| 1000 | * All the pointers pointing into the skb header may change and must be |
| 1001 | * reloaded after a call to this function. |
| 1002 | */ |
| 1003 | |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1004 | int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1005 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | { |
| 1007 | int i; |
| 1008 | u8 *data; |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 1009 | int size = nhead + skb_end_offset(skb) + ntail; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1010 | long off; |
| 1011 | |
Herbert Xu | 4edd87a | 2008-10-01 07:09:38 -0700 | [diff] [blame] | 1012 | BUG_ON(nhead < 0); |
| 1013 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 | if (skb_shared(skb)) |
| 1015 | BUG(); |
| 1016 | |
| 1017 | size = SKB_DATA_ALIGN(size); |
| 1018 | |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1019 | if (skb_pfmemalloc(skb)) |
| 1020 | gfp_mask |= __GFP_MEMALLOC; |
| 1021 | data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), |
| 1022 | gfp_mask, NUMA_NO_NODE, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1023 | if (!data) |
| 1024 | goto nodata; |
Eric Dumazet | 87151b8 | 2012-04-10 20:08:39 +0000 | [diff] [blame] | 1025 | size = SKB_WITH_OVERHEAD(ksize(data)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1026 | |
| 1027 | /* Copy only real data... and, alas, the header. This should be |
Eric Dumazet | 6602ceb | 2010-09-01 05:25:10 +0000 | [diff] [blame] | 1028 | * optimized for the case when the header is empty. |
| 1029 | */ |
| 1030 | memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); |
| 1031 | |
| 1032 | memcpy((struct skb_shared_info *)(data + size), |
| 1033 | skb_shinfo(skb), |
Eric Dumazet | fed6638 | 2010-07-22 19:09:08 +0000 | [diff] [blame] | 1034 | offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1036 | /* |
| 1037 | * if shinfo is shared we must drop the old head gracefully, but if it |
| 1038 | * is not we can just drop the old head and let the existing refcount |
| 1039 | * stand, since all we did was relocate the values |
| 1040 | */ |
| 1041 | if (skb_cloned(skb)) { |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1042 | /* copy this skb's zero-copy frags */ |
Michael S. Tsirkin | 70008aa | 2012-07-20 09:23:10 +0000 | [diff] [blame] | 1043 | if (skb_orphan_frags(skb, gfp_mask)) |
| 1044 | goto nofrags; |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1045 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1046 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1047 | |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1048 | if (skb_has_frag_list(skb)) |
| 1049 | skb_clone_fraglist(skb); |
| 1050 | |
| 1051 | skb_release_data(skb); |
Alexander Duyck | 3e24591 | 2012-05-04 14:26:51 +0000 | [diff] [blame] | 1052 | } else { |
| 1053 | skb_free_head(skb); |
Eric Dumazet | 1fd6304 | 2010-09-02 23:09:32 +0000 | [diff] [blame] | 1054 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1055 | off = (data + nhead) - skb->head; |
| 1056 | |
| 1057 | skb->head = data; |
Eric Dumazet | d3836f2 | 2012-04-27 00:33:38 +0000 | [diff] [blame] | 1058 | skb->head_frag = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | skb->data += off; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1060 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
| 1061 | skb->end = size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1062 | off = nhead; |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1063 | #else |
| 1064 | skb->end = skb->head + size; |
Patrick McHardy | 56eb888 | 2007-04-09 11:45:04 -0700 | [diff] [blame] | 1065 | #endif |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1066 | /* {transport,network,mac}_header and tail are relative to skb->head */ |
| 1067 | skb->tail += off; |
Arnaldo Carvalho de Melo | b0e380b | 2007-04-10 21:21:55 -0700 | [diff] [blame] | 1068 | skb->transport_header += off; |
| 1069 | skb->network_header += off; |
Stephen Hemminger | 603a8bb | 2009-06-17 12:17:34 +0000 | [diff] [blame] | 1070 | if (skb_mac_header_was_set(skb)) |
| 1071 | skb->mac_header += off; |
Andrea Shepard | 00c5a98 | 2010-07-22 09:12:35 +0000 | [diff] [blame] | 1072 | /* Only adjust this if it actually is csum_start rather than csum */ |
| 1073 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
| 1074 | skb->csum_start += nhead; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1075 | skb->cloned = 0; |
Patrick McHardy | 334a813 | 2007-06-25 04:35:20 -0700 | [diff] [blame] | 1076 | skb->hdr_len = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 | skb->nohdr = 0; |
| 1078 | atomic_set(&skb_shinfo(skb)->dataref, 1); |
| 1079 | return 0; |
| 1080 | |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1081 | nofrags: |
| 1082 | kfree(data); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 | nodata: |
| 1084 | return -ENOMEM; |
| 1085 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1086 | EXPORT_SYMBOL(pskb_expand_head); |
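/* Illustrative sketch, not part of the original file: after a successful
 * pskb_expand_head() every cached pointer into the old header is stale,
 * as the kernel-doc above warns. The 32-byte growth is an arbitrary
 * example value.
 */
static int __maybe_unused example_grow_headroom(struct sk_buff *skb)
{
	int err = pskb_expand_head(skb, SKB_DATA_ALIGN(32), 0, GFP_ATOMIC);

	if (err)
		return err;	/* skb left untouched on failure */
	/* Re-derive header pointers (e.g. via skb_network_header(skb))
	 * here rather than reusing values cached before the call.
	 */
	return 0;
}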
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | |
| 1088 | /* Make private copy of skb with writable head and some headroom */ |
| 1089 | |
| 1090 | struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) |
| 1091 | { |
| 1092 | struct sk_buff *skb2; |
| 1093 | int delta = headroom - skb_headroom(skb); |
| 1094 | |
| 1095 | if (delta <= 0) |
| 1096 | skb2 = pskb_copy(skb, GFP_ATOMIC); |
| 1097 | else { |
| 1098 | skb2 = skb_clone(skb, GFP_ATOMIC); |
| 1099 | if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, |
| 1100 | GFP_ATOMIC)) { |
| 1101 | kfree_skb(skb2); |
| 1102 | skb2 = NULL; |
| 1103 | } |
| 1104 | } |
| 1105 | return skb2; |
| 1106 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1107 | EXPORT_SYMBOL(skb_realloc_headroom); |
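/* Illustrative sketch, not part of the original file: a driver that must
 * prepend a hardware header often uses skb_realloc_headroom() as below.
 * Note the helper itself never frees the original skb; on failure here
 * the caller still owns 'skb'.
 */
static struct sk_buff * __maybe_unused
example_ensure_headroom(struct sk_buff *skb, unsigned int needed)
{
	struct sk_buff *nskb;

	if (skb_headroom(skb) >= needed && !skb_shared(skb))
		return skb;			/* already fine */

	nskb = skb_realloc_headroom(skb, needed);
	if (nskb)
		consume_skb(skb);		/* switch over to the copy */
	return nskb;
}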
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | |
| 1109 | /** |
| 1110 | * skb_copy_expand - copy and expand sk_buff |
| 1111 | * @skb: buffer to copy |
| 1112 | * @newheadroom: new free bytes at head |
| 1113 | * @newtailroom: new free bytes at tail |
| 1114 | * @gfp_mask: allocation priority |
| 1115 | * |
| 1116 | * Make a copy of both an &sk_buff and its data and, while doing so, |
| 1117 | * allocate additional space. |
| 1118 | * |
| 1119 | * This is used when the caller wishes to modify the data and needs a |
| 1120 | * private copy of the data to alter as well as more space for new fields. |
| 1121 | * Returns %NULL on failure or the pointer to the buffer |
| 1122 | * on success. The returned buffer has a reference count of 1. |
| 1123 | * |
| 1124 | * You must pass %GFP_ATOMIC as the allocation priority if this function |
| 1125 | * is called from an interrupt. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | */ |
| 1127 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1128 | int newheadroom, int newtailroom, |
Al Viro | dd0fc66 | 2005-10-07 07:46:04 +0100 | [diff] [blame] | 1129 | gfp_t gfp_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1130 | { |
| 1131 | /* |
| 1132 | * Allocate the copy buffer |
| 1133 | */ |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 1134 | struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, |
| 1135 | gfp_mask, skb_alloc_rx_flag(skb), |
| 1136 | NUMA_NO_NODE); |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1137 | int oldheadroom = skb_headroom(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | int head_copy_len, head_copy_off; |
Herbert Xu | 5288605 | 2007-09-16 16:32:11 -0700 | [diff] [blame] | 1139 | int off; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 | |
| 1141 | if (!n) |
| 1142 | return NULL; |
| 1143 | |
| 1144 | skb_reserve(n, newheadroom); |
| 1145 | |
| 1146 | /* Set the tail pointer and length */ |
| 1147 | skb_put(n, skb->len); |
| 1148 | |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1149 | head_copy_len = oldheadroom; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 | head_copy_off = 0; |
| 1151 | if (newheadroom <= head_copy_len) |
| 1152 | head_copy_len = newheadroom; |
| 1153 | else |
| 1154 | head_copy_off = newheadroom - head_copy_len; |
| 1155 | |
| 1156 | /* Copy the linear header and data. */ |
| 1157 | if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, |
| 1158 | skb->len + head_copy_len)) |
| 1159 | BUG(); |
| 1160 | |
| 1161 | copy_skb_header(n, skb); |
| 1162 | |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1163 | off = newheadroom - oldheadroom; |
David S. Miller | be2b6e6 | 2010-07-22 13:27:09 -0700 | [diff] [blame] | 1164 | if (n->ip_summed == CHECKSUM_PARTIAL) |
| 1165 | n->csum_start += off; |
Herbert Xu | 5288605 | 2007-09-16 16:32:11 -0700 | [diff] [blame] | 1166 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1167 | n->transport_header += off; |
| 1168 | n->network_header += off; |
Stephen Hemminger | 603a8bb | 2009-06-17 12:17:34 +0000 | [diff] [blame] | 1169 | if (skb_mac_header_was_set(skb)) |
| 1170 | n->mac_header += off; |
Herbert Xu | 5288605 | 2007-09-16 16:32:11 -0700 | [diff] [blame] | 1171 | #endif |
Patrick McHardy | efd1e8d | 2007-04-10 18:30:09 -0700 | [diff] [blame] | 1172 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1173 | return n; |
| 1174 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1175 | EXPORT_SYMBOL(skb_copy_expand); |
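/* Illustrative sketch, not part of the original file: skb_copy_expand()
 * is the heavyweight option -- a fully private copy plus extra room at
 * both ends. The 16/4 byte sizes are made-up values for an assumed extra
 * header and trailer.
 */
static struct sk_buff * __maybe_unused
example_copy_with_room(const struct sk_buff *skb)
{
	/* GFP_ATOMIC because this could run from softirq context. */
	return skb_copy_expand(skb, 16, 4, GFP_ATOMIC);
}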
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1176 | |
| 1177 | /** |
| 1178 | * skb_pad - zero pad the tail of an skb |
| 1179 | * @skb: buffer to pad |
| 1180 | * @pad: space to pad |
| 1181 | * |
| 1182 | * Ensure that a buffer is followed by a padding area that is zero |
| 1183 | * filled. Used by network drivers which may DMA or transfer data |
| 1184 | * beyond the buffer end onto the wire. |
| 1185 | * |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1186 | * May return error in out of memory cases. The skb is freed on error. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 | */ |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1188 | |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1189 | int skb_pad(struct sk_buff *skb, int pad) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | { |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1191 | int err; |
| 1192 | int ntail; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1193 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | /* If the skbuff is non-linear, tailroom is always zero. */ |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1195 | if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 | memset(skb->data+skb->len, 0, pad); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1197 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | } |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1199 | |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1200 | ntail = skb->data_len + pad - (skb->end - skb->tail); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1201 | if (likely(skb_cloned(skb) || ntail > 0)) { |
| 1202 | err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); |
| 1203 | if (unlikely(err)) |
| 1204 | goto free_skb; |
| 1205 | } |
| 1206 | |
| 1207 | /* FIXME: The use of this function with non-linear skb's really needs |
| 1208 | * to be audited. |
| 1209 | */ |
| 1210 | err = skb_linearize(skb); |
| 1211 | if (unlikely(err)) |
| 1212 | goto free_skb; |
| 1213 | |
| 1214 | memset(skb->data + skb->len, 0, pad); |
| 1215 | return 0; |
| 1216 | |
| 1217 | free_skb: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | kfree_skb(skb); |
Herbert Xu | 5b057c6 | 2006-06-23 02:06:41 -0700 | [diff] [blame] | 1219 | return err; |
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1220 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1221 | EXPORT_SYMBOL(skb_pad); |
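/* Illustrative sketch, not part of the original file: Ethernet drivers
 * commonly pad runt frames up to the 60-byte minimum (ETH_ZLEN, from
 * <linux/if_ether.h>) before DMA. Because skb_pad() frees the skb on
 * error, the caller must not touch it after a failure.
 */
static int __maybe_unused example_pad_to_min(struct sk_buff *skb)
{
	if (skb->len >= ETH_ZLEN)
		return 0;
	return skb_pad(skb, ETH_ZLEN - skb->len);	/* skb gone on error */
}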
YOSHIFUJI Hideaki | 4ec93ed | 2007-02-09 23:24:36 +0900 | [diff] [blame] | 1222 | |
Ilpo Järvinen | 0dde3e1 | 2008-03-27 17:43:41 -0700 | [diff] [blame] | 1223 | /** |
| 1224 | * skb_put - add data to a buffer |
| 1225 | * @skb: buffer to use |
| 1226 | * @len: amount of data to add |
| 1227 | * |
| 1228 | * This function extends the used data area of the buffer. If this would |
| 1229 | * exceed the total buffer size the kernel will panic. A pointer to the |
| 1230 | * first byte of the extra data is returned. |
| 1231 | */ |
| 1232 | unsigned char *skb_put(struct sk_buff *skb, unsigned int len) |
| 1233 | { |
| 1234 | unsigned char *tmp = skb_tail_pointer(skb); |
| 1235 | SKB_LINEAR_ASSERT(skb); |
| 1236 | skb->tail += len; |
| 1237 | skb->len += len; |
| 1238 | if (unlikely(skb->tail > skb->end)) |
| 1239 | skb_over_panic(skb, len, __builtin_return_address(0)); |
| 1240 | return tmp; |
| 1241 | } |
| 1242 | EXPORT_SYMBOL(skb_put); |
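/* Illustrative sketch, not part of the original file: the canonical
 * pattern is to reserve headroom first and then append payload with
 * skb_put(). 'hlen' and 'dlen' are assumed caller-supplied sizes that
 * fit the allocation, so skb_put() cannot trip the over-panic above.
 */
static struct sk_buff * __maybe_unused
example_build_frame(const void *data, unsigned int hlen, unsigned int dlen)
{
	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, hlen);				/* headroom for headers */
	memcpy(skb_put(skb, dlen), data, dlen);		/* append payload */
	return skb;
}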
| 1243 | |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1244 | /** |
Ilpo Järvinen | c2aa270 | 2008-03-27 17:52:40 -0700 | [diff] [blame] | 1245 | * skb_push - add data to the start of a buffer |
| 1246 | * @skb: buffer to use |
| 1247 | * @len: amount of data to add |
| 1248 | * |
| 1249 | * This function extends the used data area of the buffer at the buffer |
| 1250 | * start. If this would exceed the total buffer headroom the kernel will |
| 1251 | * panic. A pointer to the first byte of the extra data is returned. |
| 1252 | */ |
| 1253 | unsigned char *skb_push(struct sk_buff *skb, unsigned int len) |
| 1254 | { |
| 1255 | skb->data -= len; |
| 1256 | skb->len += len; |
| 1257 | if (unlikely(skb->data<skb->head)) |
| 1258 | skb_under_panic(skb, len, __builtin_return_address(0)); |
| 1259 | return skb->data; |
| 1260 | } |
| 1261 | EXPORT_SYMBOL(skb_push); |
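/* Illustrative sketch, not part of the original file: prepending a header
 * consumes headroom, so a careful caller guarantees the room exists
 * first. 'struct example_hdr' and the 0x88b5 (IEEE local experimental)
 * EtherType are purely illustrative.
 */
struct example_hdr {
	__be16 proto;
	__be16 len;
};

static void __maybe_unused example_push_header(struct sk_buff *skb)
{
	struct example_hdr *h;

	BUG_ON(skb_headroom(skb) < sizeof(*h));	/* mirrors skb_push()'s panic */
	h = (struct example_hdr *)skb_push(skb, sizeof(*h));
	h->proto = htons(0x88b5);
	h->len = htons((u16)skb->len);
}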
| 1262 | |
| 1263 | /** |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1264 | * skb_pull - remove data from the start of a buffer |
| 1265 | * @skb: buffer to use |
| 1266 | * @len: amount of data to remove |
| 1267 | * |
| 1268 | * This function removes data from the start of a buffer, returning |
| 1269 | * the memory to the headroom. A pointer to the next data in the buffer |
| 1270 | * is returned. Once the data has been pulled future pushes will overwrite |
| 1271 | * the old data. |
| 1272 | */ |
| 1273 | unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) |
| 1274 | { |
David S. Miller | 47d2964 | 2010-05-02 02:21:44 -0700 | [diff] [blame] | 1275 | return skb_pull_inline(skb, len); |
Ilpo Järvinen | 6be8ac2 | 2008-03-27 17:47:24 -0700 | [diff] [blame] | 1276 | } |
| 1277 | EXPORT_SYMBOL(skb_pull); |
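/* Illustrative sketch, not part of the original file: a receive path
 * strips a just-parsed header. pskb_may_pull() first makes sure 'hlen'
 * bytes are linear (possibly via __pskb_pull_tail() further below).
 */
static int __maybe_unused example_strip_header(struct sk_buff *skb,
					       unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;		/* header not fully present */
	skb_pull(skb, hlen);		/* skb->data now points past it */
	return 0;
}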
| 1278 | |
Ilpo Järvinen | 419ae74 | 2008-03-27 17:54:01 -0700 | [diff] [blame] | 1279 | /** |
| 1280 | * skb_trim - remove end from a buffer |
| 1281 | * @skb: buffer to alter |
| 1282 | * @len: new length |
| 1283 | * |
| 1284 | * Cut the length of a buffer down by removing data from the tail. If |
| 1285 | * the buffer is already under the length specified it is not modified. |
| 1286 | * The skb must be linear. |
| 1287 | */ |
| 1288 | void skb_trim(struct sk_buff *skb, unsigned int len) |
| 1289 | { |
| 1290 | if (skb->len > len) |
| 1291 | __skb_trim(skb, len); |
| 1292 | } |
| 1293 | EXPORT_SYMBOL(skb_trim); |
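/* Illustrative sketch, not part of the original file: dropping trailing
 * padding once the real payload length is known, e.g. from a length
 * field in a header. For possibly non-linear skbs use pskb_trim(), which
 * falls back to ___pskb_trim() below.
 */
static void __maybe_unused example_drop_padding(struct sk_buff *skb,
						unsigned int real_len)
{
	if (skb->len > real_len)
		skb_trim(skb, real_len);	/* linear skbs only */
}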
| 1294 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 1295 | /* Trims skb to length len. It can change skb pointers. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | */ |
| 1297 | |
Herbert Xu | 3cc0e87 | 2006-06-09 16:13:38 -0700 | [diff] [blame] | 1298 | int ___pskb_trim(struct sk_buff *skb, unsigned int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1300 | struct sk_buff **fragp; |
| 1301 | struct sk_buff *frag; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | int offset = skb_headlen(skb); |
| 1303 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 1304 | int i; |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1305 | int err; |
| 1306 | |
| 1307 | if (skb_cloned(skb) && |
| 1308 | unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) |
| 1309 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1311 | i = 0; |
| 1312 | if (offset >= len) |
| 1313 | goto drop_pages; |
| 1314 | |
| 1315 | for (; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1316 | int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1317 | |
| 1318 | if (end < len) { |
| 1319 | offset = end; |
| 1320 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 | } |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1322 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1323 | skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1324 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1325 | drop_pages: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1326 | skb_shinfo(skb)->nr_frags = i; |
| 1327 | |
| 1328 | for (; i < nfrags; i++) |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1329 | skb_frag_unref(skb, i); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1330 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1331 | if (skb_has_frag_list(skb)) |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1332 | skb_drop_fraglist(skb); |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1333 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | } |
| 1335 | |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1336 | for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); |
| 1337 | fragp = &frag->next) { |
| 1338 | int end = offset + frag->len; |
| 1339 | |
| 1340 | if (skb_shared(frag)) { |
| 1341 | struct sk_buff *nfrag; |
| 1342 | |
| 1343 | nfrag = skb_clone(frag, GFP_ATOMIC); |
| 1344 | if (unlikely(!nfrag)) |
| 1345 | return -ENOMEM; |
| 1346 | |
| 1347 | nfrag->next = frag->next; |
Eric Dumazet | 85bb2a6 | 2012-04-19 02:24:53 +0000 | [diff] [blame] | 1348 | consume_skb(frag); |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1349 | frag = nfrag; |
| 1350 | *fragp = frag; |
| 1351 | } |
| 1352 | |
| 1353 | if (end < len) { |
| 1354 | offset = end; |
| 1355 | continue; |
| 1356 | } |
| 1357 | |
| 1358 | if (end > len && |
| 1359 | unlikely((err = pskb_trim(frag, len - offset)))) |
| 1360 | return err; |
| 1361 | |
| 1362 | if (frag->next) |
| 1363 | skb_drop_list(&frag->next); |
| 1364 | break; |
| 1365 | } |
| 1366 | |
Herbert Xu | f4d26fb | 2006-07-30 20:20:28 -0700 | [diff] [blame] | 1367 | done: |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1368 | if (len > skb_headlen(skb)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1369 | skb->data_len -= skb->len - len; |
| 1370 | skb->len = len; |
| 1371 | } else { |
Herbert Xu | 27b437c | 2006-07-13 19:26:39 -0700 | [diff] [blame] | 1372 | skb->len = len; |
| 1373 | skb->data_len = 0; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1374 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1375 | } |
| 1376 | |
| 1377 | return 0; |
| 1378 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1379 | EXPORT_SYMBOL(___pskb_trim); |
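/* Illustrative sketch, not part of the original file: callers normally
 * reach ___pskb_trim() through the pskb_trim() wrapper, which handles
 * the fast linear case inline and falls back here for skbs with frags
 * or a frag list.
 */
static int __maybe_unused example_trim_any(struct sk_buff *skb,
					   unsigned int len)
{
	return pskb_trim(skb, len);	/* may change skb pointers */
}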
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1380 | |
| 1381 | /** |
| 1382 | * __pskb_pull_tail - advance tail of skb header |
| 1383 | * @skb: buffer to reallocate |
| 1384 | * @delta: number of bytes to advance tail |
| 1385 | * |
| 1386 | * The function makes sense only on a fragmented &sk_buff: |
| 1387 | * it expands the header, moving its tail forward and copying the |
| 1388 | * necessary data from the fragmented part. |
| 1389 | * |
| 1390 | * The &sk_buff MUST have a reference count of 1. |
| 1391 | * |
| 1392 | * Returns %NULL (and the &sk_buff does not change) if the pull failed, |
| 1393 | * or the value of the new tail of the skb in the case of success. |
| 1394 | * |
| 1395 | * All the pointers pointing into the skb header may change and must be |
| 1396 | * reloaded after a call to this function. |
| 1397 | */ |
| 1398 | |
| 1399 | /* Moves the tail of the skb head forward, copying data from the |
| 1400 | * fragmented part when necessary. |
| 1401 | * 1. It may fail due to malloc failure. |
| 1402 | * 2. It may change skb pointers. |
| 1403 | * |
| 1404 | * It is pretty complicated. Luckily, it is called only in exceptional cases. |
| 1405 | */ |
| 1406 | unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) |
| 1407 | { |
| 1408 | /* If the skb does not have enough free space at the tail, get a new one |
| 1409 | * plus 128 bytes for future expansions. If we have enough |
| 1410 | * room at the tail, reallocate without expansion only if the skb is cloned. |
| 1411 | */ |
Arnaldo Carvalho de Melo | 4305b54 | 2007-04-19 20:43:29 -0700 | [diff] [blame] | 1412 | int i, k, eat = (skb->tail + delta) - skb->end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1413 | |
| 1414 | if (eat > 0 || skb_cloned(skb)) { |
| 1415 | if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, |
| 1416 | GFP_ATOMIC)) |
| 1417 | return NULL; |
| 1418 | } |
| 1419 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1420 | if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1421 | BUG(); |
| 1422 | |
| 1423 | /* Optimization: no fragments, no reason to pre-estimate the |
| 1424 | * size of pulled pages. Superb. |
| 1425 | */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 1426 | if (!skb_has_frag_list(skb)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | goto pull_pages; |
| 1428 | |
| 1429 | /* Estimate size of pulled pages. */ |
| 1430 | eat = delta; |
| 1431 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1432 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1433 | |
| 1434 | if (size >= eat) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1435 | goto pull_pages; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1436 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | } |
| 1438 | |
| 1439 | /* If we need to update the frag list, we are in trouble. |
| 1440 | * Certainly, it is possible to add an offset to the skb data, |
| 1441 | * but taking into account that pulling is expected to |
| 1442 | * be a very rare operation, it is worth fighting against |
| 1443 | * further bloating of the skb head and crucifying ourselves here instead. |
| 1444 | * Pure masochism, indeed. 8)8) |
| 1445 | */ |
| 1446 | if (eat) { |
| 1447 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
| 1448 | struct sk_buff *clone = NULL; |
| 1449 | struct sk_buff *insp = NULL; |
| 1450 | |
| 1451 | do { |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 1452 | BUG_ON(!list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1453 | |
| 1454 | if (list->len <= eat) { |
| 1455 | /* Eaten as whole. */ |
| 1456 | eat -= list->len; |
| 1457 | list = list->next; |
| 1458 | insp = list; |
| 1459 | } else { |
| 1460 | /* Eaten partially. */ |
| 1461 | |
| 1462 | if (skb_shared(list)) { |
| 1463 | /* Sucks! We need to fork the list. :-( */ |
| 1464 | clone = skb_clone(list, GFP_ATOMIC); |
| 1465 | if (!clone) |
| 1466 | return NULL; |
| 1467 | insp = list->next; |
| 1468 | list = clone; |
| 1469 | } else { |
| 1470 | /* This may be pulled without |
| 1471 | * problems. */ |
| 1472 | insp = list; |
| 1473 | } |
| 1474 | if (!pskb_pull(list, eat)) { |
Wei Yongjun | f3fbbe0 | 2009-02-25 00:37:32 +0000 | [diff] [blame] | 1475 | kfree_skb(clone); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | return NULL; |
| 1477 | } |
| 1478 | break; |
| 1479 | } |
| 1480 | } while (eat); |
| 1481 | |
| 1482 | /* Free pulled out fragments. */ |
| 1483 | while ((list = skb_shinfo(skb)->frag_list) != insp) { |
| 1484 | skb_shinfo(skb)->frag_list = list->next; |
| 1485 | kfree_skb(list); |
| 1486 | } |
| 1487 | /* And insert new clone at head. */ |
| 1488 | if (clone) { |
| 1489 | clone->next = list; |
| 1490 | skb_shinfo(skb)->frag_list = clone; |
| 1491 | } |
| 1492 | } |
| 1493 | /* Success! Now we may commit changes to skb data. */ |
| 1494 | |
| 1495 | pull_pages: |
| 1496 | eat = delta; |
| 1497 | k = 0; |
| 1498 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1499 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
| 1500 | |
| 1501 | if (size <= eat) { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1502 | skb_frag_unref(skb, i); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1503 | eat -= size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 | } else { |
| 1505 | skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 1506 | if (eat) { |
| 1507 | skb_shinfo(skb)->frags[k].page_offset += eat; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1508 | skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1509 | eat = 0; |
| 1510 | } |
| 1511 | k++; |
| 1512 | } |
| 1513 | } |
| 1514 | skb_shinfo(skb)->nr_frags = k; |
| 1515 | |
| 1516 | skb->tail += delta; |
| 1517 | skb->data_len -= delta; |
| 1518 | |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 1519 | return skb_tail_pointer(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1521 | EXPORT_SYMBOL(__pskb_pull_tail); |
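/* Illustrative sketch, not part of the original file: __pskb_pull_tail()
 * is rarely called directly; protocol code usually goes through
 * pskb_may_pull() when it needs more header bytes in the linear area.
 */
static int __maybe_unused example_need_linear(struct sk_buff *skb,
					      unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))	/* may invoke __pskb_pull_tail() */
		return -EINVAL;
	/* skb->data[0..hlen) is now linear; any header pointers cached
	 * before this point must be re-read, as the doc above warns.
	 */
	return 0;
}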
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 | |
Eric Dumazet | 22019b1 | 2011-07-29 18:37:31 +0000 | [diff] [blame] | 1523 | /** |
| 1524 | * skb_copy_bits - copy bits from skb to kernel buffer |
| 1525 | * @skb: source skb |
| 1526 | * @offset: offset in source |
| 1527 | * @to: destination buffer |
| 1528 | * @len: number of bytes to copy |
| 1529 | * |
| 1530 | * Copy the specified number of bytes from the source skb to the |
| 1531 | * destination buffer. |
| 1532 | * |
| 1533 | * CAUTION: |
| 1534 | * If its prototype is ever changed, |
| 1535 | * check arch/{*}/net/{*}.S files, |
| 1536 | * since it is called from BPF assembly code. |
| 1537 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
| 1539 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1540 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1541 | struct sk_buff *frag_iter; |
| 1542 | int i, copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | |
| 1544 | if (offset > (int)skb->len - len) |
| 1545 | goto fault; |
| 1546 | |
| 1547 | /* Copy header. */ |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1548 | if ((copy = start - offset) > 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1549 | if (copy > len) |
| 1550 | copy = len; |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 1551 | skb_copy_from_linear_data_offset(skb, offset, to, copy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | if ((len -= copy) == 0) |
| 1553 | return 0; |
| 1554 | offset += copy; |
| 1555 | to += copy; |
| 1556 | } |
| 1557 | |
| 1558 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1559 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1560 | skb_frag_t *f = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1561 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1562 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1563 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1564 | end = start + skb_frag_size(f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1565 | if ((copy = end - offset) > 0) { |
| 1566 | u8 *vaddr; |
| 1567 | |
| 1568 | if (copy > len) |
| 1569 | copy = len; |
| 1570 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1571 | vaddr = kmap_atomic(skb_frag_page(f)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | memcpy(to, |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1573 | vaddr + f->page_offset + offset - start, |
| 1574 | copy); |
| 1575 | kunmap_atomic(vaddr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1576 | |
| 1577 | if ((len -= copy) == 0) |
| 1578 | return 0; |
| 1579 | offset += copy; |
| 1580 | to += copy; |
| 1581 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1582 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1583 | } |
| 1584 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1585 | skb_walk_frags(skb, frag_iter) { |
| 1586 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1587 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1588 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1590 | end = start + frag_iter->len; |
| 1591 | if ((copy = end - offset) > 0) { |
| 1592 | if (copy > len) |
| 1593 | copy = len; |
| 1594 | if (skb_copy_bits(frag_iter, offset - start, to, copy)) |
| 1595 | goto fault; |
| 1596 | if ((len -= copy) == 0) |
| 1597 | return 0; |
| 1598 | offset += copy; |
| 1599 | to += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1600 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1601 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1602 | } |
Shirley Ma | a6686f2 | 2011-07-06 12:22:12 +0000 | [diff] [blame] | 1603 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | if (!len) |
| 1605 | return 0; |
| 1606 | |
| 1607 | fault: |
| 1608 | return -EFAULT; |
| 1609 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1610 | EXPORT_SYMBOL(skb_copy_bits); |
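/* Illustrative sketch, not part of the original file: peeking at bytes
 * that may live in page frags or the frag list, without modifying the
 * skb. The 8-byte window is an arbitrary example size.
 */
static int __maybe_unused example_peek_bytes(const struct sk_buff *skb,
					     int offset)
{
	u8 buf[8];

	/* Walks linear data, page frags and the frag list as needed. */
	return skb_copy_bits(skb, offset, buf, sizeof(buf));
}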
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1611 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1612 | /* |
| 1613 | * Callback from splice_to_pipe(), if we need to release some pages |
| 1614 | * at the end of the spd in case we errored out while filling the pipe. |
| 1615 | */ |
| 1616 | static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) |
| 1617 | { |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1618 | put_page(spd->pages[i]); |
| 1619 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1620 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1621 | static struct page *linear_to_page(struct page *page, unsigned int *len, |
| 1622 | unsigned int *offset, |
| 1623 | struct sk_buff *skb, struct sock *sk) |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1624 | { |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1625 | struct page_frag *pfrag = sk_page_frag(sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1626 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1627 | if (!sk_page_frag_refill(sk, pfrag)) |
| 1628 | return NULL; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1629 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1630 | *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1631 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1632 | memcpy(page_address(pfrag->page) + pfrag->offset, |
| 1633 | page_address(page) + *offset, *len); |
| 1634 | *offset = pfrag->offset; |
| 1635 | pfrag->offset += *len; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1636 | |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1637 | return pfrag->page; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1638 | } |
| 1639 | |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1640 | static bool spd_can_coalesce(const struct splice_pipe_desc *spd, |
| 1641 | struct page *page, |
| 1642 | unsigned int offset) |
| 1643 | { |
| 1644 | return spd->nr_pages && |
| 1645 | spd->pages[spd->nr_pages - 1] == page && |
| 1646 | (spd->partial[spd->nr_pages - 1].offset + |
| 1647 | spd->partial[spd->nr_pages - 1].len == offset); |
| 1648 | } |
| 1649 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1650 | /* |
| 1651 | * Fill page/offset/length into spd, if it can hold more pages. |
| 1652 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1653 | static bool spd_fill_page(struct splice_pipe_desc *spd, |
| 1654 | struct pipe_inode_info *pipe, struct page *page, |
| 1655 | unsigned int *len, unsigned int offset, |
Eric Dumazet | d7ccf7c | 2012-04-23 23:35:04 -0400 | [diff] [blame] | 1656 | struct sk_buff *skb, bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1657 | struct sock *sk) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1658 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1659 | if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1660 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1661 | |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1662 | if (linear) { |
Jarek Poplawski | 7a67e56 | 2009-04-30 05:41:19 -0700 | [diff] [blame] | 1663 | page = linear_to_page(page, len, &offset, skb, sk); |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1664 | if (!page) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1665 | return true; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1666 | } |
| 1667 | if (spd_can_coalesce(spd, page, offset)) { |
| 1668 | spd->partial[spd->nr_pages - 1].len += *len; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1669 | return false; |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1670 | } |
| 1671 | get_page(page); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1672 | spd->pages[spd->nr_pages] = page; |
Jarek Poplawski | 4fb6699 | 2009-02-01 00:41:42 -0800 | [diff] [blame] | 1673 | spd->partial[spd->nr_pages].len = *len; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1674 | spd->partial[spd->nr_pages].offset = offset; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1675 | spd->nr_pages++; |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1676 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1677 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1678 | } |
| 1679 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1680 | static inline void __segment_seek(struct page **page, unsigned int *poff, |
| 1681 | unsigned int *plen, unsigned int off) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1682 | { |
Jarek Poplawski | ce3dd39 | 2009-02-12 16:51:43 -0800 | [diff] [blame] | 1683 | unsigned long n; |
| 1684 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1685 | *poff += off; |
Jarek Poplawski | ce3dd39 | 2009-02-12 16:51:43 -0800 | [diff] [blame] | 1686 | n = *poff / PAGE_SIZE; |
| 1687 | if (n) |
| 1688 | *page = nth_page(*page, n); |
| 1689 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1690 | *poff = *poff % PAGE_SIZE; |
| 1691 | *plen -= off; |
| 1692 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1693 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1694 | static bool __splice_segment(struct page *page, unsigned int poff, |
| 1695 | unsigned int plen, unsigned int *off, |
| 1696 | unsigned int *len, struct sk_buff *skb, |
Eric Dumazet | d7ccf7c | 2012-04-23 23:35:04 -0400 | [diff] [blame] | 1697 | struct splice_pipe_desc *spd, bool linear, |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1698 | struct sock *sk, |
| 1699 | struct pipe_inode_info *pipe) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1700 | { |
| 1701 | if (!*len) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1702 | return true; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1703 | |
| 1704 | /* skip this segment if already processed */ |
| 1705 | if (*off >= plen) { |
| 1706 | *off -= plen; |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1707 | return false; |
Octavian Purdila | db43a28 | 2008-06-27 17:27:21 -0700 | [diff] [blame] | 1708 | } |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1709 | |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1710 | /* ignore any bits we already processed */ |
| 1711 | if (*off) { |
| 1712 | __segment_seek(&page, &poff, &plen, *off); |
| 1713 | *off = 0; |
| 1714 | } |
| 1715 | |
| 1716 | do { |
| 1717 | unsigned int flen = min(*len, plen); |
| 1718 | |
| 1719 | /* the linear region may spread across several pages */ |
| 1720 | flen = min_t(unsigned int, flen, PAGE_SIZE - poff); |
| 1721 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1722 | if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1723 | return true; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1724 | |
| 1725 | __segment_seek(&page, &poff, &plen, flen); |
| 1726 | *len -= flen; |
| 1727 | |
| 1728 | } while (*len && plen); |
| 1729 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1730 | return false; |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1731 | } |
| 1732 | |
| 1733 | /* |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1734 | * Map linear and fragment data from the skb to spd. It reports true if the |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1735 | * pipe is full or if we already spliced the requested length. |
| 1736 | */ |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1737 | static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, |
| 1738 | unsigned int *offset, unsigned int *len, |
| 1739 | struct splice_pipe_desc *spd, struct sock *sk) |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1740 | { |
| 1741 | int seg; |
| 1742 | |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 1743 | /* map the linear part: |
Alexander Duyck | 2996d31 | 2012-05-02 18:18:42 +0000 | [diff] [blame] | 1744 | * If skb->head_frag is set, this 'linear' part is backed by a |
| 1745 | * fragment, and if the head is not shared with any clones then |
| 1746 | * we can avoid a copy since we own the head portion of this page. |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1747 | */ |
Octavian Purdila | 2870c43 | 2008-07-15 00:49:11 -0700 | [diff] [blame] | 1748 | if (__splice_segment(virt_to_page(skb->data), |
| 1749 | (unsigned long) skb->data & (PAGE_SIZE - 1), |
| 1750 | skb_headlen(skb), |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 1751 | offset, len, skb, spd, |
Alexander Duyck | 3a7c1ee4 | 2012-05-03 01:09:42 +0000 | [diff] [blame] | 1752 | skb_head_is_locked(skb), |
Eric Dumazet | 1d0c0b3 | 2012-04-27 02:10:03 +0000 | [diff] [blame] | 1753 | sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1754 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1755 | |
| 1756 | /* |
| 1757 | * then map the fragments |
| 1758 | */ |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1759 | for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { |
| 1760 | const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; |
| 1761 | |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 1762 | if (__splice_segment(skb_frag_page(f), |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1763 | f->page_offset, skb_frag_size(f), |
Eric Dumazet | d7ccf7c | 2012-04-23 23:35:04 -0400 | [diff] [blame] | 1764 | offset, len, skb, spd, false, sk, pipe)) |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1765 | return true; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1766 | } |
| 1767 | |
David S. Miller | a108d5f | 2012-04-23 23:06:11 -0400 | [diff] [blame] | 1768 | return false; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1769 | } |
| 1770 | |
| 1771 | /* |
| 1772 | * Map data from the skb to a pipe. Should handle both the linear part, |
| 1773 | * the fragments, and the frag list. It does NOT handle frag lists within |
| 1774 | * the frag list, if such a thing exists. We'd probably need to recurse to |
| 1775 | * handle that cleanly. |
| 1776 | */ |
Jarek Poplawski | 8b9d372 | 2009-01-19 17:03:56 -0800 | [diff] [blame] | 1777 | int skb_splice_bits(struct sk_buff *skb, unsigned int offset, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1778 | struct pipe_inode_info *pipe, unsigned int tlen, |
| 1779 | unsigned int flags) |
| 1780 | { |
Eric Dumazet | 41c73a0 | 2012-04-22 12:26:16 +0000 | [diff] [blame] | 1781 | struct partial_page partial[MAX_SKB_FRAGS]; |
| 1782 | struct page *pages[MAX_SKB_FRAGS]; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1783 | struct splice_pipe_desc spd = { |
| 1784 | .pages = pages, |
| 1785 | .partial = partial, |
Eric Dumazet | 047fe36 | 2012-06-12 15:24:40 +0200 | [diff] [blame] | 1786 | .nr_pages_max = MAX_SKB_FRAGS, |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1787 | .flags = flags, |
| 1788 | .ops = &sock_pipe_buf_ops, |
| 1789 | .spd_release = sock_spd_release, |
| 1790 | }; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1791 | struct sk_buff *frag_iter; |
Jarek Poplawski | 7a67e56 | 2009-04-30 05:41:19 -0700 | [diff] [blame] | 1792 | struct sock *sk = skb->sk; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1793 | int ret = 0; |
| 1794 | |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1795 | /* |
| 1796 | * __skb_splice_bits() only fails if the output has no room left, |
| 1797 | * so no point in going over the frag_list for the error case. |
| 1798 | */ |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1799 | if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1800 | goto done; |
| 1801 | else if (!tlen) |
| 1802 | goto done; |
| 1803 | |
| 1804 | /* |
| 1805 | * now see if we have a frag_list to map |
| 1806 | */ |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1807 | skb_walk_frags(skb, frag_iter) { |
| 1808 | if (!tlen) |
| 1809 | break; |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1810 | if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1811 | break; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1812 | } |
| 1813 | |
| 1814 | done: |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1815 | if (spd.nr_pages) { |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1816 | /* |
| 1817 | * Drop the socket lock, otherwise we have reverse |
| 1818 | * locking dependencies between sk_lock and i_mutex |
| 1819 | * here as compared to sendfile(). We enter here |
| 1820 | * with the socket lock held, and splice_to_pipe() will |
| 1821 | * grab the pipe inode lock. For sendfile() emulation, |
| 1822 | * we call into ->sendpage() with the i_mutex lock held |
| 1823 | * and networking will grab the socket lock. |
| 1824 | */ |
Octavian Purdila | 293ad60 | 2008-06-04 15:45:58 -0700 | [diff] [blame] | 1825 | release_sock(sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1826 | ret = splice_to_pipe(pipe, &spd); |
Octavian Purdila | 293ad60 | 2008-06-04 15:45:58 -0700 | [diff] [blame] | 1827 | lock_sock(sk); |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1828 | } |
| 1829 | |
Jens Axboe | 35f3d14 | 2010-05-20 10:43:18 +0200 | [diff] [blame] | 1830 | return ret; |
Jens Axboe | 9c55e01 | 2007-11-06 23:30:13 -0800 | [diff] [blame] | 1831 | } |
| 1832 | |
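/* Illustrative sketch, not part of the original file: skb_splice_bits()
 * is normally reached from a protocol's splice_read path via a per-skb
 * callback; this wrapper shows only the argument wiring and is not the
 * actual TCP implementation.
 */
static int __maybe_unused example_splice_actor(struct sk_buff *skb,
					       struct pipe_inode_info *pipe,
					       unsigned int offset,
					       unsigned int len)
{
	return skb_splice_bits(skb, offset, pipe, len, 0);
}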
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1833 | /** |
| 1834 | * skb_store_bits - store bits from kernel buffer to skb |
| 1835 | * @skb: destination buffer |
| 1836 | * @offset: offset in destination |
| 1837 | * @from: source buffer |
| 1838 | * @len: number of bytes to copy |
| 1839 | * |
| 1840 | * Copy the specified number of bytes from the source buffer to the |
| 1841 | * destination skb. This function handles all the messy bits of |
| 1842 | * traversing fragment lists and such. |
| 1843 | */ |
| 1844 | |
Stephen Hemminger | 0c6fcc8 | 2007-04-20 16:40:01 -0700 | [diff] [blame] | 1845 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1846 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1847 | int start = skb_headlen(skb); |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1848 | struct sk_buff *frag_iter; |
| 1849 | int i, copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1850 | |
| 1851 | if (offset > (int)skb->len - len) |
| 1852 | goto fault; |
| 1853 | |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1854 | if ((copy = start - offset) > 0) { |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1855 | if (copy > len) |
| 1856 | copy = len; |
Arnaldo Carvalho de Melo | 27d7ff4 | 2007-03-31 11:55:19 -0300 | [diff] [blame] | 1857 | skb_copy_to_linear_data_offset(skb, offset, from, copy); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1858 | if ((len -= copy) == 0) |
| 1859 | return 0; |
| 1860 | offset += copy; |
| 1861 | from += copy; |
| 1862 | } |
| 1863 | |
| 1864 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
| 1865 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1866 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1867 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1868 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1869 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 1870 | end = start + skb_frag_size(frag); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1871 | if ((copy = end - offset) > 0) { |
| 1872 | u8 *vaddr; |
| 1873 | |
| 1874 | if (copy > len) |
| 1875 | copy = len; |
| 1876 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1877 | vaddr = kmap_atomic(skb_frag_page(frag)); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1878 | memcpy(vaddr + frag->page_offset + offset - start, |
| 1879 | from, copy); |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1880 | kunmap_atomic(vaddr); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1881 | |
| 1882 | if ((len -= copy) == 0) |
| 1883 | return 0; |
| 1884 | offset += copy; |
| 1885 | from += copy; |
| 1886 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1887 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1888 | } |
| 1889 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1890 | skb_walk_frags(skb, frag_iter) { |
| 1891 | int end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1892 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1893 | WARN_ON(start > offset + len); |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1894 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1895 | end = start + frag_iter->len; |
| 1896 | if ((copy = end - offset) > 0) { |
| 1897 | if (copy > len) |
| 1898 | copy = len; |
| 1899 | if (skb_store_bits(frag_iter, offset - start, |
| 1900 | from, copy)) |
| 1901 | goto fault; |
| 1902 | if ((len -= copy) == 0) |
| 1903 | return 0; |
| 1904 | offset += copy; |
| 1905 | from += copy; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1906 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1907 | start = end; |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1908 | } |
| 1909 | if (!len) |
| 1910 | return 0; |
| 1911 | |
| 1912 | fault: |
| 1913 | return -EFAULT; |
| 1914 | } |
Herbert Xu | 357b40a | 2005-04-19 22:30:14 -0700 | [diff] [blame] | 1915 | EXPORT_SYMBOL(skb_store_bits); |
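/* Illustrative sketch, not part of the original file: the write-side
 * mirror of skb_copy_bits(). The skb must be writable first (e.g. after
 * skb_cow() or pskb_expand_head()); this sketch assumes the caller has
 * already arranged that.
 */
static int __maybe_unused example_poke_bytes(struct sk_buff *skb, int offset,
					     const void *src, int len)
{
	return skb_store_bits(skb, offset, src, len);
}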
| 1916 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1917 | /* Checksum skb data. */ |
| 1918 | |
Al Viro | 2bbbc86 | 2006-11-14 21:37:14 -0800 | [diff] [blame] | 1919 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
| 1920 | int len, __wsum csum) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1921 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1922 | int start = skb_headlen(skb); |
| 1923 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1924 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1925 | int pos = 0; |
| 1926 | |
| 1927 | /* Checksum header. */ |
| 1928 | if (copy > 0) { |
| 1929 | if (copy > len) |
| 1930 | copy = len; |
| 1931 | csum = csum_partial(skb->data + offset, copy, csum); |
| 1932 | if ((len -= copy) == 0) |
| 1933 | return csum; |
| 1934 | offset += copy; |
| 1935 | pos = copy; |
| 1936 | } |
| 1937 | |
| 1938 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1939 | int end; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1940 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1941 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 1942 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1943 | |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1944 | end = start + skb_frag_size(frag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1945 | if ((copy = end - offset) > 0) { |
Al Viro | 44bb936 | 2006-11-14 21:36:14 -0800 | [diff] [blame] | 1946 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1947 | u8 *vaddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1948 | |
| 1949 | if (copy > len) |
| 1950 | copy = len; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1951 | vaddr = kmap_atomic(skb_frag_page(frag)); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1952 | csum2 = csum_partial(vaddr + frag->page_offset + |
| 1953 | offset - start, copy, 0); |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 1954 | kunmap_atomic(vaddr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1955 | csum = csum_block_add(csum, csum2, pos); |
| 1956 | if (!(len -= copy)) |
| 1957 | return csum; |
| 1958 | offset += copy; |
| 1959 | pos += copy; |
| 1960 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1961 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1962 | } |
| 1963 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1964 | skb_walk_frags(skb, frag_iter) { |
| 1965 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1966 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1967 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1968 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1969 | end = start + frag_iter->len; |
| 1970 | if ((copy = end - offset) > 0) { |
| 1971 | __wsum csum2; |
| 1972 | if (copy > len) |
| 1973 | copy = len; |
| 1974 | csum2 = skb_checksum(frag_iter, offset - start, |
| 1975 | copy, 0); |
| 1976 | csum = csum_block_add(csum, csum2, pos); |
| 1977 | if ((len -= copy) == 0) |
| 1978 | return csum; |
| 1979 | offset += copy; |
| 1980 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1981 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1982 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1983 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 1984 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1985 | |
| 1986 | return csum; |
| 1987 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 1988 | EXPORT_SYMBOL(skb_checksum); |
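/*
 * Example (hypothetical, not part of the original file): a minimal
 * sketch of checksumming a whole skb and folding the 32-bit partial
 * sum down to the final 16-bit form. The function name is made up.
 */
static inline __sum16 example_skb_csum_fold(const struct sk_buff *skb)
{
	__wsum csum = skb_checksum(skb, 0, skb->len, 0);

	return csum_fold(csum);
}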
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1989 | |
| 1990 | /* Both of the above in one bottle. */ |
| 1991 | |
Al Viro | 81d7766 | 2006-11-14 21:37:33 -0800 | [diff] [blame] | 1992 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
| 1993 | u8 *to, int len, __wsum csum) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1994 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 1995 | int start = skb_headlen(skb); |
| 1996 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 1997 | struct sk_buff *frag_iter; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1998 | int pos = 0; |
| 1999 | |
| 2000 | /* Copy header. */ |
| 2001 | if (copy > 0) { |
| 2002 | if (copy > len) |
| 2003 | copy = len; |
| 2004 | csum = csum_partial_copy_nocheck(skb->data + offset, to, |
| 2005 | copy, csum); |
| 2006 | if ((len -= copy) == 0) |
| 2007 | return csum; |
| 2008 | offset += copy; |
| 2009 | to += copy; |
| 2010 | pos = copy; |
| 2011 | } |
| 2012 | |
| 2013 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2014 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2015 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 2016 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2017 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2018 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2019 | if ((copy = end - offset) > 0) { |
Al Viro | 5084205 | 2006-11-14 21:36:34 -0800 | [diff] [blame] | 2020 | __wsum csum2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2021 | u8 *vaddr; |
| 2022 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 2023 | |
| 2024 | if (copy > len) |
| 2025 | copy = len; |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2026 | vaddr = kmap_atomic(skb_frag_page(frag)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2027 | csum2 = csum_partial_copy_nocheck(vaddr + |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2028 | frag->page_offset + |
| 2029 | offset - start, to, |
| 2030 | copy, 0); |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2031 | kunmap_atomic(vaddr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2032 | csum = csum_block_add(csum, csum2, pos); |
| 2033 | if (!(len -= copy)) |
| 2034 | return csum; |
| 2035 | offset += copy; |
| 2036 | to += copy; |
| 2037 | pos += copy; |
| 2038 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 2039 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2040 | } |
| 2041 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2042 | skb_walk_frags(skb, frag_iter) { |
| 2043 | __wsum csum2; |
| 2044 | int end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2045 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2046 | WARN_ON(start > offset + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2047 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2048 | end = start + frag_iter->len; |
| 2049 | if ((copy = end - offset) > 0) { |
| 2050 | if (copy > len) |
| 2051 | copy = len; |
| 2052 | csum2 = skb_copy_and_csum_bits(frag_iter, |
| 2053 | offset - start, |
| 2054 | to, copy, 0); |
| 2055 | csum = csum_block_add(csum, csum2, pos); |
| 2056 | if ((len -= copy) == 0) |
| 2057 | return csum; |
| 2058 | offset += copy; |
| 2059 | to += copy; |
| 2060 | pos += copy; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2061 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2062 | start = end; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2063 | } |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2064 | BUG_ON(len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2065 | return csum; |
| 2066 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2067 | EXPORT_SYMBOL(skb_copy_and_csum_bits); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2068 | |
| 2069 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) |
| 2070 | { |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 2071 | __wsum csum; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2072 | long csstart; |
| 2073 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2074 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 2075 | csstart = skb_checksum_start_offset(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2076 | else |
| 2077 | csstart = skb_headlen(skb); |
| 2078 | |
Kris Katterjohn | 09a6266 | 2006-01-08 22:24:28 -0800 | [diff] [blame] | 2079 | BUG_ON(csstart > skb_headlen(skb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2080 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2081 | skb_copy_from_linear_data(skb, to, csstart); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2082 | |
| 2083 | csum = 0; |
| 2084 | if (csstart != skb->len) |
| 2085 | csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, |
| 2086 | skb->len - csstart, 0); |
| 2087 | |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2088 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
Al Viro | ff1dcad | 2006-11-20 18:07:29 -0800 | [diff] [blame] | 2089 | long csstuff = csstart + skb->csum_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2090 | |
Al Viro | d3bc23e | 2006-11-14 21:24:49 -0800 | [diff] [blame] | 2091 | *((__sum16 *)(to + csstuff)) = csum_fold(csum); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2092 | } |
| 2093 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2094 | EXPORT_SYMBOL(skb_copy_and_csum_dev); |
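/*
 * Example (hypothetical): a driver without hardware checksumming could
 * use skb_copy_and_csum_dev() to linearize a frame into a bounce
 * buffer; "buf" must provide at least skb->len bytes.
 */
static void example_copy_to_bounce(const struct sk_buff *skb, u8 *buf)
{
	skb_copy_and_csum_dev(skb, buf);
	/* buf now holds the complete frame with the checksum filled in */
}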
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2095 | |
| 2096 | /** |
| 2097 | * skb_dequeue - remove from the head of the queue |
| 2098 | * @list: list to dequeue from |
| 2099 | * |
| 2100 | * Remove the head of the list. The list lock is taken so the function |
| 2101 | * may be used safely with other locking list functions. The head item is |
| 2102 | * returned or %NULL if the list is empty. |
| 2103 | */ |
| 2104 | |
| 2105 | struct sk_buff *skb_dequeue(struct sk_buff_head *list) |
| 2106 | { |
| 2107 | unsigned long flags; |
| 2108 | struct sk_buff *result; |
| 2109 | |
| 2110 | spin_lock_irqsave(&list->lock, flags); |
| 2111 | result = __skb_dequeue(list); |
| 2112 | spin_unlock_irqrestore(&list->lock, flags); |
| 2113 | return result; |
| 2114 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2115 | EXPORT_SYMBOL(skb_dequeue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2116 | |
| 2117 | /** |
| 2118 | * skb_dequeue_tail - remove from the tail of the queue |
| 2119 | * @list: list to dequeue from |
| 2120 | * |
| 2121 | * Remove the tail of the list. The list lock is taken so the function |
| 2122 | * may be used safely with other locking list functions. The tail item is |
| 2123 | * returned or %NULL if the list is empty. |
| 2124 | */ |
| 2125 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) |
| 2126 | { |
| 2127 | unsigned long flags; |
| 2128 | struct sk_buff *result; |
| 2129 | |
| 2130 | spin_lock_irqsave(&list->lock, flags); |
| 2131 | result = __skb_dequeue_tail(list); |
| 2132 | spin_unlock_irqrestore(&list->lock, flags); |
| 2133 | return result; |
| 2134 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2135 | EXPORT_SYMBOL(skb_dequeue_tail); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2136 | |
| 2137 | /** |
| 2138 | * skb_queue_purge - empty a list |
| 2139 | * @list: list to empty |
| 2140 | * |
| 2141 | * Delete all buffers on an &sk_buff list. Each buffer is removed from |
| 2142 | * the list and one reference dropped. This function takes the list |
| 2143 | * lock and is atomic with respect to other list locking functions. |
| 2144 | */ |
| 2145 | void skb_queue_purge(struct sk_buff_head *list) |
| 2146 | { |
| 2147 | struct sk_buff *skb; |
| 2148 | while ((skb = skb_dequeue(list)) != NULL) |
| 2149 | kfree_skb(skb); |
| 2150 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2151 | EXPORT_SYMBOL(skb_queue_purge); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2152 | |
| 2153 | /** |
| 2154 | * skb_queue_head - queue a buffer at the list head |
| 2155 | * @list: list to use |
| 2156 | * @newsk: buffer to queue |
| 2157 | * |
| 2158 | * Queue a buffer at the start of the list. This function takes the |
| 2159 | * list lock and can be used safely with other locking &sk_buff |
| 2160 | * functions. |
| 2161 | * |
| 2162 | * A buffer cannot be placed on two lists at the same time. |
| 2163 | */ |
| 2164 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) |
| 2165 | { |
| 2166 | unsigned long flags; |
| 2167 | |
| 2168 | spin_lock_irqsave(&list->lock, flags); |
| 2169 | __skb_queue_head(list, newsk); |
| 2170 | spin_unlock_irqrestore(&list->lock, flags); |
| 2171 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2172 | EXPORT_SYMBOL(skb_queue_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2173 | |
| 2174 | /** |
| 2175 | * skb_queue_tail - queue a buffer at the list tail |
| 2176 | * @list: list to use |
| 2177 | * @newsk: buffer to queue |
| 2178 | * |
| 2179 | * Queue a buffer at the tail of the list. This function takes the |
| 2180 | * list lock and can be used safely with other locking &sk_buff |
| 2181 | * functions. |
| 2182 | * |
| 2183 | * A buffer cannot be placed on two lists at the same time. |
| 2184 | */ |
| 2185 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) |
| 2186 | { |
| 2187 | unsigned long flags; |
| 2188 | |
| 2189 | spin_lock_irqsave(&list->lock, flags); |
| 2190 | __skb_queue_tail(list, newsk); |
| 2191 | spin_unlock_irqrestore(&list->lock, flags); |
| 2192 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2193 | EXPORT_SYMBOL(skb_queue_tail); |
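/*
 * Example (hypothetical): the usual private-queue pattern. The queue
 * must have been set up with skb_queue_head_init(); afterwards
 * producers and consumers may run concurrently, because both helpers
 * take the queue lock.
 */
static void example_queue_pattern(struct sk_buff_head *q,
				  struct sk_buff *skb)
{
	struct sk_buff *next;

	skb_queue_tail(q, skb);			/* producer side */

	while ((next = skb_dequeue(q)) != NULL)	/* consumer side */
		kfree_skb(next);
}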
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2194 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2195 | /** |
| 2196 | * skb_unlink - remove a buffer from a list |
| 2197 | * @skb: buffer to remove |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2198 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2199 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2200 | * Remove a packet from a list. The list locks are taken and this |
| 2201 | * function is atomic with respect to other list locked calls. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2202 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2203 | * You must know what list the SKB is on. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2204 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2205 | void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2206 | { |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2207 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2208 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2209 | spin_lock_irqsave(&list->lock, flags); |
| 2210 | __skb_unlink(skb, list); |
| 2211 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2212 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2213 | EXPORT_SYMBOL(skb_unlink); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2214 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2215 | /** |
| 2216 | * skb_append - append a buffer |
| 2217 | * @old: buffer to insert after |
| 2218 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2219 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2220 | * |
| 2221 | * Place a packet after a given packet in a list. The list locks are taken |
| 2222 | * and this function is atomic with respect to other list locked calls. |
| 2223 | * A buffer cannot be placed on two lists at the same time. |
| 2224 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2225 | void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2226 | { |
| 2227 | unsigned long flags; |
| 2228 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2229 | spin_lock_irqsave(&list->lock, flags); |
Gerrit Renker | 7de6c03 | 2008-04-14 00:05:09 -0700 | [diff] [blame] | 2230 | __skb_queue_after(list, old, newsk); |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2231 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2232 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2233 | EXPORT_SYMBOL(skb_append); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2234 | |
| 2235 | /** |
| 2236 | * skb_insert - insert a buffer |
| 2237 | * @old: buffer to insert before |
| 2238 | * @newsk: buffer to insert |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2239 | * @list: list to use |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2240 | * |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2241 | * Place a packet before a given packet in a list. The list locks are |
| 2242 | * taken and this function is atomic with respect to other list locked |
| 2243 | * calls. |
| 2244 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2245 | * A buffer cannot be placed on two lists at the same time. |
| 2246 | */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2247 | void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2248 | { |
| 2249 | unsigned long flags; |
| 2250 | |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 2251 | spin_lock_irqsave(&list->lock, flags); |
| 2252 | __skb_insert(newsk, old->prev, old, list); |
| 2253 | spin_unlock_irqrestore(&list->lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2254 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2255 | EXPORT_SYMBOL(skb_insert); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2256 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2257 | static inline void skb_split_inside_header(struct sk_buff *skb, |
| 2258 | struct sk_buff* skb1, |
| 2259 | const u32 len, const int pos) |
| 2260 | { |
| 2261 | int i; |
| 2262 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2263 | skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), |
| 2264 | pos - len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2265 | /* And move data appendix as is. */ |
| 2266 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) |
| 2267 | skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; |
| 2268 | |
| 2269 | skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; |
| 2270 | skb_shinfo(skb)->nr_frags = 0; |
| 2271 | skb1->data_len = skb->data_len; |
| 2272 | skb1->len += skb1->data_len; |
| 2273 | skb->data_len = 0; |
| 2274 | skb->len = len; |
Arnaldo Carvalho de Melo | 27a884d | 2007-04-19 20:29:13 -0700 | [diff] [blame] | 2275 | skb_set_tail_pointer(skb, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2276 | } |
| 2277 | |
| 2278 | static inline void skb_split_no_header(struct sk_buff *skb, |
| 2279 | struct sk_buff* skb1, |
| 2280 | const u32 len, int pos) |
| 2281 | { |
| 2282 | int i, k = 0; |
| 2283 | const int nfrags = skb_shinfo(skb)->nr_frags; |
| 2284 | |
| 2285 | skb_shinfo(skb)->nr_frags = 0; |
| 2286 | skb1->len = skb1->data_len = skb->len - len; |
| 2287 | skb->len = len; |
| 2288 | skb->data_len = len - pos; |
| 2289 | |
| 2290 | for (i = 0; i < nfrags; i++) { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2291 | int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2292 | |
| 2293 | if (pos + size > len) { |
| 2294 | skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; |
| 2295 | |
| 2296 | if (pos < len) { |
| 2297 | /* Split frag. |
| 2298 | * There are two options in this case: |
| 2299 | * 1. Move the whole frag to the second |
| 2300 | *    part, if possible. E.g. this |
| 2301 | *    approach is mandatory for TUX, |
| 2302 | *    where splitting is expensive. |
| 2303 | * 2. Split accurately at len; this is what we do here. |
| 2304 | */ |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2305 | skb_frag_ref(skb, i); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2306 | skb_shinfo(skb1)->frags[0].page_offset += len - pos; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2307 | skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); |
| 2308 | skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2309 | skb_shinfo(skb)->nr_frags++; |
| 2310 | } |
| 2311 | k++; |
| 2312 | } else |
| 2313 | skb_shinfo(skb)->nr_frags++; |
| 2314 | pos += size; |
| 2315 | } |
| 2316 | skb_shinfo(skb1)->nr_frags = k; |
| 2317 | } |
| 2318 | |
| 2319 | /** |
| 2320 | * skb_split - Split fragmented skb to two parts at length len. |
| 2321 | * @skb: the buffer to split |
| 2322 | * @skb1: the buffer to receive the second part |
| 2323 | * @len: new length for skb |
| 2324 | */ |
| 2325 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
| 2326 | { |
| 2327 | int pos = skb_headlen(skb); |
| 2328 | |
| 2329 | if (len < pos) /* Split line is inside header. */ |
| 2330 | skb_split_inside_header(skb, skb1, len, pos); |
| 2331 | else /* Second chunk has no header, nothing to copy. */ |
| 2332 | skb_split_no_header(skb, skb1, len, pos); |
| 2333 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2334 | EXPORT_SYMBOL(skb_split); |
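/*
 * Example (hypothetical): TCP-style use of skb_split(). The second
 * buffer must be preallocated with tailroom for any linear bytes that
 * move across; skb_split() does not grow it.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 len)
{
	struct sk_buff *nskb = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!nskb)
		return NULL;

	skb_split(skb, nskb, len);	/* skb keeps bytes [0, len) */
	return nskb;			/* nskb holds the rest */
}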
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2335 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 2336 | /* Shifting from/to a cloned skb is a no-go. |
| 2337 | * |
| 2338 | * Caller cannot keep skb_shinfo related pointers past calling here! |
| 2339 | */ |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2340 | static int skb_prepare_for_shift(struct sk_buff *skb) |
| 2341 | { |
Ilpo Järvinen | 0ace285 | 2008-11-24 21:30:21 -0800 | [diff] [blame] | 2342 | return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2343 | } |
| 2344 | |
| 2345 | /** |
| 2346 | * skb_shift - Shifts paged data partially from skb to another |
| 2347 | * @tgt: buffer into which tail data gets added |
| 2348 | * @skb: buffer from which the paged data comes from |
| 2349 | * @shiftlen: shift up to this many bytes |
| 2350 | * |
| 2351 | * Attempts to shift up to shiftlen worth of bytes, which may be less than |
Feng King | 20e994a | 2011-11-21 01:47:11 +0000 | [diff] [blame] | 2352 | * the length of the skb, from skb to tgt. Returns the number of bytes shifted. |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2353 | * It's up to caller to free skb if everything was shifted. |
| 2354 | * |
| 2355 | * If @tgt runs out of frags, the whole operation is aborted. |
| 2356 | * |
| 2357 | * The skb must contain nothing but paged data, while tgt is allowed |
| 2358 | * to have non-paged data as well. |
| 2359 | * |
| 2360 | * TODO: full sized shift could be optimized but that would need |
| 2361 | * specialized skb free'er to handle frags without up-to-date nr_frags. |
| 2362 | */ |
| 2363 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) |
| 2364 | { |
| 2365 | int from, to, merge, todo; |
| 2366 | struct skb_frag_struct *fragfrom, *fragto; |
| 2367 | |
| 2368 | BUG_ON(shiftlen > skb->len); |
| 2369 | BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ |
| 2370 | |
| 2371 | todo = shiftlen; |
| 2372 | from = 0; |
| 2373 | to = skb_shinfo(tgt)->nr_frags; |
| 2374 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 2375 | |
| 2376 | /* Actual merge is delayed until the point when we know we can |
| 2377 | * commit all, so that we don't have to undo partial changes. |
| 2378 | */ |
| 2379 | if (!to || |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2380 | !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), |
| 2381 | fragfrom->page_offset)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2382 | merge = -1; |
| 2383 | } else { |
| 2384 | merge = to - 1; |
| 2385 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2386 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2387 | if (todo < 0) { |
| 2388 | if (skb_prepare_for_shift(skb) || |
| 2389 | skb_prepare_for_shift(tgt)) |
| 2390 | return 0; |
| 2391 | |
Ilpo Järvinen | 9f782db | 2008-11-25 13:57:01 -0800 | [diff] [blame] | 2392 | /* All previous frag pointers might be stale! */ |
| 2393 | fragfrom = &skb_shinfo(skb)->frags[from]; |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2394 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 2395 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2396 | skb_frag_size_add(fragto, shiftlen); |
| 2397 | skb_frag_size_sub(fragfrom, shiftlen); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2398 | fragfrom->page_offset += shiftlen; |
| 2399 | |
| 2400 | goto onlymerged; |
| 2401 | } |
| 2402 | |
| 2403 | from++; |
| 2404 | } |
| 2405 | |
| 2406 | /* Skip full, not-fitting skb to avoid expensive operations */ |
| 2407 | if ((shiftlen == skb->len) && |
| 2408 | (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) |
| 2409 | return 0; |
| 2410 | |
| 2411 | if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) |
| 2412 | return 0; |
| 2413 | |
| 2414 | while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { |
| 2415 | if (to == MAX_SKB_FRAGS) |
| 2416 | return 0; |
| 2417 | |
| 2418 | fragfrom = &skb_shinfo(skb)->frags[from]; |
| 2419 | fragto = &skb_shinfo(tgt)->frags[to]; |
| 2420 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2421 | if (todo >= skb_frag_size(fragfrom)) { |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2422 | *fragto = *fragfrom; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2423 | todo -= skb_frag_size(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2424 | from++; |
| 2425 | to++; |
| 2426 | |
| 2427 | } else { |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2428 | __skb_frag_ref(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2429 | fragto->page = fragfrom->page; |
| 2430 | fragto->page_offset = fragfrom->page_offset; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2431 | skb_frag_size_set(fragto, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2432 | |
| 2433 | fragfrom->page_offset += todo; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2434 | skb_frag_size_sub(fragfrom, todo); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2435 | todo = 0; |
| 2436 | |
| 2437 | to++; |
| 2438 | break; |
| 2439 | } |
| 2440 | } |
| 2441 | |
| 2442 | /* Ready to "commit" this state change to tgt */ |
| 2443 | skb_shinfo(tgt)->nr_frags = to; |
| 2444 | |
| 2445 | if (merge >= 0) { |
| 2446 | fragfrom = &skb_shinfo(skb)->frags[0]; |
| 2447 | fragto = &skb_shinfo(tgt)->frags[merge]; |
| 2448 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2449 | skb_frag_size_add(fragto, skb_frag_size(fragfrom)); |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2450 | __skb_frag_unref(fragfrom); |
Ilpo Järvinen | 832d11c | 2008-11-24 21:20:15 -0800 | [diff] [blame] | 2451 | } |
| 2452 | |
| 2453 | /* Reposition in the original skb */ |
| 2454 | to = 0; |
| 2455 | while (from < skb_shinfo(skb)->nr_frags) |
| 2456 | skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; |
| 2457 | skb_shinfo(skb)->nr_frags = to; |
| 2458 | |
| 2459 | BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); |
| 2460 | |
| 2461 | onlymerged: |
| 2462 | /* Most likely the tgt won't ever need its checksum anymore; the skb, |
| 2463 | * on the other hand, might need it if it has to be resent. |
| 2464 | */ |
| 2465 | tgt->ip_summed = CHECKSUM_PARTIAL; |
| 2466 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 2467 | |
| 2468 | /* Yak, is it really working this way? Some helper please? */ |
| 2469 | skb->len -= shiftlen; |
| 2470 | skb->data_len -= shiftlen; |
| 2471 | skb->truesize -= shiftlen; |
| 2472 | tgt->len += shiftlen; |
| 2473 | tgt->data_len += shiftlen; |
| 2474 | tgt->truesize += shiftlen; |
| 2475 | |
| 2476 | return shiftlen; |
| 2477 | } |
| 2478 | |
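/*
 * Example (hypothetical): how a caller collapsing a retransmit queue
 * might use skb_shift(). The source skb must carry only paged data
 * (see the BUG_ON above); if everything moved, it is the caller's job
 * to unlink and free it.
 */
static bool example_try_collapse(struct sk_buff *tgt, struct sk_buff *skb)
{
	int len = skb->len;

	return skb_shift(tgt, skb, len) == len;	/* true: skb now empty */
}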
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2479 | /** |
| 2480 | * skb_prepare_seq_read - Prepare a sequential read of skb data |
| 2481 | * @skb: the buffer to read |
| 2482 | * @from: lower offset of data to be read |
| 2483 | * @to: upper offset of data to be read |
| 2484 | * @st: state variable |
| 2485 | * |
| 2486 | * Initializes the specified state variable. Must be called before |
| 2487 | * invoking skb_seq_read() for the first time. |
| 2488 | */ |
| 2489 | void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, |
| 2490 | unsigned int to, struct skb_seq_state *st) |
| 2491 | { |
| 2492 | st->lower_offset = from; |
| 2493 | st->upper_offset = to; |
| 2494 | st->root_skb = st->cur_skb = skb; |
| 2495 | st->frag_idx = st->stepped_offset = 0; |
| 2496 | st->frag_data = NULL; |
| 2497 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2498 | EXPORT_SYMBOL(skb_prepare_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2499 | |
| 2500 | /** |
| 2501 | * skb_seq_read - Sequentially read skb data |
| 2502 | * @consumed: number of bytes consumed by the caller so far |
| 2503 | * @data: destination pointer for data to be returned |
| 2504 | * @st: state variable |
| 2505 | * |
| 2506 | * Reads a block of skb data at @consumed relative to the |
| 2507 | * lower offset specified to skb_prepare_seq_read(). Assigns |
| 2508 | * the head of the data block to @data and returns the length |
| 2509 | * of the block or 0 if the end of the skb data or the upper |
| 2510 | * offset has been reached. |
| 2511 | * |
| 2512 | * The caller is not required to consume all of the data |
| 2513 | * returned, i.e. @consumed is typically set to the number |
| 2514 | * of bytes already consumed and the next call to |
| 2515 | * skb_seq_read() will return the remaining part of the block. |
| 2516 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 2517 | * Note 1: The size of each block of data returned can be arbitrary; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2518 | * this limitation is the cost of zerocopy sequential |
| 2519 | * reads of potentially non-linear data. |
| 2520 | * |
Randy Dunlap | bc2cda1 | 2008-02-13 15:03:25 -0800 | [diff] [blame] | 2521 | * Note 2: Fragment lists within fragments are not implemented |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2522 | * at the moment; state->root_skb could be replaced with |
| 2523 | * a stack for this purpose. |
| 2524 | */ |
| 2525 | unsigned int skb_seq_read(unsigned int consumed, const u8 **data, |
| 2526 | struct skb_seq_state *st) |
| 2527 | { |
| 2528 | unsigned int block_limit, abs_offset = consumed + st->lower_offset; |
| 2529 | skb_frag_t *frag; |
| 2530 | |
| 2531 | if (unlikely(abs_offset >= st->upper_offset)) |
| 2532 | return 0; |
| 2533 | |
| 2534 | next_skb: |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2535 | block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2536 | |
Thomas Chenault | 995b337 | 2009-05-18 21:43:27 -0700 | [diff] [blame] | 2537 | if (abs_offset < block_limit && !st->frag_data) { |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2538 | *data = st->cur_skb->data + (abs_offset - st->stepped_offset); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2539 | return block_limit - abs_offset; |
| 2540 | } |
| 2541 | |
| 2542 | if (st->frag_idx == 0 && !st->frag_data) |
| 2543 | st->stepped_offset += skb_headlen(st->cur_skb); |
| 2544 | |
| 2545 | while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { |
| 2546 | frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2547 | block_limit = skb_frag_size(frag) + st->stepped_offset; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2548 | |
| 2549 | if (abs_offset < block_limit) { |
| 2550 | if (!st->frag_data) |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2551 | st->frag_data = kmap_atomic(skb_frag_page(frag)); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2552 | |
| 2553 | *data = (u8 *) st->frag_data + frag->page_offset + |
| 2554 | (abs_offset - st->stepped_offset); |
| 2555 | |
| 2556 | return block_limit - abs_offset; |
| 2557 | } |
| 2558 | |
| 2559 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2560 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2561 | st->frag_data = NULL; |
| 2562 | } |
| 2563 | |
| 2564 | st->frag_idx++; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2565 | st->stepped_offset += skb_frag_size(frag); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2566 | } |
| 2567 | |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 2568 | if (st->frag_data) { |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2569 | kunmap_atomic(st->frag_data); |
Olaf Kirch | 5b5a60d | 2007-06-23 23:11:52 -0700 | [diff] [blame] | 2570 | st->frag_data = NULL; |
| 2571 | } |
| 2572 | |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 2573 | if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 2574 | st->cur_skb = skb_shinfo(st->root_skb)->frag_list; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2575 | st->frag_idx = 0; |
| 2576 | goto next_skb; |
Shyam Iyer | 71b3346 | 2009-01-29 16:12:42 -0800 | [diff] [blame] | 2577 | } else if (st->cur_skb->next) { |
| 2578 | st->cur_skb = st->cur_skb->next; |
Herbert Xu | 95e3b24 | 2009-01-29 16:07:52 -0800 | [diff] [blame] | 2579 | st->frag_idx = 0; |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2580 | goto next_skb; |
| 2581 | } |
| 2582 | |
| 2583 | return 0; |
| 2584 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2585 | EXPORT_SYMBOL(skb_seq_read); |
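/*
 * Example (hypothetical): the canonical sequential-read loop over all
 * of an skb's data. skb_abort_seq_read() is only needed if the walk
 * stops before skb_seq_read() has returned 0.
 */
static void example_walk_skb(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* process len bytes at data here */
		consumed += len;
	}
}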
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2586 | |
| 2587 | /** |
| 2588 | * skb_abort_seq_read - Abort a sequential read of skb data |
| 2589 | * @st: state variable |
| 2590 | * |
| 2591 | * Must be called if skb_seq_read() was not called until it |
| 2592 | * returned 0. |
| 2593 | */ |
| 2594 | void skb_abort_seq_read(struct skb_seq_state *st) |
| 2595 | { |
| 2596 | if (st->frag_data) |
Eric Dumazet | 51c56b0 | 2012-04-05 11:35:15 +0200 | [diff] [blame] | 2597 | kunmap_atomic(st->frag_data); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2598 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2599 | EXPORT_SYMBOL(skb_abort_seq_read); |
Thomas Graf | 677e90e | 2005-06-23 20:59:51 -0700 | [diff] [blame] | 2600 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2601 | #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) |
| 2602 | |
| 2603 | static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, |
| 2604 | struct ts_config *conf, |
| 2605 | struct ts_state *state) |
| 2606 | { |
| 2607 | return skb_seq_read(offset, text, TS_SKB_CB(state)); |
| 2608 | } |
| 2609 | |
| 2610 | static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) |
| 2611 | { |
| 2612 | skb_abort_seq_read(TS_SKB_CB(state)); |
| 2613 | } |
| 2614 | |
| 2615 | /** |
| 2616 | * skb_find_text - Find a text pattern in skb data |
| 2617 | * @skb: the buffer to look in |
| 2618 | * @from: search offset |
| 2619 | * @to: search limit |
| 2620 | * @config: textsearch configuration |
| 2621 | * @state: uninitialized textsearch state variable |
| 2622 | * |
| 2623 | * Finds a pattern in the skb data according to the specified |
| 2624 | * textsearch configuration. Use textsearch_next() to retrieve |
| 2625 | * subsequent occurrences of the pattern. Returns the offset |
| 2626 | * to the first occurrence or UINT_MAX if no match was found. |
| 2627 | */ |
| 2628 | unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, |
| 2629 | unsigned int to, struct ts_config *config, |
| 2630 | struct ts_state *state) |
| 2631 | { |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 2632 | unsigned int ret; |
| 2633 | |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2634 | config->get_next_block = skb_ts_get_next_block; |
| 2635 | config->finish = skb_ts_finish; |
| 2636 | |
| 2637 | skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); |
| 2638 | |
Phil Oester | f72b948 | 2006-06-26 00:00:57 -0700 | [diff] [blame] | 2639 | ret = textsearch_find(config, state); |
| 2640 | return (ret <= to - from ? ret : UINT_MAX); |
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2641 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2642 | EXPORT_SYMBOL(skb_find_text); |
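/*
 * Example (hypothetical): searching an skb with the textsearch API.
 * "kmp" is one of the in-tree algorithms; the pattern is made up.
 */
static unsigned int example_find_pattern(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;
}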
Thomas Graf | 3fc7e8a | 2005-06-23 21:00:17 -0700 | [diff] [blame] | 2643 | |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2644 | /** |
Ben Hutchings | 2c53040 | 2012-07-10 10:55:09 +0000 | [diff] [blame] | 2645 | * skb_append_datato_frags - append the user data to a skb |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2646 | * @sk: sock structure |
| 2647 | * @skb: skb structure to which the user data is appended. |
| 2648 | * @getfrag: call back function to be used for getting the user data |
| 2649 | * @from: pointer to user message iov |
| 2650 | * @length: length of the iov message |
| 2651 | * |
| 2652 | * Description: This procedure appends the user data to the fragment part |
| 2653 | * of the skb. If any page allocation fails, this procedure returns -ENOMEM. |
| 2654 | */ |
| 2655 | int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, |
Martin Waitz | dab9630 | 2005-12-05 13:40:12 -0800 | [diff] [blame] | 2656 | int (*getfrag)(void *from, char *to, int offset, |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2657 | int len, int odd, struct sk_buff *skb), |
| 2658 | void *from, int length) |
| 2659 | { |
| 2660 | int frg_cnt = 0; |
| 2661 | skb_frag_t *frag = NULL; |
| 2662 | struct page *page = NULL; |
| 2663 | int copy, left; |
| 2664 | int offset = 0; |
| 2665 | int ret; |
| 2666 | |
| 2667 | do { |
| 2668 | /* Return error if we don't have space for new frag */ |
| 2669 | frg_cnt = skb_shinfo(skb)->nr_frags; |
| 2670 | if (frg_cnt >= MAX_SKB_FRAGS) |
| 2671 | return -EFAULT; |
| 2672 | |
| 2673 | /* allocate a new page for next frag */ |
| 2674 | page = alloc_pages(sk->sk_allocation, 0); |
| 2675 | |
| 2676 | /* If alloc_page fails just return failure and caller will |
| 2677 | * free previous allocated pages by doing kfree_skb() |
| 2678 | */ |
| 2679 | if (page == NULL) |
| 2680 | return -ENOMEM; |
| 2681 | |
| 2682 | /* initialize the next frag */ |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2683 | skb_fill_page_desc(skb, frg_cnt, page, 0, 0); |
| 2684 | skb->truesize += PAGE_SIZE; |
| 2685 | atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); |
| 2686 | |
| 2687 | /* get the new initialized frag */ |
| 2688 | frg_cnt = skb_shinfo(skb)->nr_frags; |
| 2689 | frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; |
| 2690 | |
| 2691 | /* copy the user data to page */ |
| 2692 | left = PAGE_SIZE - frag->page_offset; |
| 2693 | copy = (length > left)? left : length; |
| 2694 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2695 | ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag), |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2696 | offset, copy, 0, skb); |
| 2697 | if (ret < 0) |
| 2698 | return -EFAULT; |
| 2699 | |
| 2700 | /* copy was successful so update the size parameters */ |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2701 | skb_frag_size_add(frag, copy); |
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2702 | skb->len += copy; |
| 2703 | skb->data_len += copy; |
| 2704 | offset += copy; |
| 2705 | length -= copy; |
| 2706 | |
| 2707 | } while (length > 0); |
| 2708 | |
| 2709 | return 0; |
| 2710 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 2711 | EXPORT_SYMBOL(skb_append_datato_frags); |
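/*
 * Example (hypothetical): the simplest possible getfrag callback for
 * skb_append_datato_frags() when the data already lives in kernel
 * memory. Real callers typically pass ip_generic_getfrag() with a
 * msghdr as the cookie instead.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}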
Ananda Raju | e89e9cf | 2005-10-18 15:46:41 -0700 | [diff] [blame] | 2712 | |
Herbert Xu | cbb042f9 | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2713 | /** |
| 2714 | * skb_pull_rcsum - pull skb and update receive checksum |
| 2715 | * @skb: buffer to update |
Herbert Xu | cbb042f9 | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2716 | * @len: length of data pulled |
| 2717 | * |
| 2718 | * This function performs an skb_pull on the packet and updates |
Urs Thuermann | fee54fa | 2008-02-12 22:03:25 -0800 | [diff] [blame] | 2719 | * the CHECKSUM_COMPLETE checksum. It should be used in |
Patrick McHardy | 84fa793 | 2006-08-29 16:44:56 -0700 | [diff] [blame] | 2720 | * receive path processing instead of skb_pull unless you know |
| 2721 | * that the checksum difference is zero (e.g., a valid IP header) |
| 2722 | * or you are setting ip_summed to CHECKSUM_NONE. |
Herbert Xu | cbb042f9 | 2006-03-20 22:43:56 -0800 | [diff] [blame] | 2723 | */ |
| 2724 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) |
| 2725 | { |
| 2726 | BUG_ON(len > skb->len); |
| 2727 | skb->len -= len; |
| 2728 | BUG_ON(skb->len < skb->data_len); |
| 2729 | skb_postpull_rcsum(skb, skb->data, len); |
| 2730 | return skb->data += len; |
| 2731 | } |
Arnaldo Carvalho de Melo | f94691a | 2006-03-20 22:47:55 -0800 | [diff] [blame] | 2732 | EXPORT_SYMBOL_GPL(skb_pull_rcsum); |
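/*
 * Example (hypothetical): stripping a made-up 4-byte encapsulation tag
 * on receive without invalidating a CHECKSUM_COMPLETE value.
 */
static int example_strip_tag(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;

	skb_pull_rcsum(skb, 4);	/* adjusts skb->csum while pulling */
	return 0;
}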
| 2733 | |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2734 | /** |
| 2735 | * skb_segment - Perform protocol segmentation on skb. |
| 2736 | * @skb: buffer to segment |
Herbert Xu | 576a30e | 2006-06-27 13:22:38 -0700 | [diff] [blame] | 2737 | * @features: features for the output path (see dev->features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2738 | * |
| 2739 | * This function performs segmentation on the given skb. It returns |
Ben Hutchings | 4c821d7 | 2008-04-13 21:52:48 -0700 | [diff] [blame] | 2740 | * a pointer to the first in a list of new skbs for the segments. |
| 2741 | * In case of error it returns ERR_PTR(err). |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2742 | */ |
Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 2743 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2744 | { |
| 2745 | struct sk_buff *segs = NULL; |
| 2746 | struct sk_buff *tail = NULL; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2747 | struct sk_buff *fskb = skb_shinfo(skb)->frag_list; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2748 | unsigned int mss = skb_shinfo(skb)->gso_size; |
Arnaldo Carvalho de Melo | 98e399f | 2007-03-19 15:33:04 -0700 | [diff] [blame] | 2749 | unsigned int doffset = skb->data - skb_mac_header(skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2750 | unsigned int offset = doffset; |
| 2751 | unsigned int headroom; |
| 2752 | unsigned int len; |
Michał Mirosław | 04ed3e7 | 2011-01-24 15:32:47 -0800 | [diff] [blame] | 2753 | int sg = !!(features & NETIF_F_SG); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2754 | int nfrags = skb_shinfo(skb)->nr_frags; |
| 2755 | int err = -ENOMEM; |
| 2756 | int i = 0; |
| 2757 | int pos; |
| 2758 | |
| 2759 | __skb_push(skb, doffset); |
| 2760 | headroom = skb_headroom(skb); |
| 2761 | pos = skb_headlen(skb); |
| 2762 | |
| 2763 | do { |
| 2764 | struct sk_buff *nskb; |
| 2765 | skb_frag_t *frag; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 2766 | int hsize; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2767 | int size; |
| 2768 | |
| 2769 | len = skb->len - offset; |
| 2770 | if (len > mss) |
| 2771 | len = mss; |
| 2772 | |
| 2773 | hsize = skb_headlen(skb) - offset; |
| 2774 | if (hsize < 0) |
| 2775 | hsize = 0; |
Herbert Xu | c8884ed | 2006-10-29 15:59:41 -0800 | [diff] [blame] | 2776 | if (hsize > len || !sg) |
| 2777 | hsize = len; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2778 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2779 | if (!hsize && i >= nfrags) { |
| 2780 | BUG_ON(fskb->len != len); |
| 2781 | |
| 2782 | pos += len; |
| 2783 | nskb = skb_clone(fskb, GFP_ATOMIC); |
| 2784 | fskb = fskb->next; |
| 2785 | |
| 2786 | if (unlikely(!nskb)) |
| 2787 | goto err; |
| 2788 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 2789 | hsize = skb_end_offset(nskb); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2790 | if (skb_cow_head(nskb, doffset + headroom)) { |
| 2791 | kfree_skb(nskb); |
| 2792 | goto err; |
| 2793 | } |
| 2794 | |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 2795 | nskb->truesize += skb_end_offset(nskb) - hsize; |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2796 | skb_release_head_state(nskb); |
| 2797 | __skb_push(nskb, doffset); |
| 2798 | } else { |
Mel Gorman | c93bdd0 | 2012-07-31 16:44:19 -0700 | [diff] [blame] | 2799 | nskb = __alloc_skb(hsize + doffset + headroom, |
| 2800 | GFP_ATOMIC, skb_alloc_rx_flag(skb), |
| 2801 | NUMA_NO_NODE); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2802 | |
| 2803 | if (unlikely(!nskb)) |
| 2804 | goto err; |
| 2805 | |
| 2806 | skb_reserve(nskb, headroom); |
| 2807 | __skb_put(nskb, doffset); |
| 2808 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2809 | |
| 2810 | if (segs) |
| 2811 | tail->next = nskb; |
| 2812 | else |
| 2813 | segs = nskb; |
| 2814 | tail = nskb; |
| 2815 | |
Herbert Xu | 6f85a12 | 2008-08-15 14:55:02 -0700 | [diff] [blame] | 2816 | __copy_skb_header(nskb, skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2817 | nskb->mac_len = skb->mac_len; |
| 2818 | |
Eric Dumazet | 3d3be43 | 2010-09-01 00:50:51 +0000 | [diff] [blame] | 2819 | /* nskb and skb might have different headroom */ |
| 2820 | if (nskb->ip_summed == CHECKSUM_PARTIAL) |
| 2821 | nskb->csum_start += skb_headroom(nskb) - headroom; |
| 2822 | |
Arnaldo Carvalho de Melo | 459a98e | 2007-03-19 15:30:44 -0700 | [diff] [blame] | 2823 | skb_reset_mac_header(nskb); |
Arnaldo Carvalho de Melo | ddc7b8e | 2007-03-15 21:42:27 -0300 | [diff] [blame] | 2824 | skb_set_network_header(nskb, skb->mac_len); |
Arnaldo Carvalho de Melo | b0e380b | 2007-04-10 21:21:55 -0700 | [diff] [blame] | 2825 | nskb->transport_header = (nskb->network_header + |
| 2826 | skb_network_header_len(skb)); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2827 | skb_copy_from_linear_data(skb, nskb->data, doffset); |
| 2828 | |
Herbert Xu | 2f18185 | 2009-03-28 23:39:18 -0700 | [diff] [blame] | 2829 | if (fskb != skb_shinfo(skb)->frag_list) |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2830 | continue; |
| 2831 | |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2832 | if (!sg) { |
Herbert Xu | 6f85a12 | 2008-08-15 14:55:02 -0700 | [diff] [blame] | 2833 | nskb->ip_summed = CHECKSUM_NONE; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2834 | nskb->csum = skb_copy_and_csum_bits(skb, offset, |
| 2835 | skb_put(nskb, len), |
| 2836 | len, 0); |
| 2837 | continue; |
| 2838 | } |
| 2839 | |
| 2840 | frag = skb_shinfo(nskb)->frags; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2841 | |
Arnaldo Carvalho de Melo | d626f62 | 2007-03-27 18:55:52 -0300 | [diff] [blame] | 2842 | skb_copy_from_linear_data_offset(skb, offset, |
| 2843 | skb_put(nskb, hsize), hsize); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2844 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2845 | while (pos < offset + len && i < nfrags) { |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2846 | *frag = skb_shinfo(skb)->frags[i]; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 2847 | __skb_frag_ref(frag); |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2848 | size = skb_frag_size(frag); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2849 | |
| 2850 | if (pos < offset) { |
| 2851 | frag->page_offset += offset - pos; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2852 | skb_frag_size_sub(frag, offset - pos); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2853 | } |
| 2854 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2855 | skb_shinfo(nskb)->nr_frags++; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2856 | |
| 2857 | if (pos + size <= offset + len) { |
| 2858 | i++; |
| 2859 | pos += size; |
| 2860 | } else { |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2861 | skb_frag_size_sub(frag, pos + size - (offset + len)); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2862 | goto skip_fraglist; |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2863 | } |
| 2864 | |
| 2865 | frag++; |
| 2866 | } |
| 2867 | |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2868 | if (pos < offset + len) { |
| 2869 | struct sk_buff *fskb2 = fskb; |
| 2870 | |
| 2871 | BUG_ON(pos + fskb->len != offset + len); |
| 2872 | |
| 2873 | pos += fskb->len; |
| 2874 | fskb = fskb->next; |
| 2875 | |
| 2876 | if (fskb2->next) { |
| 2877 | fskb2 = skb_clone(fskb2, GFP_ATOMIC); |
| 2878 | if (!fskb2) |
| 2879 | goto err; |
| 2880 | } else |
| 2881 | skb_get(fskb2); |
| 2882 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 2883 | SKB_FRAG_ASSERT(nskb); |
Herbert Xu | 89319d38 | 2008-12-15 23:26:06 -0800 | [diff] [blame] | 2884 | skb_shinfo(nskb)->frag_list = fskb2; |
| 2885 | } |
| 2886 | |
| 2887 | skip_fraglist: |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2888 | nskb->data_len = len - hsize; |
| 2889 | nskb->len += nskb->data_len; |
| 2890 | nskb->truesize += nskb->data_len; |
| 2891 | } while ((offset += len) < skb->len); |
| 2892 | |
| 2893 | return segs; |
| 2894 | |
| 2895 | err: |
| 2896 | while ((skb = segs)) { |
| 2897 | segs = skb->next; |
Patrick McHardy | b08d584 | 2007-02-27 09:57:37 -0800 | [diff] [blame] | 2898 | kfree_skb(skb); |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2899 | } |
| 2900 | return ERR_PTR(err); |
| 2901 | } |
Herbert Xu | f4c50d9 | 2006-06-22 03:02:40 -0700 | [diff] [blame] | 2902 | EXPORT_SYMBOL_GPL(skb_segment); |
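/*
 * Example (hypothetical): consuming the list returned by skb_segment().
 * On success the original skb is no longer needed; each segment is
 * detached before being handed on (the free here is a placeholder for
 * a real transmit).
 */
static int example_gso_xmit(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs = skb_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	consume_skb(skb);
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		kfree_skb(nskb);	/* placeholder: transmit instead */
	}
	return 0;
}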
| 2903 | |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2904 | int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) |
| 2905 | { |
| 2906 | struct sk_buff *p = *head; |
| 2907 | struct sk_buff *nskb; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2908 | struct skb_shared_info *skbinfo = skb_shinfo(skb); |
| 2909 | struct skb_shared_info *pinfo = skb_shinfo(p); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2910 | unsigned int headroom; |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2911 | unsigned int len = skb_gro_len(skb); |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 2912 | unsigned int offset = skb_gro_offset(skb); |
| 2913 | unsigned int headlen = skb_headlen(skb); |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 2914 | unsigned int delta_truesize; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2915 | |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2916 | if (p->len + len >= 65536) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2917 | return -E2BIG; |
| 2918 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2919 | if (pinfo->frag_list) |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2920 | goto merge; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 2921 | else if (headlen <= offset) { |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 2922 | skb_frag_t *frag; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 2923 | skb_frag_t *frag2; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2924 | int i = skbinfo->nr_frags; |
| 2925 | int nr_frags = pinfo->nr_frags + i; |
Herbert Xu | 42da699 | 2009-05-26 18:50:19 +0000 | [diff] [blame] | 2926 | |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 2927 | offset -= headlen; |
| 2928 | |
| 2929 | if (nr_frags > MAX_SKB_FRAGS) |
Herbert Xu | 81705ad | 2009-01-29 14:19:51 +0000 | [diff] [blame] | 2930 | return -E2BIG; |
| 2931 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2932 | pinfo->nr_frags = nr_frags; |
| 2933 | skbinfo->nr_frags = 0; |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 2934 | |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 2935 | frag = pinfo->frags + nr_frags; |
| 2936 | frag2 = skbinfo->frags + i; |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 2937 | do { |
| 2938 | *--frag = *--frag2; |
| 2939 | } while (--i); |
| 2940 | |
| 2941 | frag->page_offset += offset; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 2942 | skb_frag_size_sub(frag, offset); |
Herbert Xu | 66e92fc | 2009-05-26 18:50:32 +0000 | [diff] [blame] | 2943 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 2944 | /* all fragments truesize : remove (head size + sk_buff) */ |
Alexander Duyck | ec47ea8 | 2012-05-04 14:26:56 +0000 | [diff] [blame] | 2945 | delta_truesize = skb->truesize - |
| 2946 | SKB_TRUESIZE(skb_end_offset(skb)); |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 2947 | |
Herbert Xu | f557206 | 2009-01-14 20:40:03 -0800 | [diff] [blame] | 2948 | skb->truesize -= skb->data_len; |
| 2949 | skb->len -= skb->data_len; |
| 2950 | skb->data_len = 0; |
| 2951 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 2952 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 2953 | goto done; |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 2954 | } else if (skb->head_frag) { |
| 2955 | int nr_frags = pinfo->nr_frags; |
| 2956 | skb_frag_t *frag = pinfo->frags + nr_frags; |
| 2957 | struct page *page = virt_to_head_page(skb->head); |
| 2958 | unsigned int first_size = headlen - offset; |
| 2959 | unsigned int first_offset; |
| 2960 | |
| 2961 | if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) |
| 2962 | return -E2BIG; |
| 2963 | |
| 2964 | first_offset = skb->data - |
| 2965 | (unsigned char *)page_address(page) + |
| 2966 | offset; |
| 2967 | |
| 2968 | pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; |
| 2969 | |
| 2970 | frag->page.p = page; |
| 2971 | frag->page_offset = first_offset; |
| 2972 | skb_frag_size_set(frag, first_size); |
| 2973 | |
| 2974 | memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); |
| 2975 | /* We dont need to clear skbinfo->nr_frags here */ |
| 2976 | |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 2977 | delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
Eric Dumazet | d7e8883 | 2012-04-30 08:10:34 +0000 | [diff] [blame] | 2978 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; |
| 2979 | goto done; |
Herbert Xu | 69c0cab | 2009-11-17 05:18:18 -0800 | [diff] [blame] | 2980 | } else if (skb_gro_len(p) != pinfo->gso_size) |
| 2981 | return -E2BIG; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2982 | |
| 2983 | headroom = skb_headroom(p); |
Eric Dumazet | 3d3be43 | 2010-09-01 00:50:51 +0000 | [diff] [blame] | 2984 | nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2985 | if (unlikely(!nskb)) |
| 2986 | return -ENOMEM; |
| 2987 | |
| 2988 | __copy_skb_header(nskb, p); |
| 2989 | nskb->mac_len = p->mac_len; |
| 2990 | |
| 2991 | skb_reserve(nskb, headroom); |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2992 | __skb_put(nskb, skb_gro_offset(p)); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2993 | |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2994 | skb_set_mac_header(nskb, skb_mac_header(p) - p->data); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 2995 | skb_set_network_header(nskb, skb_network_offset(p)); |
| 2996 | skb_set_transport_header(nskb, skb_transport_offset(p)); |
| 2997 | |
Herbert Xu | 8691173 | 2009-01-29 14:19:50 +0000 | [diff] [blame] | 2998 | __skb_pull(p, skb_gro_offset(p)); |
| 2999 | memcpy(skb_mac_header(nskb), skb_mac_header(p), |
| 3000 | p->data - skb_mac_header(p)); |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3001 | |
| 3002 | *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); |
| 3003 | skb_shinfo(nskb)->frag_list = p; |
Herbert Xu | 9aaa156 | 2009-05-26 18:50:33 +0000 | [diff] [blame] | 3004 | skb_shinfo(nskb)->gso_size = pinfo->gso_size; |
Herbert Xu | 622e0ca | 2010-05-20 23:07:56 -0700 | [diff] [blame] | 3005 | pinfo->gso_size = 0; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3006 | skb_header_release(p); |
| 3007 | nskb->prev = p; |
| 3008 | |
| 3009 | nskb->data_len += p->len; |
Eric Dumazet | de8261c | 2012-02-13 04:09:20 +0000 | [diff] [blame] | 3010 | nskb->truesize += p->truesize; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3011 | nskb->len += p->len; |
| 3012 | |
| 3013 | *head = nskb; |
| 3014 | nskb->next = p->next; |
| 3015 | p->next = NULL; |
| 3016 | |
| 3017 | p = nskb; |
| 3018 | |
| 3019 | merge: |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3020 | delta_truesize = skb->truesize; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3021 | if (offset > headlen) { |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 3022 | unsigned int eat = offset - headlen; |
| 3023 | |
| 3024 | skbinfo->frags[0].page_offset += eat; |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3025 | skb_frag_size_sub(&skbinfo->frags[0], eat); |
Michal Schmidt | d1dc7ab | 2011-01-24 12:08:48 +0000 | [diff] [blame] | 3026 | skb->data_len -= eat; |
| 3027 | skb->len -= eat; |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3028 | offset = headlen; |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 3029 | } |
| 3030 | |
Herbert Xu | 67147ba | 2009-05-26 18:50:22 +0000 | [diff] [blame] | 3031 | __skb_pull(skb, offset); |
Herbert Xu | 5603502 | 2009-02-05 21:26:52 -0800 | [diff] [blame] | 3032 | |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3033 | p->prev->next = skb; |
| 3034 | p->prev = skb; |
| 3035 | skb_header_release(skb); |
| 3036 | |
Herbert Xu | 5d38a07 | 2009-01-04 16:13:40 -0800 | [diff] [blame] | 3037 | done: |
| 3038 | NAPI_GRO_CB(p)->count++; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 3039 | p->data_len += len; |
Eric Dumazet | 715dc1f | 2012-05-02 23:33:21 +0000 | [diff] [blame] | 3040 | p->truesize += delta_truesize; |
Herbert Xu | 37fe473 | 2009-01-17 19:48:13 +0000 | [diff] [blame] | 3041 | p->len += len; |
Herbert Xu | 71d93b3 | 2008-12-15 23:42:33 -0800 | [diff] [blame] | 3042 | |
| 3043 | NAPI_GRO_CB(skb)->same_flow = 1; |
| 3044 | return 0; |
| 3045 | } |
| 3046 | EXPORT_SYMBOL_GPL(skb_gro_receive); |
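
/*
 * Editor's sketch (not part of the original file): the rough shape of
 * a protocol gro_receive handler that drives skb_gro_receive() above,
 * modelled loosely on tcp_gro_receive(). example_match() stands in
 * for the real header comparison and is hypothetical.
 */
#if 0
static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;

	for (; (p = *head); head = &p->next) {
		if (!example_match(p, skb)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		/* same flow: try the merge; on failure flush the candidate */
		if (skb_gro_receive(head, skb))
			pp = head;
		break;
	}
	return pp;
}
#endif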
| 3047 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3048 | void __init skb_init(void) |
| 3049 | { |
| 3050 | skbuff_head_cache = kmem_cache_create("skbuff_head_cache", |
| 3051 | sizeof(struct sk_buff), |
| 3052 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 3053 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3054 | NULL); |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 3055 | skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", |
| 3056 | (2*sizeof(struct sk_buff)) + |
| 3057 | sizeof(atomic_t), |
| 3058 | 0, |
Alexey Dobriyan | e5d679f33 | 2006-08-26 19:25:52 -0700 | [diff] [blame] | 3059 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 3060 | NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3061 | } |
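
/*
 * Editor's note (not part of the original file): the fclone cache
 * sizing above corresponds to a parent/child sk_buff pair followed by
 * a shared reference count, which __alloc_skb() carves out by pointer
 * arithmetic. The struct below only illustrates that implicit layout.
 */
#if 0
struct example_fclone {
	struct sk_buff	skb1;		/* parent */
	struct sk_buff	skb2;		/* pre-allocated clone */
	atomic_t	fclone_ref;	/* shared by both */
};
#endif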
| 3062 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3063 | /** |
| 3064 | * skb_to_sgvec - Fill a scatter-gather list from a socket buffer |
| 3065 | * @skb: Socket buffer containing the buffers to be mapped |
| 3066 | * @sg: The scatter-gather list to map into |
| 3067 | * @offset: The offset into the buffer's contents to start mapping |
| 3068 | * @len: Length of buffer space to be mapped |
| 3069 | * |
| 3070 | * Fill the specified scatter-gather list with mappings/pointers into a |
| 3071 | * region of the buffer space attached to a socket buffer. |
| 3072 | */ |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3073 | static int |
| 3074 | __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3075 | { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3076 | int start = skb_headlen(skb); |
| 3077 | int i, copy = start - offset; |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3078 | struct sk_buff *frag_iter; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3079 | int elt = 0; |
| 3080 | |
| 3081 | if (copy > 0) { |
| 3082 | if (copy > len) |
| 3083 | copy = len; |
Jens Axboe | 642f149 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 3084 | sg_set_buf(sg, skb->data + offset, copy); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3085 | elt++; |
| 3086 | if ((len -= copy) == 0) |
| 3087 | return elt; |
| 3088 | offset += copy; |
| 3089 | } |
| 3090 | |
| 3091 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3092 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3093 | |
Ilpo Järvinen | 547b792 | 2008-07-25 21:43:18 -0700 | [diff] [blame] | 3094 | WARN_ON(start > offset + len); |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3095 | |
Eric Dumazet | 9e903e0 | 2011-10-18 21:00:24 +0000 | [diff] [blame] | 3096 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3097 | if ((copy = end - offset) > 0) { |
| 3098 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
| 3099 | |
| 3100 | if (copy > len) |
| 3101 | copy = len; |
Ian Campbell | ea2ab69 | 2011-08-22 23:44:58 +0000 | [diff] [blame] | 3102 | sg_set_page(&sg[elt], skb_frag_page(frag), copy, |
Jens Axboe | 642f149 | 2007-10-24 11:20:47 +0200 | [diff] [blame] | 3103 | frag->page_offset+offset-start); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3104 | elt++; |
| 3105 | if (!(len -= copy)) |
| 3106 | return elt; |
| 3107 | offset += copy; |
| 3108 | } |
David S. Miller | 1a028e5 | 2007-04-27 15:21:23 -0700 | [diff] [blame] | 3109 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3110 | } |
| 3111 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3112 | skb_walk_frags(skb, frag_iter) { |
| 3113 | int end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3114 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3115 | WARN_ON(start > offset + len); |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3116 | |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3117 | end = start + frag_iter->len; |
| 3118 | if ((copy = end - offset) > 0) { |
| 3119 | if (copy > len) |
| 3120 | copy = len; |
| 3121 | elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
| 3122 | copy); |
| 3123 | if ((len -= copy) == 0) |
| 3124 | return elt; |
| 3125 | offset += copy; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3126 | } |
David S. Miller | fbb398a | 2009-06-09 00:18:59 -0700 | [diff] [blame] | 3127 | start = end; |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3128 | } |
| 3129 | BUG_ON(len); |
| 3130 | return elt; |
| 3131 | } |
| 3132 | |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3133 | int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
| 3134 | { |
| 3135 | int nsg = __skb_to_sgvec(skb, sg, offset, len); |
| 3136 | |
Jens Axboe | c46f233 | 2007-10-31 12:06:37 +0100 | [diff] [blame] | 3137 | sg_mark_end(&sg[nsg - 1]); |
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3138 | |
| 3139 | return nsg; |
| 3140 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3141 | EXPORT_SYMBOL_GPL(skb_to_sgvec); |
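
/*
 * Editor's sketch (not part of the original file): minimal use of
 * skb_to_sgvec(). MAX_SKB_FRAGS + 1 entries are enough for any skb
 * without a frag_list; sg_mark_end() is already done for us.
 */
#if 0
static void example_map_skb(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	/* sg[0..nsg-1] now describe the head area and page frags */
}
#endif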
David S. Miller | 51c739d | 2007-10-30 21:29:29 -0700 | [diff] [blame] | 3142 | |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3143 | /** |
| 3144 | * skb_cow_data - Check that a socket buffer's data buffers are writable |
| 3145 | * @skb: The socket buffer to check. |
| 3146 | * @tailbits: Amount of trailing space to be added |
| 3147 | * @trailer: Returned pointer to the skb where the @tailbits space begins |
| 3148 | * |
| 3149 | * Make sure that the data buffers attached to a socket buffer are |
| 3150 | * writable. If they are not, private copies are made of the data buffers |
| 3151 | * and the socket buffer is set to use these instead. |
| 3152 | * |
| 3153 | * If @tailbits is given, make sure that there is space to write @tailbits |
| 3154 | * bytes of data beyond current end of socket buffer. @trailer will be |
| 3155 | * set to point to the skb in which this space begins. |
| 3156 | * |
| 3157 | * The number of scatterlist elements required to completely map the |
| 3158 | * COW'd and extended socket buffer will be returned. |
| 3159 | */ |
| 3160 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) |
| 3161 | { |
| 3162 | int copyflag; |
| 3163 | int elt; |
| 3164 | struct sk_buff *skb1, **skb_p; |
| 3165 | |
| 3166 | /* If skb is cloned or its head is paged, reallocate |
| 3167 | * head pulling out all the pages (pages are considered not writable |
| 3168 | * at the moment even if they are anonymous). |
| 3169 | */ |
| 3170 | if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && |
| 3171 | __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) |
| 3172 | return -ENOMEM; |
| 3173 | |
| 3174 | /* Easy case. Most of packets will go this way. */ |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3175 | if (!skb_has_frag_list(skb)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3176 | /* A bit of trouble: not enough space for the trailer. |
| 3177 | * This should not happen when the stack is tuned to generate |
| 3178 | * good frames. OK, on a miss we reallocate and reserve even more |
| 3179 | * space; 128 bytes is fair. */ |
| 3180 | |
| 3181 | if (skb_tailroom(skb) < tailbits && |
| 3182 | pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) |
| 3183 | return -ENOMEM; |
| 3184 | |
| 3185 | /* Voila! */ |
| 3186 | *trailer = skb; |
| 3187 | return 1; |
| 3188 | } |
| 3189 | |
| 3190 | /* Misery. We are in trouble; time to mince the fragments... */ |
| 3191 | |
| 3192 | elt = 1; |
| 3193 | skb_p = &skb_shinfo(skb)->frag_list; |
| 3194 | copyflag = 0; |
| 3195 | |
| 3196 | while ((skb1 = *skb_p) != NULL) { |
| 3197 | int ntail = 0; |
| 3198 | |
| 3199 | /* The fragment is partially pulled by someone; |
| 3200 | * this can happen on input. Copy it and everything |
| 3201 | * after it. */ |
| 3202 | |
| 3203 | if (skb_shared(skb1)) |
| 3204 | copyflag = 1; |
| 3205 | |
| 3206 | /* If the skb is the last, worry about trailer. */ |
| 3207 | |
| 3208 | if (skb1->next == NULL && tailbits) { |
| 3209 | if (skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3210 | skb_has_frag_list(skb1) || |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3211 | skb_tailroom(skb1) < tailbits) |
| 3212 | ntail = tailbits + 128; |
| 3213 | } |
| 3214 | |
| 3215 | if (copyflag || |
| 3216 | skb_cloned(skb1) || |
| 3217 | ntail || |
| 3218 | skb_shinfo(skb1)->nr_frags || |
David S. Miller | 21dc330 | 2010-08-23 00:13:46 -0700 | [diff] [blame] | 3219 | skb_has_frag_list(skb1)) { |
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3220 | struct sk_buff *skb2; |
| 3221 | |
| 3222 | /* No luck: we must make a private copy of this fragment. */ |
| 3223 | if (ntail == 0) |
| 3224 | skb2 = skb_copy(skb1, GFP_ATOMIC); |
| 3225 | else |
| 3226 | skb2 = skb_copy_expand(skb1, |
| 3227 | skb_headroom(skb1), |
| 3228 | ntail, |
| 3229 | GFP_ATOMIC); |
| 3230 | if (unlikely(skb2 == NULL)) |
| 3231 | return -ENOMEM; |
| 3232 | |
| 3233 | if (skb1->sk) |
| 3234 | skb_set_owner_w(skb2, skb1->sk); |
| 3235 | |
| 3236 | /* Looking around. Are we still alive? |
| 3237 | * OK, link new skb, drop old one */ |
| 3238 | |
| 3239 | skb2->next = skb1->next; |
| 3240 | *skb_p = skb2; |
| 3241 | kfree_skb(skb1); |
| 3242 | skb1 = skb2; |
| 3243 | } |
| 3244 | elt++; |
| 3245 | *trailer = skb1; |
| 3246 | skb_p = &skb1->next; |
| 3247 | } |
| 3248 | |
| 3249 | return elt; |
| 3250 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3251 | EXPORT_SYMBOL_GPL(skb_cow_data); |
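
/*
 * Editor's sketch (not part of the original file): the IPsec-style
 * pattern that combines skb_cow_data() and skb_to_sgvec() -- the
 * return value sizes the scatterlist covering the now-writable
 * buffers. example_crypt() is a hypothetical consumer.
 */
#if 0
static int example_cow_and_map(struct sk_buff *skb, int tailbits)
{
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nsg, err;

	nsg = skb_cow_data(skb, tailbits, &trailer);
	if (nsg < 0)
		return nsg;

	sg = kmalloc(nsg * sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, skb->len);
	err = example_crypt(sg, skb->len);
	kfree(sg);
	return err;
}
#endif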
David Howells | 716ea3a | 2007-04-02 20:19:53 -0700 | [diff] [blame] | 3252 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3253 | static void sock_rmem_free(struct sk_buff *skb) |
| 3254 | { |
| 3255 | struct sock *sk = skb->sk; |
| 3256 | |
| 3257 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
| 3258 | } |
| 3259 | |
| 3260 | /* |
| 3261 | * Note: we don't memory-charge error packets (no sk_forward_alloc changes) |
| 3262 | */ |
| 3263 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
| 3264 | { |
Eric Dumazet | 110c433 | 2012-04-06 10:49:10 +0200 | [diff] [blame] | 3265 | int len = skb->len; |
| 3266 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3267 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 3268 | (unsigned int)sk->sk_rcvbuf) |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3269 | return -ENOMEM; |
| 3270 | |
| 3271 | skb_orphan(skb); |
| 3272 | skb->sk = sk; |
| 3273 | skb->destructor = sock_rmem_free; |
| 3274 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); |
| 3275 | |
Eric Dumazet | abb57ea | 2011-05-18 02:21:31 -0400 | [diff] [blame] | 3276 | /* before exiting rcu section, make sure dst is refcounted */ |
| 3277 | skb_dst_force(skb); |
| 3278 | |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3279 | skb_queue_tail(&sk->sk_error_queue, skb); |
| 3280 | if (!sock_flag(sk, SOCK_DEAD)) |
Eric Dumazet | 110c433 | 2012-04-06 10:49:10 +0200 | [diff] [blame] | 3281 | sk->sk_data_ready(sk, len); |
Eric Dumazet | b1faf56 | 2010-05-31 23:44:05 -0700 | [diff] [blame] | 3282 | return 0; |
| 3283 | } |
| 3284 | EXPORT_SYMBOL(sock_queue_err_skb); |
| 3285 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3286 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
| 3287 | struct skb_shared_hwtstamps *hwtstamps) |
| 3288 | { |
| 3289 | struct sock *sk = orig_skb->sk; |
| 3290 | struct sock_exterr_skb *serr; |
| 3291 | struct sk_buff *skb; |
| 3292 | int err; |
| 3293 | |
| 3294 | if (!sk) |
| 3295 | return; |
| 3296 | |
| 3297 | skb = skb_clone(orig_skb, GFP_ATOMIC); |
| 3298 | if (!skb) |
| 3299 | return; |
| 3300 | |
| 3301 | if (hwtstamps) { |
| 3302 | *skb_hwtstamps(skb) = |
| 3303 | *hwtstamps; |
| 3304 | } else { |
| 3305 | /* |
| 3306 | * no hardware timestamps available, |
Oliver Hartkopp | 2244d07 | 2010-08-17 08:59:14 +0000 | [diff] [blame] | 3307 | * so keep the shared tx_flags and only |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3308 | * store a software timestamp |
| 3309 | */ |
| 3310 | skb->tstamp = ktime_get_real(); |
| 3311 | } |
| 3312 | |
| 3313 | serr = SKB_EXT_ERR(skb); |
| 3314 | memset(serr, 0, sizeof(*serr)); |
| 3315 | serr->ee.ee_errno = ENOMSG; |
| 3316 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 3317 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3318 | err = sock_queue_err_skb(sk, skb); |
Eric Dumazet | 2903037 | 2010-05-29 00:20:48 -0700 | [diff] [blame] | 3319 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3320 | if (err) |
| 3321 | kfree_skb(skb); |
| 3322 | } |
| 3323 | EXPORT_SYMBOL_GPL(skb_tstamp_tx); |
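
/*
 * Editor's sketch (not part of the original file): how a NIC driver
 * typically feeds a hardware TX timestamp back on completion.
 * example_cycles_to_ktime() is a hypothetical device-clock helper.
 */
#if 0
static void example_tx_complete(struct sk_buff *skb, u64 dev_cycles)
{
	struct skb_shared_hwtstamps hwts;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	memset(&hwts, 0, sizeof(hwts));
	hwts.hwtstamp = example_cycles_to_ktime(dev_cycles);
	skb_tstamp_tx(skb, &hwts);
}
#endif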
| 3324 | |
Johannes Berg | 6e3e939 | 2011-11-09 10:15:42 +0100 | [diff] [blame] | 3325 | void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
| 3326 | { |
| 3327 | struct sock *sk = skb->sk; |
| 3328 | struct sock_exterr_skb *serr; |
| 3329 | int err; |
| 3330 | |
| 3331 | skb->wifi_acked_valid = 1; |
| 3332 | skb->wifi_acked = acked; |
| 3333 | |
| 3334 | serr = SKB_EXT_ERR(skb); |
| 3335 | memset(serr, 0, sizeof(*serr)); |
| 3336 | serr->ee.ee_errno = ENOMSG; |
| 3337 | serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; |
| 3338 | |
| 3339 | err = sock_queue_err_skb(sk, skb); |
| 3340 | if (err) |
| 3341 | kfree_skb(skb); |
| 3342 | } |
| 3343 | EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); |
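
/*
 * Editor's sketch (not part of the original file): TX-status reporting
 * from a wireless driver; only frames tagged for status (by a
 * SOCK_WIFI_STATUS sender) take the error-queue path.
 */
#if 0
static void example_wifi_tx_status(struct sk_buff *skb, bool acked)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
		skb_complete_wifi_ack(skb, acked);
	else
		dev_kfree_skb_any(skb);
}
#endif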
| 3344 | |
Patrick Ohly | ac45f60 | 2009-02-12 05:03:37 +0000 | [diff] [blame] | 3345 | |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3346 | /** |
| 3347 | * skb_partial_csum_set - set up and verify partial csum values for packet |
| 3348 | * @skb: the skb to set |
| 3349 | * @start: the number of bytes after skb->data to start checksumming. |
| 3350 | * @off: the offset from start to place the checksum. |
| 3351 | * |
| 3352 | * For untrusted partially-checksummed packets, we need to make sure the values |
| 3353 | * for skb->csum_start and skb->csum_offset are valid so we don't oops. |
| 3354 | * |
| 3355 | * This function checks and sets those values and skb->ip_summed: if this |
| 3356 | * returns false you should drop the packet. |
| 3357 | */ |
| 3358 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) |
| 3359 | { |
Herbert Xu | 5ff8dda | 2009-06-04 01:22:01 +0000 | [diff] [blame] | 3360 | if (unlikely(start > skb_headlen(skb)) || |
| 3361 | unlikely((int)start + off > skb_headlen(skb) - 2)) { |
Joe Perches | e87cc47 | 2012-05-13 21:56:26 +0000 | [diff] [blame] | 3362 | net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", |
| 3363 | start, off, skb_headlen(skb)); |
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3364 | return false; |
| 3365 | } |
| 3366 | skb->ip_summed = CHECKSUM_PARTIAL; |
| 3367 | skb->csum_start = skb_headroom(skb) + start; |
| 3368 | skb->csum_offset = off; |
| 3369 | return true; |
| 3370 | } |
David S. Miller | b4ac530fc | 2009-02-10 02:09:24 -0800 | [diff] [blame] | 3371 | EXPORT_SYMBOL_GPL(skb_partial_csum_set); |
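
/*
 * Editor's sketch (not part of the original file): validating checksum
 * metadata that arrives from an untrusted source, per the comment
 * above; the packet is dropped when the offsets do not fit.
 */
#if 0
static int example_rx_csum(struct sk_buff *skb, u16 start, u16 off)
{
	if (!skb_partial_csum_set(skb, start, off)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	netif_rx(skb);	/* hand the now-trusted skb to the stack */
	return 0;
}
#endif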
Rusty Russell | f35d9d8 | 2008-02-04 23:49:54 -0500 | [diff] [blame] | 3372 | |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 3373 | void __skb_warn_lro_forwarding(const struct sk_buff *skb) |
| 3374 | { |
Joe Perches | e87cc47 | 2012-05-13 21:56:26 +0000 | [diff] [blame] | 3375 | net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", |
| 3376 | skb->dev->name); |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 3377 | } |
Ben Hutchings | 4497b07 | 2008-06-19 16:22:28 -0700 | [diff] [blame] | 3378 | EXPORT_SYMBOL(__skb_warn_lro_forwarding); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 3379 | |
| 3380 | void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) |
| 3381 | { |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 3382 | if (head_stolen) { |
| 3383 | skb_release_head_state(skb); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 3384 | kmem_cache_free(skbuff_head_cache, skb); |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 3385 | } else { |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 3386 | __kfree_skb(skb); |
Eric Dumazet | 3d861f6 | 2012-10-22 09:03:40 +0000 | [diff] [blame] | 3387 | } |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 3388 | } |
| 3389 | EXPORT_SYMBOL(kfree_skb_partial); |
| 3390 | |
| 3391 | /** |
| 3392 | * skb_try_coalesce - try to merge skb to prior one |
| 3393 | * @to: prior buffer |
| 3394 | * @from: buffer to add |
| 3395 | * @fragstolen: pointer to boolean |
Randy Dunlap | c6c4b97 | 2012-06-08 14:01:44 +0000 | [diff] [blame] | 3396 | * @delta_truesize: how much more was allocated than was requested |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 3397 | */ |
| 3398 | bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, |
| 3399 | bool *fragstolen, int *delta_truesize) |
| 3400 | { |
| 3401 | int i, delta, len = from->len; |
| 3402 | |
| 3403 | *fragstolen = false; |
| 3404 | |
| 3405 | if (skb_cloned(to)) |
| 3406 | return false; |
| 3407 | |
| 3408 | if (len <= skb_tailroom(to)) { |
| 3409 | BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); |
| 3410 | *delta_truesize = 0; |
| 3411 | return true; |
| 3412 | } |
| 3413 | |
| 3414 | if (skb_has_frag_list(to) || skb_has_frag_list(from)) |
| 3415 | return false; |
| 3416 | |
| 3417 | if (skb_headlen(from) != 0) { |
| 3418 | struct page *page; |
| 3419 | unsigned int offset; |
| 3420 | |
| 3421 | if (skb_shinfo(to)->nr_frags + |
| 3422 | skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) |
| 3423 | return false; |
| 3424 | |
| 3425 | if (skb_head_is_locked(from)) |
| 3426 | return false; |
| 3427 | |
| 3428 | delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); |
| 3429 | |
| 3430 | page = virt_to_head_page(from->head); |
| 3431 | offset = from->data - (unsigned char *)page_address(page); |
| 3432 | |
| 3433 | skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, |
| 3434 | page, offset, skb_headlen(from)); |
| 3435 | *fragstolen = true; |
| 3436 | } else { |
| 3437 | if (skb_shinfo(to)->nr_frags + |
| 3438 | skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) |
| 3439 | return false; |
| 3440 | |
Weiping Pan | f4b549a | 2012-09-28 20:15:30 +0000 | [diff] [blame] | 3441 | delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 3442 | } |
| 3443 | |
| 3444 | WARN_ON_ONCE(delta < len); |
| 3445 | |
| 3446 | memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, |
| 3447 | skb_shinfo(from)->frags, |
| 3448 | skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); |
| 3449 | skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; |
| 3450 | |
| 3451 | if (!skb_cloned(from)) |
| 3452 | skb_shinfo(from)->nr_frags = 0; |
| 3453 | |
Li RongQing | 8ea853f | 2012-09-18 16:53:21 +0000 | [diff] [blame] | 3454 | /* If the skb is not cloned, this loop does nothing, |
| 3455 | * since we set nr_frags to 0 above. |
| 3456 | */ |
Eric Dumazet | bad43ca | 2012-05-19 03:02:02 +0000 | [diff] [blame] | 3457 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) |
| 3458 | skb_frag_ref(from, i); |
| 3459 | |
| 3460 | to->truesize += delta; |
| 3461 | to->len += len; |
| 3462 | to->data_len += len; |
| 3463 | |
| 3464 | *delta_truesize = delta; |
| 3465 | return true; |
| 3466 | } |
| 3467 | EXPORT_SYMBOL(skb_try_coalesce); |
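
/*
 * Editor's sketch (not part of the original file): the intended calling
 * pattern (cf. tcp_try_coalesce) -- on success the caller accounts the
 * truesize delta and frees "from" via kfree_skb_partial(), honouring
 * the fragstolen result.
 */
#if 0
static bool example_coalesce(struct sock *sk, struct sk_buff *to,
			     struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	kfree_skb_partial(from, fragstolen);
	return true;
}
#endif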