blob: f262dbfc7f0684c34f44bc39ea7615aa8e82e3f7 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * PACKET - implements raw packet sockets.
7 *
Jesper Juhl02c30a82005-05-05 16:16:16 -07008 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 *
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +090012 * Fixes:
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +090035 * Ulises Alonso : Frame number limit removal and
Linus Torvalds1da177e2005-04-16 15:20:36 -070036 * packet_set_ring memory leak.
Eric W. Biederman0fb375f2005-09-21 00:11:37 -070037 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +090040 * byte arrays at the end of sockaddr_ll
Eric W. Biederman0fb375f2005-09-21 00:11:37 -070041 * and packet_mreq.
Johann Baudy69e3c752009-05-18 22:11:22 -070042 * Johann Baudy : Added TX RING.
chetan lokef6fb8f102011-08-19 10:18:16 +000043 * Chetan Loke : Implemented TPACKET_V3 block abstraction
44 * layer.
45 * Copyright (C) 2011, <lokec@ccs.neu.edu>
46 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070047 *
48 * This program is free software; you can redistribute it and/or
49 * modify it under the terms of the GNU General Public License
50 * as published by the Free Software Foundation; either version
51 * 2 of the License, or (at your option) any later version.
52 *
53 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +090054
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <linux/mm.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080057#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070058#include <linux/fcntl.h>
59#include <linux/socket.h>
60#include <linux/in.h>
61#include <linux/inet.h>
62#include <linux/netdevice.h>
63#include <linux/if_packet.h>
64#include <linux/wireless.h>
Herbert Xuffbc6112007-02-04 23:33:10 -080065#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#include <linux/kmod.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090067#include <linux/slab.h>
Neil Horman0e3125c2010-11-16 10:26:47 -080068#include <linux/vmalloc.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020069#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070070#include <net/ip.h>
71#include <net/protocol.h>
72#include <linux/skbuff.h>
73#include <net/sock.h>
74#include <linux/errno.h>
75#include <linux/timer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070076#include <asm/uaccess.h>
77#include <asm/ioctls.h>
78#include <asm/page.h>
Al Viroa1f8e7f72006-10-19 16:08:53 -040079#include <asm/cacheflush.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070080#include <asm/io.h>
81#include <linux/proc_fs.h>
82#include <linux/seq_file.h>
83#include <linux/poll.h>
84#include <linux/module.h>
85#include <linux/init.h>
Herbert Xu905db442009-01-30 14:12:06 -080086#include <linux/mutex.h>
Eric Dumazet05423b22009-10-26 18:40:35 -070087#include <linux/if_vlan.h>
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -080088#include <linux/virtio_net.h>
Richard Cochraned85b562010-04-07 22:41:28 +000089#include <linux/errqueue.h>
Scott McMillan614f60f2010-06-02 05:53:56 -070090#include <linux/net_tstamp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070091
92#ifdef CONFIG_INET
93#include <net/inet_common.h>
94#endif
95
Pavel Emelyanov2787b042012-08-13 05:49:39 +000096#include "internal.h"
97
Linus Torvalds1da177e2005-04-16 15:20:36 -070098/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070099 Assumptions:
100 - if device has no dev->hard_header routine, it adds and removes ll header
101 inside itself. In this case ll header is invisible outside of device,
102 but higher levels still should reserve dev->hard_header_len.
103 Some devices are enough clever to reallocate skb, when header
104 will not fit to reserved space (tunnel), another ones are silly
105 (PPP).
106 - packet socket receives packets with pulled ll header,
107 so that SOCK_RAW should push it back.
108
109On receive:
110-----------
111
112Incoming, dev->hard_header!=NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700113 mac_header -> ll header
114 data -> data
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115
116Outgoing, dev->hard_header!=NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700117 mac_header -> ll header
118 data -> ll header
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
120Incoming, dev->hard_header==NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700121 mac_header -> UNKNOWN position. It is very likely, that it points to ll
122 header. PPP makes it, that is wrong, because introduce
YOSHIFUJI Hideakidb0c58f2007-07-19 10:44:35 +0900123 assymetry between rx and tx paths.
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700124 data -> data
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125
126Outgoing, dev->hard_header==NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700127 mac_header -> data. ll header is still not built!
128 data -> data
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129
130Resume
131 If dev->hard_header==NULL we are unlikely to restore sensible ll header.
132
133
134On transmit:
135------------
136
137dev->hard_header != NULL
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700138 mac_header -> ll header
139 data -> ll header
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140
141dev->hard_header == NULL (ll header is added by device, we cannot control it)
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -0700142 mac_header -> data
143 data -> data
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144
145 We should set nh.raw on output to correct posistion,
146 packet classifier depends on it.
147 */
148
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149/* Private packet socket structures. */
150
Eric W. Biederman0fb375f2005-09-21 00:11:37 -0700151/* identical to struct packet_mreq except it has
152 * a longer address field.
153 */
Eric Dumazet40d4e3d2009-07-21 21:57:59 +0000154struct packet_mreq_max {
Eric W. Biederman0fb375f2005-09-21 00:11:37 -0700155 int mr_ifindex;
156 unsigned short mr_type;
157 unsigned short mr_alen;
158 unsigned char mr_address[MAX_ADDR_LEN];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159};
David S. Millera2efcfa2007-05-29 13:12:50 -0700160
chetan lokef6fb8f102011-08-19 10:18:16 +0000161static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
Johann Baudy69e3c752009-05-18 22:11:22 -0700162 int closing, int tx_ring);
163
chetan lokef6fb8f102011-08-19 10:18:16 +0000164
165#define V3_ALIGNMENT (8)
166
chetan lokebc59ba32011-08-25 10:43:30 +0000167#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
chetan lokef6fb8f102011-08-19 10:18:16 +0000168
169#define BLK_PLUS_PRIV(sz_of_priv) \
170 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
171
chetan lokef6fb8f102011-08-19 10:18:16 +0000172#define PGV_FROM_VMALLOC 1
Johann Baudy69e3c752009-05-18 22:11:22 -0700173
chetan lokef6fb8f102011-08-19 10:18:16 +0000174#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
175#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
176#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
177#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
178#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
179#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
180#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
181
Johann Baudy69e3c752009-05-18 22:11:22 -0700182struct packet_sock;
183static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184
chetan lokef6fb8f102011-08-19 10:18:16 +0000185static void *packet_previous_frame(struct packet_sock *po,
186 struct packet_ring_buffer *rb,
187 int status);
188static void packet_increment_head(struct packet_ring_buffer *buff);
chetan lokebc59ba32011-08-25 10:43:30 +0000189static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
190 struct tpacket_block_desc *);
191static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
chetan lokef6fb8f102011-08-19 10:18:16 +0000192 struct packet_sock *);
chetan lokebc59ba32011-08-25 10:43:30 +0000193static void prb_retire_current_block(struct tpacket_kbdq_core *,
chetan lokef6fb8f102011-08-19 10:18:16 +0000194 struct packet_sock *, unsigned int status);
chetan lokebc59ba32011-08-25 10:43:30 +0000195static int prb_queue_frozen(struct tpacket_kbdq_core *);
196static void prb_open_block(struct tpacket_kbdq_core *,
197 struct tpacket_block_desc *);
chetan lokef6fb8f102011-08-19 10:18:16 +0000198static void prb_retire_rx_blk_timer_expired(unsigned long);
chetan lokebc59ba32011-08-25 10:43:30 +0000199static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
200static void prb_init_blk_timer(struct packet_sock *,
201 struct tpacket_kbdq_core *,
202 void (*func) (unsigned long));
203static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
204static void prb_clear_rxhash(struct tpacket_kbdq_core *,
205 struct tpacket3_hdr *);
206static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
207 struct tpacket3_hdr *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208static void packet_flush_mclist(struct sock *sk);
209
Herbert Xuffbc6112007-02-04 23:33:10 -0800210struct packet_skb_cb {
211 unsigned int origlen;
212 union {
213 struct sockaddr_pkt pkt;
214 struct sockaddr_ll ll;
215 } sa;
216};
217
218#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
Herbert Xu8dc41942007-02-04 23:31:32 -0800219
chetan lokebc59ba32011-08-25 10:43:30 +0000220#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
chetan lokef6fb8f102011-08-19 10:18:16 +0000221#define GET_PBLOCK_DESC(x, bid) \
chetan lokebc59ba32011-08-25 10:43:30 +0000222 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
chetan lokef6fb8f102011-08-19 10:18:16 +0000223#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
chetan lokebc59ba32011-08-25 10:43:30 +0000224 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
chetan lokef6fb8f102011-08-19 10:18:16 +0000225#define GET_NEXT_PRB_BLK_NUM(x) \
226 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
227 ((x)->kactive_blk_num+1) : 0)
228
David S. Millerdc99f602011-07-05 01:45:05 -0700229static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
230static void __fanout_link(struct sock *sk, struct packet_sock *po);
231
David S. Millerce06b032011-07-04 01:44:29 -0700232/* register_prot_hook must be invoked with the po->bind_lock held,
233 * or from a context in which asynchronous accesses to the packet
234 * socket is not possible (packet_create()).
235 */
236static void register_prot_hook(struct sock *sk)
237{
238 struct packet_sock *po = pkt_sk(sk);
239 if (!po->running) {
David S. Millerdc99f602011-07-05 01:45:05 -0700240 if (po->fanout)
241 __fanout_link(sk, po);
242 else
243 dev_add_pack(&po->prot_hook);
David S. Millerce06b032011-07-04 01:44:29 -0700244 sock_hold(sk);
245 po->running = 1;
246 }
247}
248
249/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
250 * held. If the sync parameter is true, we will temporarily drop
251 * the po->bind_lock and do a synchronize_net to make sure no
252 * asynchronous packet processing paths still refer to the elements
253 * of po->prot_hook. If the sync parameter is false, it is the
254 * callers responsibility to take care of this.
255 */
256static void __unregister_prot_hook(struct sock *sk, bool sync)
257{
258 struct packet_sock *po = pkt_sk(sk);
259
260 po->running = 0;
David S. Millerdc99f602011-07-05 01:45:05 -0700261 if (po->fanout)
262 __fanout_unlink(sk, po);
263 else
264 __dev_remove_pack(&po->prot_hook);
David S. Millerce06b032011-07-04 01:44:29 -0700265 __sock_put(sk);
266
267 if (sync) {
268 spin_unlock(&po->bind_lock);
269 synchronize_net();
270 spin_lock(&po->bind_lock);
271 }
272}
273
274static void unregister_prot_hook(struct sock *sk, bool sync)
275{
276 struct packet_sock *po = pkt_sk(sk);
277
278 if (po->running)
279 __unregister_prot_hook(sk, sync);
280}
281
Changli Gaof6dafa92010-12-07 04:26:16 +0000282static inline __pure struct page *pgv_to_page(void *addr)
Changli Gao0af55bb2010-12-01 02:52:20 +0000283{
284 if (is_vmalloc_addr(addr))
285 return vmalloc_to_page(addr);
286 return virt_to_page(addr);
287}
288
Patrick McHardybbd6ef82008-07-14 22:50:15 -0700289static void __packet_set_status(struct packet_sock *po, void *frame, int status)
290{
291 union {
292 struct tpacket_hdr *h1;
293 struct tpacket2_hdr *h2;
294 void *raw;
295 } h;
296
297 h.raw = frame;
298 switch (po->tp_version) {
299 case TPACKET_V1:
300 h.h1->tp_status = status;
Changli Gao0af55bb2010-12-01 02:52:20 +0000301 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
Patrick McHardybbd6ef82008-07-14 22:50:15 -0700302 break;
303 case TPACKET_V2:
304 h.h2->tp_status = status;
Changli Gao0af55bb2010-12-01 02:52:20 +0000305 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
Patrick McHardybbd6ef82008-07-14 22:50:15 -0700306 break;
chetan lokef6fb8f102011-08-19 10:18:16 +0000307 case TPACKET_V3:
Johann Baudy69e3c752009-05-18 22:11:22 -0700308 default:
chetan lokef6fb8f102011-08-19 10:18:16 +0000309 WARN(1, "TPACKET version not supported.\n");
Johann Baudy69e3c752009-05-18 22:11:22 -0700310 BUG();
311 }
312
313 smp_wmb();
314}
315
316static int __packet_get_status(struct packet_sock *po, void *frame)
317{
318 union {
319 struct tpacket_hdr *h1;
320 struct tpacket2_hdr *h2;
321 void *raw;
322 } h;
323
324 smp_rmb();
325
326 h.raw = frame;
327 switch (po->tp_version) {
328 case TPACKET_V1:
Changli Gao0af55bb2010-12-01 02:52:20 +0000329 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
Johann Baudy69e3c752009-05-18 22:11:22 -0700330 return h.h1->tp_status;
331 case TPACKET_V2:
Changli Gao0af55bb2010-12-01 02:52:20 +0000332 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
Johann Baudy69e3c752009-05-18 22:11:22 -0700333 return h.h2->tp_status;
chetan lokef6fb8f102011-08-19 10:18:16 +0000334 case TPACKET_V3:
Johann Baudy69e3c752009-05-18 22:11:22 -0700335 default:
chetan lokef6fb8f102011-08-19 10:18:16 +0000336 WARN(1, "TPACKET version not supported.\n");
Johann Baudy69e3c752009-05-18 22:11:22 -0700337 BUG();
338 return 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -0700339 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340}
Johann Baudy69e3c752009-05-18 22:11:22 -0700341
342static void *packet_lookup_frame(struct packet_sock *po,
343 struct packet_ring_buffer *rb,
344 unsigned int position,
345 int status)
346{
347 unsigned int pg_vec_pos, frame_offset;
348 union {
349 struct tpacket_hdr *h1;
350 struct tpacket2_hdr *h2;
351 void *raw;
352 } h;
353
354 pg_vec_pos = position / rb->frames_per_block;
355 frame_offset = position % rb->frames_per_block;
356
Neil Horman0e3125c2010-11-16 10:26:47 -0800357 h.raw = rb->pg_vec[pg_vec_pos].buffer +
358 (frame_offset * rb->frame_size);
Johann Baudy69e3c752009-05-18 22:11:22 -0700359
360 if (status != __packet_get_status(po, h.raw))
361 return NULL;
362
363 return h.raw;
364}
365
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000366static void *packet_current_frame(struct packet_sock *po,
Johann Baudy69e3c752009-05-18 22:11:22 -0700367 struct packet_ring_buffer *rb,
368 int status)
369{
370 return packet_lookup_frame(po, rb, rb->head, status);
371}
372
chetan lokebc59ba32011-08-25 10:43:30 +0000373static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
chetan lokef6fb8f102011-08-19 10:18:16 +0000374{
375 del_timer_sync(&pkc->retire_blk_timer);
376}
377
378static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
379 int tx_ring,
380 struct sk_buff_head *rb_queue)
381{
chetan lokebc59ba32011-08-25 10:43:30 +0000382 struct tpacket_kbdq_core *pkc;
chetan lokef6fb8f102011-08-19 10:18:16 +0000383
384 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
385
386 spin_lock(&rb_queue->lock);
387 pkc->delete_blk_timer = 1;
388 spin_unlock(&rb_queue->lock);
389
390 prb_del_retire_blk_timer(pkc);
391}
392
393static void prb_init_blk_timer(struct packet_sock *po,
chetan lokebc59ba32011-08-25 10:43:30 +0000394 struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f102011-08-19 10:18:16 +0000395 void (*func) (unsigned long))
396{
397 init_timer(&pkc->retire_blk_timer);
398 pkc->retire_blk_timer.data = (long)po;
399 pkc->retire_blk_timer.function = func;
400 pkc->retire_blk_timer.expires = jiffies;
401}
402
403static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
404{
chetan lokebc59ba32011-08-25 10:43:30 +0000405 struct tpacket_kbdq_core *pkc;
chetan lokef6fb8f102011-08-19 10:18:16 +0000406
407 if (tx_ring)
408 BUG();
409
410 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
411 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
412}
413
414static int prb_calc_retire_blk_tmo(struct packet_sock *po,
415 int blk_size_in_bytes)
416{
417 struct net_device *dev;
418 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000419 struct ethtool_cmd ecmd;
420 int err;
parav.pandit@emulex.come440cf22012-06-27 03:56:12 +0000421 u32 speed;
chetan lokef6fb8f102011-08-19 10:18:16 +0000422
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000423 rtnl_lock();
424 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
425 if (unlikely(!dev)) {
426 rtnl_unlock();
chetan lokef6fb8f102011-08-19 10:18:16 +0000427 return DEFAULT_PRB_RETIRE_TOV;
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000428 }
429 err = __ethtool_get_settings(dev, &ecmd);
parav.pandit@emulex.come440cf22012-06-27 03:56:12 +0000430 speed = ethtool_cmd_speed(&ecmd);
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000431 rtnl_unlock();
432 if (!err) {
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000433 /*
434 * If the link speed is so slow you don't really
435 * need to worry about perf anyways
436 */
parav.pandit@emulex.come440cf22012-06-27 03:56:12 +0000437 if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
Jiri Pirko4bc71cb2011-09-03 03:34:30 +0000438 return DEFAULT_PRB_RETIRE_TOV;
parav.pandit@emulex.come440cf22012-06-27 03:56:12 +0000439 } else {
440 msec = 1;
441 div = speed / 1000;
chetan lokef6fb8f102011-08-19 10:18:16 +0000442 }
443 }
444
445 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
446
447 if (div)
448 mbits /= div;
449
450 tmo = mbits * msec;
451
452 if (div)
453 return tmo+1;
454 return tmo;
455}
456
chetan lokebc59ba32011-08-25 10:43:30 +0000457static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
chetan lokef6fb8f102011-08-19 10:18:16 +0000458 union tpacket_req_u *req_u)
459{
460 p1->feature_req_word = req_u->req3.tp_feature_req_word;
461}
462
463static void init_prb_bdqc(struct packet_sock *po,
464 struct packet_ring_buffer *rb,
465 struct pgv *pg_vec,
466 union tpacket_req_u *req_u, int tx_ring)
467{
chetan lokebc59ba32011-08-25 10:43:30 +0000468 struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
469 struct tpacket_block_desc *pbd;
chetan lokef6fb8f102011-08-19 10:18:16 +0000470
471 memset(p1, 0x0, sizeof(*p1));
472
473 p1->knxt_seq_num = 1;
474 p1->pkbdq = pg_vec;
chetan lokebc59ba32011-08-25 10:43:30 +0000475 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
Joe Perchese3192692012-06-03 17:41:40 +0000476 p1->pkblk_start = pg_vec[0].buffer;
chetan lokef6fb8f102011-08-19 10:18:16 +0000477 p1->kblk_size = req_u->req3.tp_block_size;
478 p1->knum_blocks = req_u->req3.tp_block_nr;
479 p1->hdrlen = po->tp_hdrlen;
480 p1->version = po->tp_version;
481 p1->last_kactive_blk_num = 0;
482 po->stats_u.stats3.tp_freeze_q_cnt = 0;
483 if (req_u->req3.tp_retire_blk_tov)
484 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
485 else
486 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
487 req_u->req3.tp_block_size);
488 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
489 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
490
491 prb_init_ft_ops(p1, req_u);
492 prb_setup_retire_blk_timer(po, tx_ring);
493 prb_open_block(p1, pbd);
494}
495
496/* Do NOT update the last_blk_num first.
497 * Assumes sk_buff_head lock is held.
498 */
chetan lokebc59ba32011-08-25 10:43:30 +0000499static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
chetan lokef6fb8f102011-08-19 10:18:16 +0000500{
501 mod_timer(&pkc->retire_blk_timer,
502 jiffies + pkc->tov_in_jiffies);
503 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
504}
505
506/*
507 * Timer logic:
508 * 1) We refresh the timer only when we open a block.
509 * By doing this we don't waste cycles refreshing the timer
510 * on packet-by-packet basis.
511 *
512 * With a 1MB block-size, on a 1Gbps line, it will take
513 * i) ~8 ms to fill a block + ii) memcpy etc.
514 * In this cut we are not accounting for the memcpy time.
515 *
516 * So, if the user sets the 'tmo' to 10ms then the timer
517 * will never fire while the block is still getting filled
518 * (which is what we want). However, the user could choose
519 * to close a block early and that's fine.
520 *
521 * But when the timer does fire, we check whether or not to refresh it.
522 * Since the tmo granularity is in msecs, it is not too expensive
523 * to refresh the timer, lets say every '8' msecs.
524 * Either the user can set the 'tmo' or we can derive it based on
525 * a) line-speed and b) block-size.
526 * prb_calc_retire_blk_tmo() calculates the tmo.
527 *
528 */
529static void prb_retire_rx_blk_timer_expired(unsigned long data)
530{
531 struct packet_sock *po = (struct packet_sock *)data;
chetan lokebc59ba32011-08-25 10:43:30 +0000532 struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
chetan lokef6fb8f102011-08-19 10:18:16 +0000533 unsigned int frozen;
chetan lokebc59ba32011-08-25 10:43:30 +0000534 struct tpacket_block_desc *pbd;
chetan lokef6fb8f102011-08-19 10:18:16 +0000535
536 spin_lock(&po->sk.sk_receive_queue.lock);
537
538 frozen = prb_queue_frozen(pkc);
539 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
540
541 if (unlikely(pkc->delete_blk_timer))
542 goto out;
543
544 /* We only need to plug the race when the block is partially filled.
545 * tpacket_rcv:
546 * lock(); increment BLOCK_NUM_PKTS; unlock()
547 * copy_bits() is in progress ...
548 * timer fires on other cpu:
549 * we can't retire the current block because copy_bits
550 * is in progress.
551 *
552 */
553 if (BLOCK_NUM_PKTS(pbd)) {
554 while (atomic_read(&pkc->blk_fill_in_prog)) {
555 /* Waiting for skb_copy_bits to finish... */
556 cpu_relax();
557 }
558 }
559
560 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
561 if (!frozen) {
562 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
563 if (!prb_dispatch_next_block(pkc, po))
564 goto refresh_timer;
565 else
566 goto out;
567 } else {
568 /* Case 1. Queue was frozen because user-space was
569 * lagging behind.
570 */
571 if (prb_curr_blk_in_use(pkc, pbd)) {
572 /*
573 * Ok, user-space is still behind.
574 * So just refresh the timer.
575 */
576 goto refresh_timer;
577 } else {
578 /* Case 2. queue was frozen,user-space caught up,
579 * now the link went idle && the timer fired.
580 * We don't have a block to close.So we open this
581 * block and restart the timer.
582 * opening a block thaws the queue,restarts timer
583 * Thawing/timer-refresh is a side effect.
584 */
585 prb_open_block(pkc, pbd);
586 goto out;
587 }
588 }
589 }
590
591refresh_timer:
592 _prb_refresh_rx_retire_blk_timer(pkc);
593
594out:
595 spin_unlock(&po->sk.sk_receive_queue.lock);
596}
597
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000598static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
chetan lokebc59ba32011-08-25 10:43:30 +0000599 struct tpacket_block_desc *pbd1, __u32 status)
chetan lokef6fb8f102011-08-19 10:18:16 +0000600{
601 /* Flush everything minus the block header */
602
603#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
604 u8 *start, *end;
605
606 start = (u8 *)pbd1;
607
608 /* Skip the block header(we know header WILL fit in 4K) */
609 start += PAGE_SIZE;
610
611 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
612 for (; start < end; start += PAGE_SIZE)
613 flush_dcache_page(pgv_to_page(start));
614
615 smp_wmb();
616#endif
617
618 /* Now update the block status. */
619
620 BLOCK_STATUS(pbd1) = status;
621
622 /* Flush the block header */
623
624#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
625 start = (u8 *)pbd1;
626 flush_dcache_page(pgv_to_page(start));
627
628 smp_wmb();
629#endif
630}
631
632/*
633 * Side effect:
634 *
635 * 1) flush the block
636 * 2) Increment active_blk_num
637 *
638 * Note:We DONT refresh the timer on purpose.
639 * Because almost always the next block will be opened.
640 */
chetan lokebc59ba32011-08-25 10:43:30 +0000641static void prb_close_block(struct tpacket_kbdq_core *pkc1,
642 struct tpacket_block_desc *pbd1,
chetan lokef6fb8f102011-08-19 10:18:16 +0000643 struct packet_sock *po, unsigned int stat)
644{
645 __u32 status = TP_STATUS_USER | stat;
646
647 struct tpacket3_hdr *last_pkt;
chetan lokebc59ba32011-08-25 10:43:30 +0000648 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
chetan lokef6fb8f102011-08-19 10:18:16 +0000649
650 if (po->stats.tp_drops)
651 status |= TP_STATUS_LOSING;
652
653 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
654 last_pkt->tp_next_offset = 0;
655
656 /* Get the ts of the last pkt */
657 if (BLOCK_NUM_PKTS(pbd1)) {
658 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
659 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
660 } else {
661 /* Ok, we tmo'd - so get the current time */
662 struct timespec ts;
663 getnstimeofday(&ts);
664 h1->ts_last_pkt.ts_sec = ts.tv_sec;
665 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
666 }
667
668 smp_wmb();
669
670 /* Flush the block */
671 prb_flush_block(pkc1, pbd1, status);
672
673 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
674}
675
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000676static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
chetan lokef6fb8f102011-08-19 10:18:16 +0000677{
678 pkc->reset_pending_on_curr_blk = 0;
679}
680
681/*
682 * Side effect of opening a block:
683 *
684 * 1) prb_queue is thawed.
685 * 2) retire_blk_timer is refreshed.
686 *
687 */
chetan lokebc59ba32011-08-25 10:43:30 +0000688static void prb_open_block(struct tpacket_kbdq_core *pkc1,
689 struct tpacket_block_desc *pbd1)
chetan lokef6fb8f102011-08-19 10:18:16 +0000690{
691 struct timespec ts;
chetan lokebc59ba32011-08-25 10:43:30 +0000692 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
chetan lokef6fb8f102011-08-19 10:18:16 +0000693
694 smp_rmb();
695
696 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
697
698 /* We could have just memset this but we will lose the
699 * flexibility of making the priv area sticky
700 */
701 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
702 BLOCK_NUM_PKTS(pbd1) = 0;
703 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
704 getnstimeofday(&ts);
705 h1->ts_first_pkt.ts_sec = ts.tv_sec;
706 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
707 pkc1->pkblk_start = (char *)pbd1;
Joe Perchese3192692012-06-03 17:41:40 +0000708 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
chetan lokef6fb8f102011-08-19 10:18:16 +0000709 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
710 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
711 pbd1->version = pkc1->version;
712 pkc1->prev = pkc1->nxt_offset;
713 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
714 prb_thaw_queue(pkc1);
715 _prb_refresh_rx_retire_blk_timer(pkc1);
716
717 smp_wmb();
718
719 return;
720 }
721
722 WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
723 pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
724 dump_stack();
725 BUG();
726}
727
728/*
729 * Queue freeze logic:
730 * 1) Assume tp_block_nr = 8 blocks.
731 * 2) At time 't0', user opens Rx ring.
732 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
733 * 4) user-space is either sleeping or processing block '0'.
734 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
735 * it will close block-7,loop around and try to fill block '0'.
736 * call-flow:
737 * __packet_lookup_frame_in_block
738 * prb_retire_current_block()
739 * prb_dispatch_next_block()
740 * |->(BLOCK_STATUS == USER) evaluates to true
741 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
742 * 6) Now there are two cases:
743 * 6.1) Link goes idle right after the queue is frozen.
744 * But remember, the last open_block() refreshed the timer.
745 * When this timer expires,it will refresh itself so that we can
746 * re-open block-0 in near future.
747 * 6.2) Link is busy and keeps on receiving packets. This is a simple
748 * case and __packet_lookup_frame_in_block will check if block-0
749 * is free and can now be re-used.
750 */
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000751static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f102011-08-19 10:18:16 +0000752 struct packet_sock *po)
753{
754 pkc->reset_pending_on_curr_blk = 1;
755 po->stats_u.stats3.tp_freeze_q_cnt++;
756}
757
758#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
759
760/*
761 * If the next block is free then we will dispatch it
762 * and return a good offset.
763 * Else, we will freeze the queue.
764 * So, caller must check the return value.
765 */
chetan lokebc59ba32011-08-25 10:43:30 +0000766static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f102011-08-19 10:18:16 +0000767 struct packet_sock *po)
768{
chetan lokebc59ba32011-08-25 10:43:30 +0000769 struct tpacket_block_desc *pbd;
chetan lokef6fb8f102011-08-19 10:18:16 +0000770
771 smp_rmb();
772
773 /* 1. Get current block num */
774 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
775
776 /* 2. If this block is currently in_use then freeze the queue */
777 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
778 prb_freeze_queue(pkc, po);
779 return NULL;
780 }
781
782 /*
783 * 3.
784 * open this block and return the offset where the first packet
785 * needs to get stored.
786 */
787 prb_open_block(pkc, pbd);
788 return (void *)pkc->nxt_offset;
789}
790
chetan lokebc59ba32011-08-25 10:43:30 +0000791static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f102011-08-19 10:18:16 +0000792 struct packet_sock *po, unsigned int status)
793{
chetan lokebc59ba32011-08-25 10:43:30 +0000794 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
chetan lokef6fb8f102011-08-19 10:18:16 +0000795
796 /* retire/close the current block */
797 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
798 /*
799 * Plug the case where copy_bits() is in progress on
800 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
801 * have space to copy the pkt in the current block and
802 * called prb_retire_current_block()
803 *
804 * We don't need to worry about the TMO case because
805 * the timer-handler already handled this case.
806 */
807 if (!(status & TP_STATUS_BLK_TMO)) {
808 while (atomic_read(&pkc->blk_fill_in_prog)) {
809 /* Waiting for skb_copy_bits to finish... */
810 cpu_relax();
811 }
812 }
813 prb_close_block(pkc, pbd, po, status);
814 return;
815 }
816
817 WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
818 dump_stack();
819 BUG();
820}
821
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000822static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
chetan lokebc59ba32011-08-25 10:43:30 +0000823 struct tpacket_block_desc *pbd)
chetan lokef6fb8f102011-08-19 10:18:16 +0000824{
825 return TP_STATUS_USER & BLOCK_STATUS(pbd);
826}
827
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000828static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
chetan lokef6fb8f102011-08-19 10:18:16 +0000829{
830 return pkc->reset_pending_on_curr_blk;
831}
832
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000833static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
chetan lokef6fb8f102011-08-19 10:18:16 +0000834{
chetan lokebc59ba32011-08-25 10:43:30 +0000835 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
chetan lokef6fb8f102011-08-19 10:18:16 +0000836 atomic_dec(&pkc->blk_fill_in_prog);
837}
838
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000839static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f102011-08-19 10:18:16 +0000840 struct tpacket3_hdr *ppd)
841{
842 ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
843}
844
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000845static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f102011-08-19 10:18:16 +0000846 struct tpacket3_hdr *ppd)
847{
848 ppd->hv1.tp_rxhash = 0;
849}
850
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000851static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f102011-08-19 10:18:16 +0000852 struct tpacket3_hdr *ppd)
853{
854 if (vlan_tx_tag_present(pkc->skb)) {
855 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
856 ppd->tp_status = TP_STATUS_VLAN_VALID;
857 } else {
danborkmann@iogearbox.net9e670302012-08-20 03:34:03 +0000858 ppd->hv1.tp_vlan_tci = 0;
859 ppd->tp_status = TP_STATUS_AVAILABLE;
chetan lokef6fb8f102011-08-19 10:18:16 +0000860 }
861}
862
chetan lokebc59ba32011-08-25 10:43:30 +0000863static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
chetan lokef6fb8f102011-08-19 10:18:16 +0000864 struct tpacket3_hdr *ppd)
865{
866 prb_fill_vlan_info(pkc, ppd);
867
868 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
869 prb_fill_rxhash(pkc, ppd);
870 else
871 prb_clear_rxhash(pkc, ppd);
872}
873
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000874static void prb_fill_curr_block(char *curr,
chetan lokebc59ba32011-08-25 10:43:30 +0000875 struct tpacket_kbdq_core *pkc,
876 struct tpacket_block_desc *pbd,
chetan lokef6fb8f102011-08-19 10:18:16 +0000877 unsigned int len)
878{
879 struct tpacket3_hdr *ppd;
880
881 ppd = (struct tpacket3_hdr *)curr;
882 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
883 pkc->prev = curr;
884 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
885 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
886 BLOCK_NUM_PKTS(pbd) += 1;
887 atomic_inc(&pkc->blk_fill_in_prog);
888 prb_run_all_ft_ops(pkc, ppd);
889}
890
891/* Assumes caller has the sk->rx_queue.lock */
892static void *__packet_lookup_frame_in_block(struct packet_sock *po,
893 struct sk_buff *skb,
894 int status,
895 unsigned int len
896 )
897{
chetan lokebc59ba32011-08-25 10:43:30 +0000898 struct tpacket_kbdq_core *pkc;
899 struct tpacket_block_desc *pbd;
chetan lokef6fb8f102011-08-19 10:18:16 +0000900 char *curr, *end;
901
Joe Perchese3192692012-06-03 17:41:40 +0000902 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
chetan lokef6fb8f102011-08-19 10:18:16 +0000903 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
904
905 /* Queue is frozen when user space is lagging behind */
906 if (prb_queue_frozen(pkc)) {
907 /*
908 * Check if that last block which caused the queue to freeze,
909 * is still in_use by user-space.
910 */
911 if (prb_curr_blk_in_use(pkc, pbd)) {
912 /* Can't record this packet */
913 return NULL;
914 } else {
915 /*
916 * Ok, the block was released by user-space.
917 * Now let's open that block.
918 * opening a block also thaws the queue.
919 * Thawing is a side effect.
920 */
921 prb_open_block(pkc, pbd);
922 }
923 }
924
925 smp_mb();
926 curr = pkc->nxt_offset;
927 pkc->skb = skb;
Joe Perchese3192692012-06-03 17:41:40 +0000928 end = (char *)pbd + pkc->kblk_size;
chetan lokef6fb8f102011-08-19 10:18:16 +0000929
930 /* first try the current block */
931 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
932 prb_fill_curr_block(curr, pkc, pbd, len);
933 return (void *)curr;
934 }
935
936 /* Ok, close the current block */
937 prb_retire_current_block(pkc, po, 0);
938
939 /* Now, try to dispatch the next block */
940 curr = (char *)prb_dispatch_next_block(pkc, po);
941 if (curr) {
942 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
943 prb_fill_curr_block(curr, pkc, pbd, len);
944 return (void *)curr;
945 }
946
947 /*
948 * No free blocks are available.user_space hasn't caught up yet.
949 * Queue was just frozen and now this packet will get dropped.
950 */
951 return NULL;
952}
953
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000954static void *packet_current_rx_frame(struct packet_sock *po,
chetan lokef6fb8f102011-08-19 10:18:16 +0000955 struct sk_buff *skb,
956 int status, unsigned int len)
957{
958 char *curr = NULL;
959 switch (po->tp_version) {
960 case TPACKET_V1:
961 case TPACKET_V2:
962 curr = packet_lookup_frame(po, &po->rx_ring,
963 po->rx_ring.head, status);
964 return curr;
965 case TPACKET_V3:
966 return __packet_lookup_frame_in_block(po, skb, status, len);
967 default:
968 WARN(1, "TPACKET version not supported\n");
969 BUG();
Ying Xue99aa3472012-08-06 16:27:10 +0000970 return NULL;
chetan lokef6fb8f102011-08-19 10:18:16 +0000971 }
972}
973
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000974static void *prb_lookup_block(struct packet_sock *po,
chetan lokef6fb8f102011-08-19 10:18:16 +0000975 struct packet_ring_buffer *rb,
976 unsigned int previous,
977 int status)
978{
chetan lokebc59ba32011-08-25 10:43:30 +0000979 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
980 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
chetan lokef6fb8f102011-08-19 10:18:16 +0000981
982 if (status != BLOCK_STATUS(pbd))
983 return NULL;
984 return pbd;
985}
986
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000987static int prb_previous_blk_num(struct packet_ring_buffer *rb)
chetan lokef6fb8f102011-08-19 10:18:16 +0000988{
989 unsigned int prev;
990 if (rb->prb_bdqc.kactive_blk_num)
991 prev = rb->prb_bdqc.kactive_blk_num-1;
992 else
993 prev = rb->prb_bdqc.knum_blocks-1;
994 return prev;
995}
996
997/* Assumes caller has held the rx_queue.lock */
Olof Johanssoneea49cc92011-11-02 11:00:49 +0000998static void *__prb_previous_block(struct packet_sock *po,
chetan lokef6fb8f102011-08-19 10:18:16 +0000999 struct packet_ring_buffer *rb,
1000 int status)
1001{
1002 unsigned int previous = prb_previous_blk_num(rb);
1003 return prb_lookup_block(po, rb, previous, status);
1004}
1005
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001006static void *packet_previous_rx_frame(struct packet_sock *po,
chetan lokef6fb8f102011-08-19 10:18:16 +00001007 struct packet_ring_buffer *rb,
1008 int status)
1009{
1010 if (po->tp_version <= TPACKET_V2)
1011 return packet_previous_frame(po, rb, status);
1012
1013 return __prb_previous_block(po, rb, status);
1014}
1015
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001016static void packet_increment_rx_head(struct packet_sock *po,
chetan lokef6fb8f102011-08-19 10:18:16 +00001017 struct packet_ring_buffer *rb)
1018{
1019 switch (po->tp_version) {
1020 case TPACKET_V1:
1021 case TPACKET_V2:
1022 return packet_increment_head(rb);
1023 case TPACKET_V3:
1024 default:
1025 WARN(1, "TPACKET version not supported.\n");
1026 BUG();
1027 return;
1028 }
1029}
1030
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001031static void *packet_previous_frame(struct packet_sock *po,
Johann Baudy69e3c752009-05-18 22:11:22 -07001032 struct packet_ring_buffer *rb,
1033 int status)
1034{
1035 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1036 return packet_lookup_frame(po, rb, previous, status);
1037}
1038
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001039static void packet_increment_head(struct packet_ring_buffer *buff)
Johann Baudy69e3c752009-05-18 22:11:22 -07001040{
1041 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1042}
1043
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044static void packet_sock_destruct(struct sock *sk)
1045{
Richard Cochraned85b562010-04-07 22:41:28 +00001046 skb_queue_purge(&sk->sk_error_queue);
1047
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001048 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1049 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050
1051 if (!sock_flag(sk, SOCK_DEAD)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001052 pr_err("Attempt to release alive packet socket: %p\n", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053 return;
1054 }
1055
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08001056 sk_refcnt_debug_dec(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057}
1058
David S. Millerdc99f602011-07-05 01:45:05 -07001059static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1060{
1061 int x = atomic_read(&f->rr_cur) + 1;
1062
1063 if (x >= num)
1064 x = 0;
1065
1066 return x;
1067}
1068
1069static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1070{
1071 u32 idx, hash = skb->rxhash;
1072
1073 idx = ((u64)hash * num) >> 32;
1074
1075 return f->arr[idx];
1076}
1077
1078static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1079{
1080 int cur, old;
1081
1082 cur = atomic_read(&f->rr_cur);
1083 while ((old = atomic_cmpxchg(&f->rr_cur, cur,
1084 fanout_rr_next(f, num))) != cur)
1085 cur = old;
1086 return f->arr[cur];
1087}
1088
David S. Miller95ec3eb2011-07-06 01:56:38 -07001089static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1090{
1091 unsigned int cpu = smp_processor_id();
1092
1093 return f->arr[cpu % num];
1094}
1095
David S. Miller95ec3eb2011-07-06 01:56:38 -07001096static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1097 struct packet_type *pt, struct net_device *orig_dev)
David S. Millerdc99f602011-07-05 01:45:05 -07001098{
1099 struct packet_fanout *f = pt->af_packet_priv;
1100 unsigned int num = f->num_members;
1101 struct packet_sock *po;
1102 struct sock *sk;
1103
1104 if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1105 !num) {
1106 kfree_skb(skb);
1107 return 0;
1108 }
1109
David S. Miller95ec3eb2011-07-06 01:56:38 -07001110 switch (f->type) {
1111 case PACKET_FANOUT_HASH:
1112 default:
1113 if (f->defrag) {
Eric Dumazetbc416d92011-10-06 10:28:31 +00001114 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
David S. Miller95ec3eb2011-07-06 01:56:38 -07001115 if (!skb)
1116 return 0;
1117 }
1118 skb_get_rxhash(skb);
1119 sk = fanout_demux_hash(f, skb, num);
1120 break;
1121 case PACKET_FANOUT_LB:
1122 sk = fanout_demux_lb(f, skb, num);
1123 break;
1124 case PACKET_FANOUT_CPU:
1125 sk = fanout_demux_cpu(f, skb, num);
1126 break;
David S. Miller7736d332011-07-05 01:43:20 -07001127 }
1128
David S. Millerdc99f602011-07-05 01:45:05 -07001129 po = pkt_sk(sk);
1130
1131 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1132}
1133
Pavel Emelyanovfff33212012-08-16 05:36:48 +00001134DEFINE_MUTEX(fanout_mutex);
1135EXPORT_SYMBOL_GPL(fanout_mutex);
David S. Millerdc99f602011-07-05 01:45:05 -07001136static LIST_HEAD(fanout_list);
1137
1138static void __fanout_link(struct sock *sk, struct packet_sock *po)
1139{
1140 struct packet_fanout *f = po->fanout;
1141
1142 spin_lock(&f->lock);
1143 f->arr[f->num_members] = sk;
1144 smp_wmb();
1145 f->num_members++;
1146 spin_unlock(&f->lock);
1147}
1148
1149static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1150{
1151 struct packet_fanout *f = po->fanout;
1152 int i;
1153
1154 spin_lock(&f->lock);
1155 for (i = 0; i < f->num_members; i++) {
1156 if (f->arr[i] == sk)
1157 break;
1158 }
1159 BUG_ON(i >= f->num_members);
1160 f->arr[i] = f->arr[f->num_members - 1];
1161 f->num_members--;
1162 spin_unlock(&f->lock);
1163}
1164
Fengguang Wua0dfb262012-08-23 19:51:21 +08001165static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001166{
1167 if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
1168 return true;
1169
1170 return false;
1171}
1172
David S. Miller7736d332011-07-05 01:43:20 -07001173static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
David S. Millerdc99f602011-07-05 01:45:05 -07001174{
1175 struct packet_sock *po = pkt_sk(sk);
1176 struct packet_fanout *f, *match;
David S. Miller7736d332011-07-05 01:43:20 -07001177 u8 type = type_flags & 0xff;
1178 u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
David S. Millerdc99f602011-07-05 01:45:05 -07001179 int err;
1180
1181 switch (type) {
1182 case PACKET_FANOUT_HASH:
1183 case PACKET_FANOUT_LB:
David S. Miller95ec3eb2011-07-06 01:56:38 -07001184 case PACKET_FANOUT_CPU:
David S. Millerdc99f602011-07-05 01:45:05 -07001185 break;
1186 default:
1187 return -EINVAL;
1188 }
1189
1190 if (!po->running)
1191 return -EINVAL;
1192
1193 if (po->fanout)
1194 return -EALREADY;
1195
1196 mutex_lock(&fanout_mutex);
1197 match = NULL;
1198 list_for_each_entry(f, &fanout_list, list) {
1199 if (f->id == id &&
1200 read_pnet(&f->net) == sock_net(sk)) {
1201 match = f;
1202 break;
1203 }
1204 }
Eric Dumazetafe62c62011-07-07 06:41:29 -07001205 err = -EINVAL;
David S. Miller7736d332011-07-05 01:43:20 -07001206 if (match && match->defrag != defrag)
Eric Dumazetafe62c62011-07-07 06:41:29 -07001207 goto out;
David S. Millerdc99f602011-07-05 01:45:05 -07001208 if (!match) {
Eric Dumazetafe62c62011-07-07 06:41:29 -07001209 err = -ENOMEM;
David S. Millerdc99f602011-07-05 01:45:05 -07001210 match = kzalloc(sizeof(*match), GFP_KERNEL);
Eric Dumazetafe62c62011-07-07 06:41:29 -07001211 if (!match)
1212 goto out;
1213 write_pnet(&match->net, sock_net(sk));
1214 match->id = id;
1215 match->type = type;
1216 match->defrag = defrag;
1217 atomic_set(&match->rr_cur, 0);
1218 INIT_LIST_HEAD(&match->list);
1219 spin_lock_init(&match->lock);
1220 atomic_set(&match->sk_ref, 0);
1221 match->prot_hook.type = po->prot_hook.type;
1222 match->prot_hook.dev = po->prot_hook.dev;
1223 match->prot_hook.func = packet_rcv_fanout;
1224 match->prot_hook.af_packet_priv = match;
Eric Leblondc0de08d2012-08-16 22:02:58 +00001225 match->prot_hook.id_match = match_fanout_group;
Eric Dumazetafe62c62011-07-07 06:41:29 -07001226 dev_add_pack(&match->prot_hook);
1227 list_add(&match->list, &fanout_list);
1228 }
1229 err = -EINVAL;
1230 if (match->type == type &&
1231 match->prot_hook.type == po->prot_hook.type &&
1232 match->prot_hook.dev == po->prot_hook.dev) {
1233 err = -ENOSPC;
1234 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1235 __dev_remove_pack(&po->prot_hook);
1236 po->fanout = match;
1237 atomic_inc(&match->sk_ref);
1238 __fanout_link(sk, po);
1239 err = 0;
David S. Millerdc99f602011-07-05 01:45:05 -07001240 }
1241 }
Eric Dumazetafe62c62011-07-07 06:41:29 -07001242out:
David S. Millerdc99f602011-07-05 01:45:05 -07001243 mutex_unlock(&fanout_mutex);
1244 return err;
1245}
1246
1247static void fanout_release(struct sock *sk)
1248{
1249 struct packet_sock *po = pkt_sk(sk);
1250 struct packet_fanout *f;
1251
1252 f = po->fanout;
1253 if (!f)
1254 return;
1255
Pavel Emelyanovfff33212012-08-16 05:36:48 +00001256 mutex_lock(&fanout_mutex);
David S. Millerdc99f602011-07-05 01:45:05 -07001257 po->fanout = NULL;
1258
David S. Millerdc99f602011-07-05 01:45:05 -07001259 if (atomic_dec_and_test(&f->sk_ref)) {
1260 list_del(&f->list);
1261 dev_remove_pack(&f->prot_hook);
1262 kfree(f);
1263 }
1264 mutex_unlock(&fanout_mutex);
1265}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001267static const struct proto_ops packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001269static const struct proto_ops packet_ops_spkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001271static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1272 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273{
1274 struct sock *sk;
1275 struct sockaddr_pkt *spkt;
1276
1277 /*
1278 * When we registered the protocol we saved the socket in the data
1279 * field for just this event.
1280 */
1281
1282 sk = pt->af_packet_priv;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001283
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 /*
1285 * Yank back the headers [hope the device set this
1286 * right or kerboom...]
1287 *
1288 * Incoming packets have ll header pulled,
1289 * push it back.
1290 *
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001291 * For outgoing ones skb->data == skb_mac_header(skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 * so that this procedure is noop.
1293 */
1294
1295 if (skb->pkt_type == PACKET_LOOPBACK)
1296 goto out;
1297
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001298 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001299 goto out;
1300
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001301 skb = skb_share_check(skb, GFP_ATOMIC);
1302 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 goto oom;
1304
1305 /* drop any routing info */
Eric Dumazetadf30902009-06-02 05:19:30 +00001306 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307
Phil Oester84531c22005-07-12 11:57:52 -07001308 /* drop conntrack reference */
1309 nf_reset(skb);
1310
Herbert Xuffbc6112007-02-04 23:33:10 -08001311 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001313 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314
1315 /*
1316 * The SOCK_PACKET socket receives _all_ frames.
1317 */
1318
1319 spkt->spkt_family = dev->type;
1320 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1321 spkt->spkt_protocol = skb->protocol;
1322
1323 /*
1324 * Charge the memory to the socket. This is done specifically
1325 * to prevent sockets using all the memory up.
1326 */
1327
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001328 if (sock_queue_rcv_skb(sk, skb) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 return 0;
1330
1331out:
1332 kfree_skb(skb);
1333oom:
1334 return 0;
1335}
1336
1337
1338/*
1339 * Output a raw packet to a device layer. This bypasses all the other
1340 * protocol layers and you must therefore supply it with a complete frame
1341 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001342
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1344 struct msghdr *msg, size_t len)
1345{
1346 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001347 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001348 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 struct net_device *dev;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001350 __be16 proto = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 int err;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001352 int extra_len = 0;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001353
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001355 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 */
1357
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001358 if (saddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 if (msg->msg_namelen < sizeof(struct sockaddr))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001360 return -EINVAL;
1361 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1362 proto = saddr->spkt_protocol;
1363 } else
1364 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
1366 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001367 * Find the device first to size check it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 */
1369
danborkmann@iogearbox.netde74e922012-06-10 08:59:28 +00001370 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001371retry:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001372 rcu_read_lock();
1373 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 err = -ENODEV;
1375 if (dev == NULL)
1376 goto out_unlock;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001377
David S. Millerd5e76b02007-01-25 19:30:36 -08001378 err = -ENETDOWN;
1379 if (!(dev->flags & IFF_UP))
1380 goto out_unlock;
1381
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 /*
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001383 * You may not queue a frame bigger than the mtu. This is the lowest level
1384 * raw protocol and you must do your own fragmentation at this level.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001386
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001387 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1388 if (!netif_supports_nofcs(dev)) {
1389 err = -EPROTONOSUPPORT;
1390 goto out_unlock;
1391 }
1392 extra_len = 4; /* We're doing our own CRC */
1393 }
1394
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001396 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 goto out_unlock;
1398
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001399 if (!skb) {
1400 size_t reserved = LL_RESERVED_SPACE(dev);
Herbert Xu4ce40912011-11-18 02:20:05 +00001401 int tlen = dev->needed_tailroom;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001402 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001404 rcu_read_unlock();
Herbert Xu4ce40912011-11-18 02:20:05 +00001405 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001406 if (skb == NULL)
1407 return -ENOBUFS;
1408 /* FIXME: Save some space for broken drivers that write a hard
1409 * header at transmission time by themselves. PPP is the notable
1410 * one here. This should really be fixed at the driver level.
1411 */
1412 skb_reserve(skb, reserved);
1413 skb_reset_network_header(skb);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001414
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001415 /* Try to align data part correctly */
1416 if (hhlen) {
1417 skb->data -= hhlen;
1418 skb->tail -= hhlen;
1419 if (len < hhlen)
1420 skb_reset_network_header(skb);
1421 }
1422 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1423 if (err)
1424 goto out_free;
1425 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 }
1427
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001428 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00001429 /* Earlier code assumed this would be a VLAN pkt,
1430 * double-check this now that we have the actual
1431 * packet in hand.
1432 */
1433 struct ethhdr *ehdr;
1434 skb_reset_mac_header(skb);
1435 ehdr = eth_hdr(skb);
1436 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1437 err = -EMSGSIZE;
1438 goto out_unlock;
1439 }
1440 }
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001441
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 skb->protocol = proto;
1443 skb->dev = dev;
1444 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001445 skb->mark = sk->sk_mark;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00001446 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00001447 if (err < 0)
1448 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001450 if (unlikely(extra_len == 4))
1451 skb->no_fcs = 1;
1452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 dev_queue_xmit(skb);
Eric Dumazet654d1f82009-11-02 10:43:32 +01001454 rcu_read_unlock();
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001455 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457out_unlock:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001458 rcu_read_unlock();
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001459out_free:
1460 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 return err;
1462}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001464static unsigned int run_filter(const struct sk_buff *skb,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001465 const struct sock *sk,
David S. Millerdbcb5852007-01-24 15:21:02 -08001466 unsigned int res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467{
1468 struct sk_filter *filter;
1469
Eric Dumazet80f8f102011-01-18 07:46:52 +00001470 rcu_read_lock();
1471 filter = rcu_dereference(sk->sk_filter);
David S. Millerdbcb5852007-01-24 15:21:02 -08001472 if (filter != NULL)
Eric Dumazet0a148422011-04-20 09:27:32 +00001473 res = SK_RUN_FILTER(filter, skb);
Eric Dumazet80f8f102011-01-18 07:46:52 +00001474 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
David S. Millerdbcb5852007-01-24 15:21:02 -08001476 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477}
1478
1479/*
Eric Dumazet62ab0812010-12-06 20:50:09 +00001480 * This function makes lazy skb cloning in hope that most of packets
1481 * are discarded by BPF.
1482 *
1483 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1484 * and skb->cb are mangled. It works because (and until) packets
1485 * falling here are owned by current CPU. Output packets are cloned
1486 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1487 * sequencially, so that if we return skb to original state on exit,
1488 * we will not harm anyone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 */
1490
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001491static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1492 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493{
1494 struct sock *sk;
1495 struct sockaddr_ll *sll;
1496 struct packet_sock *po;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001497 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001499 unsigned int snaplen, res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
1501 if (skb->pkt_type == PACKET_LOOPBACK)
1502 goto drop;
1503
1504 sk = pt->af_packet_priv;
1505 po = pkt_sk(sk);
1506
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001507 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001508 goto drop;
1509
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 skb->dev = dev;
1511
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001512 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 /* The device has an explicit notion of ll header,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001514 * exported to higher levels.
1515 *
1516 * Otherwise, the device hides details of its frame
1517 * structure, so that corresponding packet head is
1518 * never delivered to user.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 */
1520 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001521 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 else if (skb->pkt_type == PACKET_OUTGOING) {
1523 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001524 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 }
1526 }
1527
1528 snaplen = skb->len;
1529
David S. Millerdbcb5852007-01-24 15:21:02 -08001530 res = run_filter(skb, sk, snaplen);
1531 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001532 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001533 if (snaplen > res)
1534 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001536 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 goto drop_n_acct;
1538
1539 if (skb_shared(skb)) {
1540 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1541 if (nskb == NULL)
1542 goto drop_n_acct;
1543
1544 if (skb_head != skb->data) {
1545 skb->data = skb_head;
1546 skb->len = skb_len;
1547 }
Eric Dumazetabc4e4f2012-04-19 02:24:42 +00001548 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 skb = nskb;
1550 }
1551
Herbert Xuffbc6112007-02-04 23:33:10 -08001552 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1553 sizeof(skb->cb));
1554
1555 sll = &PACKET_SKB_CB(skb)->sa.ll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 sll->sll_family = AF_PACKET;
1557 sll->sll_hatype = dev->type;
1558 sll->sll_protocol = skb->protocol;
1559 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001560 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001561 sll->sll_ifindex = orig_dev->ifindex;
1562 else
1563 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001565 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
Herbert Xuffbc6112007-02-04 23:33:10 -08001567 PACKET_SKB_CB(skb)->origlen = skb->len;
Herbert Xu8dc41942007-02-04 23:31:32 -08001568
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 if (pskb_trim(skb, snaplen))
1570 goto drop_n_acct;
1571
1572 skb_set_owner_r(skb, sk);
1573 skb->dev = NULL;
Eric Dumazetadf30902009-06-02 05:19:30 +00001574 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575
Phil Oester84531c22005-07-12 11:57:52 -07001576 /* drop conntrack reference */
1577 nf_reset(skb);
1578
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 spin_lock(&sk->sk_receive_queue.lock);
1580 po->stats.tp_packets++;
Neil Horman3b885782009-10-12 13:26:31 -07001581 skb->dropcount = atomic_read(&sk->sk_drops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 __skb_queue_tail(&sk->sk_receive_queue, skb);
1583 spin_unlock(&sk->sk_receive_queue.lock);
1584 sk->sk_data_ready(sk, skb->len);
1585 return 0;
1586
1587drop_n_acct:
Willem de Bruijn7091fbd2011-09-30 10:38:28 +00001588 spin_lock(&sk->sk_receive_queue.lock);
1589 po->stats.tp_drops++;
1590 atomic_inc(&sk->sk_drops);
1591 spin_unlock(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592
1593drop_n_restore:
1594 if (skb_head != skb->data && skb_shared(skb)) {
1595 skb->data = skb_head;
1596 skb->len = skb_len;
1597 }
1598drop:
Neil Hormanead2ceb2009-03-11 09:49:55 +00001599 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 return 0;
1601}
1602
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001603static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1604 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605{
1606 struct sock *sk;
1607 struct packet_sock *po;
1608 struct sockaddr_ll *sll;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001609 union {
1610 struct tpacket_hdr *h1;
1611 struct tpacket2_hdr *h2;
chetan lokef6fb8f102011-08-19 10:18:16 +00001612 struct tpacket3_hdr *h3;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001613 void *raw;
1614 } h;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001615 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001617 unsigned int snaplen, res;
chetan lokef6fb8f102011-08-19 10:18:16 +00001618 unsigned long status = TP_STATUS_USER;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001619 unsigned short macoff, netoff, hdrlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 struct sk_buff *copy_skb = NULL;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001621 struct timeval tv;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001622 struct timespec ts;
Scott McMillan614f60f2010-06-02 05:53:56 -07001623 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624
1625 if (skb->pkt_type == PACKET_LOOPBACK)
1626 goto drop;
1627
1628 sk = pt->af_packet_priv;
1629 po = pkt_sk(sk);
1630
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001631 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001632 goto drop;
1633
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001634 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001636 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 else if (skb->pkt_type == PACKET_OUTGOING) {
1638 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001639 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 }
1641 }
1642
Herbert Xu8dc41942007-02-04 23:31:32 -08001643 if (skb->ip_summed == CHECKSUM_PARTIAL)
1644 status |= TP_STATUS_CSUMNOTREADY;
1645
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 snaplen = skb->len;
1647
David S. Millerdbcb5852007-01-24 15:21:02 -08001648 res = run_filter(skb, sk, snaplen);
1649 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001650 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001651 if (snaplen > res)
1652 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
1654 if (sk->sk_type == SOCK_DGRAM) {
Patrick McHardy8913336a2008-07-18 18:05:19 -07001655 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1656 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 } else {
Eric Dumazet95c96172012-04-15 05:58:06 +00001658 unsigned int maclen = skb_network_offset(skb);
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001659 netoff = TPACKET_ALIGN(po->tp_hdrlen +
Patrick McHardy8913336a2008-07-18 18:05:19 -07001660 (maclen < 16 ? 16 : maclen)) +
1661 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 macoff = netoff - maclen;
1663 }
chetan lokef6fb8f102011-08-19 10:18:16 +00001664 if (po->tp_version <= TPACKET_V2) {
1665 if (macoff + snaplen > po->rx_ring.frame_size) {
1666 if (po->copy_thresh &&
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001667 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
chetan lokef6fb8f102011-08-19 10:18:16 +00001668 if (skb_shared(skb)) {
1669 copy_skb = skb_clone(skb, GFP_ATOMIC);
1670 } else {
1671 copy_skb = skb_get(skb);
1672 skb_head = skb->data;
1673 }
1674 if (copy_skb)
1675 skb_set_owner_r(copy_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 }
chetan lokef6fb8f102011-08-19 10:18:16 +00001677 snaplen = po->rx_ring.frame_size - macoff;
1678 if ((int)snaplen < 0)
1679 snaplen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 spin_lock(&sk->sk_receive_queue.lock);
chetan lokef6fb8f102011-08-19 10:18:16 +00001683 h.raw = packet_current_rx_frame(po, skb,
1684 TP_STATUS_KERNEL, (macoff+snaplen));
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001685 if (!h.raw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 goto ring_is_full;
chetan lokef6fb8f102011-08-19 10:18:16 +00001687 if (po->tp_version <= TPACKET_V2) {
1688 packet_increment_rx_head(po, &po->rx_ring);
1689 /*
1690 * LOSING will be reported till you read the stats,
1691 * because it's COR - Clear On Read.
1692 * Anyways, moving it for V1/V2 only as V3 doesn't need this
1693 * at packet level.
1694 */
1695 if (po->stats.tp_drops)
1696 status |= TP_STATUS_LOSING;
1697 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 po->stats.tp_packets++;
1699 if (copy_skb) {
1700 status |= TP_STATUS_COPY;
1701 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1702 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 spin_unlock(&sk->sk_receive_queue.lock);
1704
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001705 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001707 switch (po->tp_version) {
1708 case TPACKET_V1:
1709 h.h1->tp_len = skb->len;
1710 h.h1->tp_snaplen = snaplen;
1711 h.h1->tp_mac = macoff;
1712 h.h1->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001713 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1714 && shhwtstamps->syststamp.tv64)
1715 tv = ktime_to_timeval(shhwtstamps->syststamp);
1716 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1717 && shhwtstamps->hwtstamp.tv64)
1718 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1719 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001720 tv = ktime_to_timeval(skb->tstamp);
1721 else
1722 do_gettimeofday(&tv);
1723 h.h1->tp_sec = tv.tv_sec;
1724 h.h1->tp_usec = tv.tv_usec;
1725 hdrlen = sizeof(*h.h1);
1726 break;
1727 case TPACKET_V2:
1728 h.h2->tp_len = skb->len;
1729 h.h2->tp_snaplen = snaplen;
1730 h.h2->tp_mac = macoff;
1731 h.h2->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001732 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1733 && shhwtstamps->syststamp.tv64)
1734 ts = ktime_to_timespec(shhwtstamps->syststamp);
1735 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1736 && shhwtstamps->hwtstamp.tv64)
1737 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1738 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001739 ts = ktime_to_timespec(skb->tstamp);
1740 else
1741 getnstimeofday(&ts);
1742 h.h2->tp_sec = ts.tv_sec;
1743 h.h2->tp_nsec = ts.tv_nsec;
Ben Greeara3bcc232011-06-01 06:49:10 +00001744 if (vlan_tx_tag_present(skb)) {
1745 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1746 status |= TP_STATUS_VLAN_VALID;
1747 } else {
1748 h.h2->tp_vlan_tci = 0;
1749 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07001750 h.h2->tp_padding = 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001751 hdrlen = sizeof(*h.h2);
1752 break;
chetan lokef6fb8f102011-08-19 10:18:16 +00001753 case TPACKET_V3:
1754 /* tp_nxt_offset,vlan are already populated above.
1755 * So DONT clear those fields here
1756 */
1757 h.h3->tp_status |= status;
1758 h.h3->tp_len = skb->len;
1759 h.h3->tp_snaplen = snaplen;
1760 h.h3->tp_mac = macoff;
1761 h.h3->tp_net = netoff;
1762 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1763 && shhwtstamps->syststamp.tv64)
1764 ts = ktime_to_timespec(shhwtstamps->syststamp);
1765 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1766 && shhwtstamps->hwtstamp.tv64)
1767 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1768 else if (skb->tstamp.tv64)
1769 ts = ktime_to_timespec(skb->tstamp);
1770 else
1771 getnstimeofday(&ts);
1772 h.h3->tp_sec = ts.tv_sec;
1773 h.h3->tp_nsec = ts.tv_nsec;
1774 hdrlen = sizeof(*h.h3);
1775 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001776 default:
1777 BUG();
1778 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001780 sll = h.raw + TPACKET_ALIGN(hdrlen);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001781 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 sll->sll_family = AF_PACKET;
1783 sll->sll_hatype = dev->type;
1784 sll->sll_protocol = skb->protocol;
1785 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001786 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001787 sll->sll_ifindex = orig_dev->ifindex;
1788 else
1789 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Ralf Baechlee16aa202006-12-07 00:11:33 -08001791 smp_mb();
Changli Gaof6dafa92010-12-07 04:26:16 +00001792#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 {
Changli Gao0af55bb2010-12-01 02:52:20 +00001794 u8 *start, *end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795
chetan lokef6fb8f102011-08-19 10:18:16 +00001796 if (po->tp_version <= TPACKET_V2) {
1797 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1798 + macoff + snaplen);
1799 for (start = h.raw; start < end; start += PAGE_SIZE)
1800 flush_dcache_page(pgv_to_page(start));
1801 }
Chetan Lokecc9f01b2011-07-14 08:36:33 -07001802 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 }
Changli Gaof6dafa92010-12-07 04:26:16 +00001804#endif
chetan lokef6fb8f102011-08-19 10:18:16 +00001805 if (po->tp_version <= TPACKET_V2)
1806 __packet_set_status(po, h.raw, status);
1807 else
1808 prb_clear_blk_fill_status(&po->rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
1810 sk->sk_data_ready(sk, 0);
1811
1812drop_n_restore:
1813 if (skb_head != skb->data && skb_shared(skb)) {
1814 skb->data = skb_head;
1815 skb->len = skb_len;
1816 }
1817drop:
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001818 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 return 0;
1820
1821ring_is_full:
1822 po->stats.tp_drops++;
1823 spin_unlock(&sk->sk_receive_queue.lock);
1824
1825 sk->sk_data_ready(sk, 0);
Wei Yongjunacb5d752009-02-25 00:36:42 +00001826 kfree_skb(copy_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 goto drop_n_restore;
1828}
1829
Johann Baudy69e3c752009-05-18 22:11:22 -07001830static void tpacket_destruct_skb(struct sk_buff *skb)
1831{
1832 struct packet_sock *po = pkt_sk(skb->sk);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001833 void *ph;
Johann Baudy69e3c752009-05-18 22:11:22 -07001834
Johann Baudy69e3c752009-05-18 22:11:22 -07001835 if (likely(po->tx_ring.pg_vec)) {
1836 ph = skb_shinfo(skb)->destructor_arg;
Johann Baudy69e3c752009-05-18 22:11:22 -07001837 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1838 atomic_dec(&po->tx_ring.pending);
1839 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1840 }
1841
1842 sock_wfree(skb);
1843}
1844
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001845static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1846 void *frame, struct net_device *dev, int size_max,
Herbert Xuae641942011-11-18 02:20:04 +00001847 __be16 proto, unsigned char *addr, int hlen)
Johann Baudy69e3c752009-05-18 22:11:22 -07001848{
1849 union {
1850 struct tpacket_hdr *h1;
1851 struct tpacket2_hdr *h2;
1852 void *raw;
1853 } ph;
1854 int to_write, offset, len, tp_len, nr_frags, len_max;
1855 struct socket *sock = po->sk.sk_socket;
1856 struct page *page;
1857 void *data;
1858 int err;
1859
1860 ph.raw = frame;
1861
1862 skb->protocol = proto;
1863 skb->dev = dev;
1864 skb->priority = po->sk.sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001865 skb->mark = po->sk.sk_mark;
Johann Baudy69e3c752009-05-18 22:11:22 -07001866 skb_shinfo(skb)->destructor_arg = ph.raw;
1867
1868 switch (po->tp_version) {
1869 case TPACKET_V2:
1870 tp_len = ph.h2->tp_len;
1871 break;
1872 default:
1873 tp_len = ph.h1->tp_len;
1874 break;
1875 }
1876 if (unlikely(tp_len > size_max)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001877 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
Johann Baudy69e3c752009-05-18 22:11:22 -07001878 return -EMSGSIZE;
1879 }
1880
Herbert Xuae641942011-11-18 02:20:04 +00001881 skb_reserve(skb, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07001882 skb_reset_network_header(skb);
1883
Paul Chavent5920cd3a2012-11-06 23:10:47 +00001884 if (po->tp_tx_has_off) {
1885 int off_min, off_max, off;
1886 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
1887 off_max = po->tx_ring.frame_size - tp_len;
1888 if (sock->type == SOCK_DGRAM) {
1889 switch (po->tp_version) {
1890 case TPACKET_V2:
1891 off = ph.h2->tp_net;
1892 break;
1893 default:
1894 off = ph.h1->tp_net;
1895 break;
1896 }
1897 } else {
1898 switch (po->tp_version) {
1899 case TPACKET_V2:
1900 off = ph.h2->tp_mac;
1901 break;
1902 default:
1903 off = ph.h1->tp_mac;
1904 break;
1905 }
1906 }
1907 if (unlikely((off < off_min) || (off_max < off)))
1908 return -EINVAL;
1909 data = ph.raw + off;
1910 } else {
1911 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1912 }
Johann Baudy69e3c752009-05-18 22:11:22 -07001913 to_write = tp_len;
1914
1915 if (sock->type == SOCK_DGRAM) {
1916 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1917 NULL, tp_len);
1918 if (unlikely(err < 0))
1919 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001920 } else if (dev->hard_header_len) {
Johann Baudy69e3c752009-05-18 22:11:22 -07001921 /* net device doesn't like empty head */
1922 if (unlikely(tp_len <= dev->hard_header_len)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001923 pr_err("packet size is too short (%d < %d)\n",
1924 tp_len, dev->hard_header_len);
Johann Baudy69e3c752009-05-18 22:11:22 -07001925 return -EINVAL;
1926 }
1927
1928 skb_push(skb, dev->hard_header_len);
1929 err = skb_store_bits(skb, 0, data,
1930 dev->hard_header_len);
1931 if (unlikely(err))
1932 return err;
1933
1934 data += dev->hard_header_len;
1935 to_write -= dev->hard_header_len;
1936 }
1937
Johann Baudy69e3c752009-05-18 22:11:22 -07001938 offset = offset_in_page(data);
1939 len_max = PAGE_SIZE - offset;
1940 len = ((to_write > len_max) ? len_max : to_write);
1941
1942 skb->data_len = to_write;
1943 skb->len += to_write;
1944 skb->truesize += to_write;
1945 atomic_add(to_write, &po->sk.sk_wmem_alloc);
1946
1947 while (likely(to_write)) {
1948 nr_frags = skb_shinfo(skb)->nr_frags;
1949
1950 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001951 pr_err("Packet exceed the number of skb frags(%lu)\n",
1952 MAX_SKB_FRAGS);
Johann Baudy69e3c752009-05-18 22:11:22 -07001953 return -EFAULT;
1954 }
1955
Changli Gao0af55bb2010-12-01 02:52:20 +00001956 page = pgv_to_page(data);
1957 data += len;
Johann Baudy69e3c752009-05-18 22:11:22 -07001958 flush_dcache_page(page);
1959 get_page(page);
Changli Gao0af55bb2010-12-01 02:52:20 +00001960 skb_fill_page_desc(skb, nr_frags, page, offset, len);
Johann Baudy69e3c752009-05-18 22:11:22 -07001961 to_write -= len;
1962 offset = 0;
1963 len_max = PAGE_SIZE;
1964 len = ((to_write > len_max) ? len_max : to_write);
1965 }
1966
1967 return tp_len;
1968}
1969
1970static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1971{
Johann Baudy69e3c752009-05-18 22:11:22 -07001972 struct sk_buff *skb;
1973 struct net_device *dev;
1974 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00001975 bool need_rls_dev = false;
1976 int err, reserve = 0;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001977 void *ph;
1978 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Johann Baudy69e3c752009-05-18 22:11:22 -07001979 int tp_len, size_max;
1980 unsigned char *addr;
1981 int len_sum = 0;
danborkmann@iogearbox.net9e670302012-08-20 03:34:03 +00001982 int status = TP_STATUS_AVAILABLE;
Herbert Xuae641942011-11-18 02:20:04 +00001983 int hlen, tlen;
Johann Baudy69e3c752009-05-18 22:11:22 -07001984
Johann Baudy69e3c752009-05-18 22:11:22 -07001985 mutex_lock(&po->pg_vec_lock);
1986
Johann Baudy69e3c752009-05-18 22:11:22 -07001987 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00001988 dev = po->prot_hook.dev;
Johann Baudy69e3c752009-05-18 22:11:22 -07001989 proto = po->num;
1990 addr = NULL;
1991 } else {
1992 err = -EINVAL;
1993 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1994 goto out;
1995 if (msg->msg_namelen < (saddr->sll_halen
1996 + offsetof(struct sockaddr_ll,
1997 sll_addr)))
1998 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07001999 proto = saddr->sll_protocol;
2000 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002001 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2002 need_rls_dev = true;
Johann Baudy69e3c752009-05-18 22:11:22 -07002003 }
2004
Johann Baudy69e3c752009-05-18 22:11:22 -07002005 err = -ENXIO;
2006 if (unlikely(dev == NULL))
2007 goto out;
2008
2009 reserve = dev->hard_header_len;
2010
2011 err = -ENETDOWN;
2012 if (unlikely(!(dev->flags & IFF_UP)))
2013 goto out_put;
2014
2015 size_max = po->tx_ring.frame_size
Gabor Gombasb5dd8842009-10-29 03:19:11 -07002016 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
Johann Baudy69e3c752009-05-18 22:11:22 -07002017
2018 if (size_max > dev->mtu + reserve)
2019 size_max = dev->mtu + reserve;
2020
2021 do {
2022 ph = packet_current_frame(po, &po->tx_ring,
2023 TP_STATUS_SEND_REQUEST);
2024
2025 if (unlikely(ph == NULL)) {
2026 schedule();
2027 continue;
2028 }
2029
2030 status = TP_STATUS_SEND_REQUEST;
Herbert Xuae641942011-11-18 02:20:04 +00002031 hlen = LL_RESERVED_SPACE(dev);
2032 tlen = dev->needed_tailroom;
Johann Baudy69e3c752009-05-18 22:11:22 -07002033 skb = sock_alloc_send_skb(&po->sk,
Herbert Xuae641942011-11-18 02:20:04 +00002034 hlen + tlen + sizeof(struct sockaddr_ll),
Johann Baudy69e3c752009-05-18 22:11:22 -07002035 0, &err);
2036
2037 if (unlikely(skb == NULL))
2038 goto out_status;
2039
2040 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
Herbert Xuae641942011-11-18 02:20:04 +00002041 addr, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07002042
2043 if (unlikely(tp_len < 0)) {
2044 if (po->tp_loss) {
2045 __packet_set_status(po, ph,
2046 TP_STATUS_AVAILABLE);
2047 packet_increment_head(&po->tx_ring);
2048 kfree_skb(skb);
2049 continue;
2050 } else {
2051 status = TP_STATUS_WRONG_FORMAT;
2052 err = tp_len;
2053 goto out_status;
2054 }
2055 }
2056
2057 skb->destructor = tpacket_destruct_skb;
2058 __packet_set_status(po, ph, TP_STATUS_SENDING);
2059 atomic_inc(&po->tx_ring.pending);
2060
2061 status = TP_STATUS_SEND_REQUEST;
2062 err = dev_queue_xmit(skb);
Jarek Poplawskieb70df12010-01-10 22:04:19 +00002063 if (unlikely(err > 0)) {
2064 err = net_xmit_errno(err);
2065 if (err && __packet_get_status(po, ph) ==
2066 TP_STATUS_AVAILABLE) {
2067 /* skb was destructed already */
2068 skb = NULL;
2069 goto out_status;
2070 }
2071 /*
2072 * skb was dropped but not destructed yet;
2073 * let's treat it like congestion or err < 0
2074 */
2075 err = 0;
2076 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002077 packet_increment_head(&po->tx_ring);
2078 len_sum += tp_len;
Joe Perchesf64f9e72009-11-29 16:55:45 -08002079 } while (likely((ph != NULL) ||
2080 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2081 (atomic_read(&po->tx_ring.pending))))
2082 );
Johann Baudy69e3c752009-05-18 22:11:22 -07002083
2084 err = len_sum;
2085 goto out_put;
2086
Johann Baudy69e3c752009-05-18 22:11:22 -07002087out_status:
2088 __packet_set_status(po, ph, status);
2089 kfree_skb(skb);
2090out_put:
Ben Greear827d9782011-06-01 07:18:53 +00002091 if (need_rls_dev)
2092 dev_put(dev);
Johann Baudy69e3c752009-05-18 22:11:22 -07002093out:
2094 mutex_unlock(&po->pg_vec_lock);
2095 return err;
2096}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
Olof Johanssoneea49cc92011-11-02 11:00:49 +00002098static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2099 size_t reserve, size_t len,
2100 size_t linear, int noblock,
2101 int *err)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002102{
2103 struct sk_buff *skb;
2104
2105 /* Under a page? Don't bother with paged skb. */
2106 if (prepad + len < PAGE_SIZE || !linear)
2107 linear = len;
2108
2109 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2110 err);
2111 if (!skb)
2112 return NULL;
2113
2114 skb_reserve(skb, reserve);
2115 skb_put(skb, linear);
2116 skb->data_len = len - linear;
2117 skb->len += len - linear;
2118
2119 return skb;
2120}
2121
Johann Baudy69e3c752009-05-18 22:11:22 -07002122static int packet_snd(struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 struct msghdr *msg, size_t len)
2124{
2125 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002126 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 struct sk_buff *skb;
2128 struct net_device *dev;
Al Viro0e11c912006-11-08 00:26:29 -08002129 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002130 bool need_rls_dev = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 unsigned char *addr;
Ben Greear827d9782011-06-01 07:18:53 +00002132 int err, reserve = 0;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002133 struct virtio_net_hdr vnet_hdr = { 0 };
2134 int offset = 0;
2135 int vnet_hdr_len;
2136 struct packet_sock *po = pkt_sk(sk);
2137 unsigned short gso_type = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002138 int hlen, tlen;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002139 int extra_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002142 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002144
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002146 dev = po->prot_hook.dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 proto = po->num;
2148 addr = NULL;
2149 } else {
2150 err = -EINVAL;
2151 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2152 goto out;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002153 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2154 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 proto = saddr->sll_protocol;
2156 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002157 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2158 need_rls_dev = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 }
2160
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 err = -ENXIO;
2162 if (dev == NULL)
2163 goto out_unlock;
2164 if (sock->type == SOCK_RAW)
2165 reserve = dev->hard_header_len;
2166
David S. Millerd5e76b02007-01-25 19:30:36 -08002167 err = -ENETDOWN;
2168 if (!(dev->flags & IFF_UP))
2169 goto out_unlock;
2170
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002171 if (po->has_vnet_hdr) {
2172 vnet_hdr_len = sizeof(vnet_hdr);
2173
2174 err = -EINVAL;
2175 if (len < vnet_hdr_len)
2176 goto out_unlock;
2177
2178 len -= vnet_hdr_len;
2179
2180 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2181 vnet_hdr_len);
2182 if (err < 0)
2183 goto out_unlock;
2184
2185 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2186 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2187 vnet_hdr.hdr_len))
2188 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2189 vnet_hdr.csum_offset + 2;
2190
2191 err = -EINVAL;
2192 if (vnet_hdr.hdr_len > len)
2193 goto out_unlock;
2194
2195 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2196 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2197 case VIRTIO_NET_HDR_GSO_TCPV4:
2198 gso_type = SKB_GSO_TCPV4;
2199 break;
2200 case VIRTIO_NET_HDR_GSO_TCPV6:
2201 gso_type = SKB_GSO_TCPV6;
2202 break;
2203 case VIRTIO_NET_HDR_GSO_UDP:
2204 gso_type = SKB_GSO_UDP;
2205 break;
2206 default:
2207 goto out_unlock;
2208 }
2209
2210 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2211 gso_type |= SKB_GSO_TCP_ECN;
2212
2213 if (vnet_hdr.gso_size == 0)
2214 goto out_unlock;
2215
2216 }
2217 }
2218
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002219 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2220 if (!netif_supports_nofcs(dev)) {
2221 err = -EPROTONOSUPPORT;
2222 goto out_unlock;
2223 }
2224 extra_len = 4; /* We're doing our own CRC */
2225 }
2226
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002228 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 goto out_unlock;
2230
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002231 err = -ENOBUFS;
Herbert Xuae641942011-11-18 02:20:04 +00002232 hlen = LL_RESERVED_SPACE(dev);
2233 tlen = dev->needed_tailroom;
2234 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002235 msg->msg_flags & MSG_DONTWAIT, &err);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002236 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 goto out_unlock;
2238
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002239 skb_set_network_header(skb, reserve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002241 err = -EINVAL;
2242 if (sock->type == SOCK_DGRAM &&
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002243 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002244 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245
2246 /* Returns -EFAULT on error */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002247 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 if (err)
2249 goto out_free;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002250 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00002251 if (err < 0)
2252 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002254 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00002255 /* Earlier code assumed this would be a VLAN pkt,
2256 * double-check this now that we have the actual
2257 * packet in hand.
2258 */
2259 struct ethhdr *ehdr;
2260 skb_reset_mac_header(skb);
2261 ehdr = eth_hdr(skb);
2262 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2263 err = -EMSGSIZE;
2264 goto out_free;
2265 }
2266 }
2267
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 skb->protocol = proto;
2269 skb->dev = dev;
2270 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00002271 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002273 if (po->has_vnet_hdr) {
2274 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2275 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2276 vnet_hdr.csum_offset)) {
2277 err = -EINVAL;
2278 goto out_free;
2279 }
2280 }
2281
2282 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2283 skb_shinfo(skb)->gso_type = gso_type;
2284
2285 /* Header must be checked, and gso_segs computed. */
2286 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2287 skb_shinfo(skb)->gso_segs = 0;
2288
2289 len += vnet_hdr_len;
2290 }
2291
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002292 if (unlikely(extra_len == 4))
2293 skb->no_fcs = 1;
2294
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 /*
2296 * Now send it
2297 */
2298
2299 err = dev_queue_xmit(skb);
2300 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2301 goto out_unlock;
2302
Ben Greear827d9782011-06-01 07:18:53 +00002303 if (need_rls_dev)
2304 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002306 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
2308out_free:
2309 kfree_skb(skb);
2310out_unlock:
Ben Greear827d9782011-06-01 07:18:53 +00002311 if (dev && need_rls_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 dev_put(dev);
2313out:
2314 return err;
2315}
2316
Johann Baudy69e3c752009-05-18 22:11:22 -07002317static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2318 struct msghdr *msg, size_t len)
2319{
Johann Baudy69e3c752009-05-18 22:11:22 -07002320 struct sock *sk = sock->sk;
2321 struct packet_sock *po = pkt_sk(sk);
2322 if (po->tx_ring.pg_vec)
2323 return tpacket_snd(po, msg);
2324 else
Johann Baudy69e3c752009-05-18 22:11:22 -07002325 return packet_snd(sock, msg, len);
2326}
2327
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328/*
2329 * Close a PACKET socket. This is fairly simple. We immediately go
2330 * to 'closed' state and remove our protocol entry in the device list.
2331 */
2332
2333static int packet_release(struct socket *sock)
2334{
2335 struct sock *sk = sock->sk;
2336 struct packet_sock *po;
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08002337 struct net *net;
chetan lokef6fb8f102011-08-19 10:18:16 +00002338 union tpacket_req_u req_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339
2340 if (!sk)
2341 return 0;
2342
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002343 net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 po = pkt_sk(sk);
2345
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002346 mutex_lock(&net->packet.sklist_lock);
stephen hemminger808f5112010-02-22 07:57:18 +00002347 sk_del_node_init_rcu(sk);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002348 mutex_unlock(&net->packet.sklist_lock);
2349
2350 preempt_disable();
Eric Dumazet920de802008-11-24 00:09:29 -08002351 sock_prot_inuse_add(net, sk->sk_prot, -1);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002352 preempt_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
stephen hemminger808f5112010-02-22 07:57:18 +00002354 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002355 unregister_prot_hook(sk, false);
Ben Greear160ff182011-06-01 07:18:52 +00002356 if (po->prot_hook.dev) {
2357 dev_put(po->prot_hook.dev);
2358 po->prot_hook.dev = NULL;
2359 }
stephen hemminger808f5112010-02-22 07:57:18 +00002360 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 packet_flush_mclist(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363
chetan lokef6fb8f102011-08-19 10:18:16 +00002364 memset(&req_u, 0, sizeof(req_u));
Johann Baudy69e3c752009-05-18 22:11:22 -07002365
2366 if (po->rx_ring.pg_vec)
chetan lokef6fb8f102011-08-19 10:18:16 +00002367 packet_set_ring(sk, &req_u, 1, 0);
Johann Baudy69e3c752009-05-18 22:11:22 -07002368
2369 if (po->tx_ring.pg_vec)
chetan lokef6fb8f102011-08-19 10:18:16 +00002370 packet_set_ring(sk, &req_u, 1, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371
David S. Millerdc99f602011-07-05 01:45:05 -07002372 fanout_release(sk);
2373
stephen hemminger808f5112010-02-22 07:57:18 +00002374 synchronize_net();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 /*
2376 * Now the socket is dead. No more input will appear.
2377 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 sock_orphan(sk);
2379 sock->sk = NULL;
2380
2381 /* Purge queues */
2382
2383 skb_queue_purge(&sk->sk_receive_queue);
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002384 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
2386 sock_put(sk);
2387 return 0;
2388}
2389
2390/*
2391 * Attach a packet hook.
2392 */
2393
Al Viro0e11c912006-11-08 00:26:29 -08002394static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395{
2396 struct packet_sock *po = pkt_sk(sk);
David S. Millerdc99f602011-07-05 01:45:05 -07002397
Wei Yongjunaef950b2011-12-27 22:32:41 -05002398 if (po->fanout) {
2399 if (dev)
2400 dev_put(dev);
2401
David S. Millerdc99f602011-07-05 01:45:05 -07002402 return -EINVAL;
Wei Yongjunaef950b2011-12-27 22:32:41 -05002403 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404
2405 lock_sock(sk);
2406
2407 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002408 unregister_prot_hook(sk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 po->num = protocol;
2410 po->prot_hook.type = protocol;
Ben Greear160ff182011-06-01 07:18:52 +00002411 if (po->prot_hook.dev)
2412 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 po->prot_hook.dev = dev;
2414
2415 po->ifindex = dev ? dev->ifindex : 0;
2416
2417 if (protocol == 0)
2418 goto out_unlock;
2419
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002420 if (!dev || (dev->flags & IFF_UP)) {
David S. Millerce06b032011-07-04 01:44:29 -07002421 register_prot_hook(sk);
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002422 } else {
2423 sk->sk_err = ENETDOWN;
2424 if (!sock_flag(sk, SOCK_DEAD))
2425 sk->sk_error_report(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 }
2427
2428out_unlock:
2429 spin_unlock(&po->bind_lock);
2430 release_sock(sk);
2431 return 0;
2432}
2433
2434/*
2435 * Bind a packet socket to a device
2436 */
2437
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002438static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2439 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002441 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 char name[15];
2443 struct net_device *dev;
2444 int err = -ENODEV;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002445
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 /*
2447 * Check legality
2448 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002449
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002450 if (addr_len != sizeof(struct sockaddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002452 strlcpy(name, uaddr->sa_data, sizeof(name));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002454 dev = dev_get_by_name(sock_net(sk), name);
Ben Greear160ff182011-06-01 07:18:52 +00002455 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 return err;
2458}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459
2460static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2461{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002462 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2463 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 struct net_device *dev = NULL;
2465 int err;
2466
2467
2468 /*
2469 * Check legality
2470 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002471
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 if (addr_len < sizeof(struct sockaddr_ll))
2473 return -EINVAL;
2474 if (sll->sll_family != AF_PACKET)
2475 return -EINVAL;
2476
2477 if (sll->sll_ifindex) {
2478 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002479 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 if (dev == NULL)
2481 goto out;
2482 }
2483 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484
2485out:
2486 return err;
2487}
2488
2489static struct proto packet_proto = {
2490 .name = "PACKET",
2491 .owner = THIS_MODULE,
2492 .obj_size = sizeof(struct packet_sock),
2493};
2494
2495/*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002496 * Create a packet of type SOCK_PACKET.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 */
2498
Eric Paris3f378b62009-11-05 22:18:14 -08002499static int packet_create(struct net *net, struct socket *sock, int protocol,
2500 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501{
2502 struct sock *sk;
2503 struct packet_sock *po;
Al Viro0e11c912006-11-08 00:26:29 -08002504 __be16 proto = (__force __be16)protocol; /* weird, but documented */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 int err;
2506
2507 if (!capable(CAP_NET_RAW))
2508 return -EPERM;
David S. Millerbe020972007-05-29 13:16:31 -07002509 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2510 sock->type != SOCK_PACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 return -ESOCKTNOSUPPORT;
2512
2513 sock->state = SS_UNCONNECTED;
2514
2515 err = -ENOBUFS;
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07002516 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 if (sk == NULL)
2518 goto out;
2519
2520 sock->ops = &packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 if (sock->type == SOCK_PACKET)
2522 sock->ops = &packet_ops_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002523
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 sock_init_data(sock, sk);
2525
2526 po = pkt_sk(sk);
2527 sk->sk_family = PF_PACKET;
Al Viro0e11c912006-11-08 00:26:29 -08002528 po->num = proto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529
2530 sk->sk_destruct = packet_sock_destruct;
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002531 sk_refcnt_debug_inc(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532
2533 /*
2534 * Attach a protocol block
2535 */
2536
2537 spin_lock_init(&po->bind_lock);
Herbert Xu905db442009-01-30 14:12:06 -08002538 mutex_init(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 po->prot_hook.func = packet_rcv;
David S. Millerbe020972007-05-29 13:16:31 -07002540
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 if (sock->type == SOCK_PACKET)
2542 po->prot_hook.func = packet_rcv_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002543
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 po->prot_hook.af_packet_priv = sk;
2545
Al Viro0e11c912006-11-08 00:26:29 -08002546 if (proto) {
2547 po->prot_hook.type = proto;
David S. Millerce06b032011-07-04 01:44:29 -07002548 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 }
2550
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002551 mutex_lock(&net->packet.sklist_lock);
stephen hemminger808f5112010-02-22 07:57:18 +00002552 sk_add_node_rcu(sk, &net->packet.sklist);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002553 mutex_unlock(&net->packet.sklist_lock);
2554
2555 preempt_disable();
Eric Dumazet36804532008-11-19 14:25:35 -08002556 sock_prot_inuse_add(net, &packet_proto, 1);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002557 preempt_enable();
stephen hemminger808f5112010-02-22 07:57:18 +00002558
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002559 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560out:
2561 return err;
2562}
2563
Richard Cochraned85b562010-04-07 22:41:28 +00002564static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2565{
2566 struct sock_exterr_skb *serr;
2567 struct sk_buff *skb, *skb2;
2568 int copied, err;
2569
2570 err = -EAGAIN;
2571 skb = skb_dequeue(&sk->sk_error_queue);
2572 if (skb == NULL)
2573 goto out;
2574
2575 copied = skb->len;
2576 if (copied > len) {
2577 msg->msg_flags |= MSG_TRUNC;
2578 copied = len;
2579 }
2580 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2581 if (err)
2582 goto out_free_skb;
2583
2584 sock_recv_timestamp(msg, sk, skb);
2585
2586 serr = SKB_EXT_ERR(skb);
2587 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2588 sizeof(serr->ee), &serr->ee);
2589
2590 msg->msg_flags |= MSG_ERRQUEUE;
2591 err = copied;
2592
2593 /* Reset and regenerate socket error */
2594 spin_lock_bh(&sk->sk_error_queue.lock);
2595 sk->sk_err = 0;
2596 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2597 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2598 spin_unlock_bh(&sk->sk_error_queue.lock);
2599 sk->sk_error_report(sk);
2600 } else
2601 spin_unlock_bh(&sk->sk_error_queue.lock);
2602
2603out_free_skb:
2604 kfree_skb(skb);
2605out:
2606 return err;
2607}
2608
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609/*
2610 * Pull a packet from our receive queue and hand it to the user.
2611 * If necessary we block.
2612 */
2613
2614static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2615 struct msghdr *msg, size_t len, int flags)
2616{
2617 struct sock *sk = sock->sk;
2618 struct sk_buff *skb;
2619 int copied, err;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002620 struct sockaddr_ll *sll;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002621 int vnet_hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622
2623 err = -EINVAL;
Richard Cochraned85b562010-04-07 22:41:28 +00002624 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 goto out;
2626
2627#if 0
2628 /* What error should we return now? EUNATTACH? */
2629 if (pkt_sk(sk)->ifindex < 0)
2630 return -ENODEV;
2631#endif
2632
Richard Cochraned85b562010-04-07 22:41:28 +00002633 if (flags & MSG_ERRQUEUE) {
2634 err = packet_recv_error(sk, msg, len);
2635 goto out;
2636 }
2637
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 * Call the generic datagram receiver. This handles all sorts
2640 * of horrible races and re-entrancy so we can forget about it
2641 * in the protocol layers.
2642 *
2643 * Now it will return ENETDOWN, if device have just gone down,
2644 * but then it will block.
2645 */
2646
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002647 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648
2649 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002650 * An error occurred so return it. Because skb_recv_datagram()
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 * handles the blocking we don't see and worry about blocking
2652 * retries.
2653 */
2654
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002655 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 goto out;
2657
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002658 if (pkt_sk(sk)->has_vnet_hdr) {
2659 struct virtio_net_hdr vnet_hdr = { 0 };
2660
2661 err = -EINVAL;
2662 vnet_hdr_len = sizeof(vnet_hdr);
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002663 if (len < vnet_hdr_len)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002664 goto out_free;
2665
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002666 len -= vnet_hdr_len;
2667
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002668 if (skb_is_gso(skb)) {
2669 struct skb_shared_info *sinfo = skb_shinfo(skb);
2670
2671 /* This is a hint as to how much should be linear. */
2672 vnet_hdr.hdr_len = skb_headlen(skb);
2673 vnet_hdr.gso_size = sinfo->gso_size;
2674 if (sinfo->gso_type & SKB_GSO_TCPV4)
2675 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2676 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2677 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2678 else if (sinfo->gso_type & SKB_GSO_UDP)
2679 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2680 else if (sinfo->gso_type & SKB_GSO_FCOE)
2681 goto out_free;
2682 else
2683 BUG();
2684 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2685 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2686 } else
2687 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2688
2689 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2690 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
Michał Mirosław55508d62010-12-14 15:24:08 +00002691 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002692 vnet_hdr.csum_offset = skb->csum_offset;
Jason Wang10a8d942011-06-10 00:56:17 +00002693 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2694 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002695 } /* else everything is zero */
2696
2697 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2698 vnet_hdr_len);
2699 if (err < 0)
2700 goto out_free;
2701 }
2702
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703 /*
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002704 * If the address length field is there to be filled in, we fill
2705 * it in now.
2706 */
2707
Herbert Xuffbc6112007-02-04 23:33:10 -08002708 sll = &PACKET_SKB_CB(skb)->sa.ll;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002709 if (sock->type == SOCK_PACKET)
2710 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2711 else
2712 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2713
2714 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 * You lose any data beyond the buffer you gave. If it worries a
2716 * user program they can ask the device for its MTU anyway.
2717 */
2718
2719 copied = skb->len;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002720 if (copied > len) {
2721 copied = len;
2722 msg->msg_flags |= MSG_TRUNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 }
2724
2725 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2726 if (err)
2727 goto out_free;
2728
Neil Horman3b885782009-10-12 13:26:31 -07002729 sock_recv_ts_and_drops(msg, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730
2731 if (msg->msg_name)
Herbert Xuffbc6112007-02-04 23:33:10 -08002732 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2733 msg->msg_namelen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734
Herbert Xu8dc41942007-02-04 23:31:32 -08002735 if (pkt_sk(sk)->auxdata) {
Herbert Xuffbc6112007-02-04 23:33:10 -08002736 struct tpacket_auxdata aux;
2737
2738 aux.tp_status = TP_STATUS_USER;
2739 if (skb->ip_summed == CHECKSUM_PARTIAL)
2740 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2741 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2742 aux.tp_snaplen = skb->len;
2743 aux.tp_mac = 0;
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002744 aux.tp_net = skb_network_offset(skb);
Ben Greeara3bcc232011-06-01 06:49:10 +00002745 if (vlan_tx_tag_present(skb)) {
2746 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2747 aux.tp_status |= TP_STATUS_VLAN_VALID;
2748 } else {
2749 aux.tp_vlan_tci = 0;
2750 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07002751 aux.tp_padding = 0;
Herbert Xuffbc6112007-02-04 23:33:10 -08002752 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
Herbert Xu8dc41942007-02-04 23:31:32 -08002753 }
2754
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 /*
2756 * Free or return the buffer as appropriate. Again this
2757 * hides all the races and re-entrancy issues from us.
2758 */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002759 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760
2761out_free:
2762 skb_free_datagram(sk, skb);
2763out:
2764 return err;
2765}
2766
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2768 int *uaddr_len, int peer)
2769{
2770 struct net_device *dev;
2771 struct sock *sk = sock->sk;
2772
2773 if (peer)
2774 return -EOPNOTSUPP;
2775
2776 uaddr->sa_family = AF_PACKET;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002777 rcu_read_lock();
2778 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2779 if (dev)
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002780 strncpy(uaddr->sa_data, dev->name, 14);
Eric Dumazet654d1f82009-11-02 10:43:32 +01002781 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 memset(uaddr->sa_data, 0, 14);
Eric Dumazet654d1f82009-11-02 10:43:32 +01002783 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 *uaddr_len = sizeof(*uaddr);
2785
2786 return 0;
2787}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788
2789static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2790 int *uaddr_len, int peer)
2791{
2792 struct net_device *dev;
2793 struct sock *sk = sock->sk;
2794 struct packet_sock *po = pkt_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00002795 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796
2797 if (peer)
2798 return -EOPNOTSUPP;
2799
2800 sll->sll_family = AF_PACKET;
2801 sll->sll_ifindex = po->ifindex;
2802 sll->sll_protocol = po->num;
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002803 sll->sll_pkttype = 0;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002804 rcu_read_lock();
2805 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 if (dev) {
2807 sll->sll_hatype = dev->type;
2808 sll->sll_halen = dev->addr_len;
2809 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810 } else {
2811 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2812 sll->sll_halen = 0;
2813 }
Eric Dumazet654d1f82009-11-02 10:43:32 +01002814 rcu_read_unlock();
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002815 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
2817 return 0;
2818}
2819
Wang Chen2aeb0b82008-07-14 20:49:46 -07002820static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2821 int what)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822{
2823 switch (i->type) {
2824 case PACKET_MR_MULTICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002825 if (i->alen != dev->addr_len)
2826 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827 if (what > 0)
Jiri Pirko22bedad32010-04-01 21:22:57 +00002828 return dev_mc_add(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 else
Jiri Pirko22bedad32010-04-01 21:22:57 +00002830 return dev_mc_del(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 break;
2832 case PACKET_MR_PROMISC:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002833 return dev_set_promiscuity(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 break;
2835 case PACKET_MR_ALLMULTI:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002836 return dev_set_allmulti(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 break;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002838 case PACKET_MR_UNICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002839 if (i->alen != dev->addr_len)
2840 return -EINVAL;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002841 if (what > 0)
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002842 return dev_uc_add(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002843 else
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002844 return dev_uc_del(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002845 break;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002846 default:
2847 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 }
Wang Chen2aeb0b82008-07-14 20:49:46 -07002849 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850}
2851
2852static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2853{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002854 for ( ; i; i = i->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 if (i->ifindex == dev->ifindex)
2856 packet_dev_mc(dev, i, what);
2857 }
2858}
2859
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002860static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861{
2862 struct packet_sock *po = pkt_sk(sk);
2863 struct packet_mclist *ml, *i;
2864 struct net_device *dev;
2865 int err;
2866
2867 rtnl_lock();
2868
2869 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002870 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 if (!dev)
2872 goto done;
2873
2874 err = -EINVAL;
Jiri Pirko11625632010-03-02 20:40:01 +00002875 if (mreq->mr_alen > dev->addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876 goto done;
2877
2878 err = -ENOBUFS;
Kris Katterjohn8b3a7002006-01-11 15:56:43 -08002879 i = kmalloc(sizeof(*i), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 if (i == NULL)
2881 goto done;
2882
2883 err = 0;
2884 for (ml = po->mclist; ml; ml = ml->next) {
2885 if (ml->ifindex == mreq->mr_ifindex &&
2886 ml->type == mreq->mr_type &&
2887 ml->alen == mreq->mr_alen &&
2888 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2889 ml->count++;
2890 /* Free the new element ... */
2891 kfree(i);
2892 goto done;
2893 }
2894 }
2895
2896 i->type = mreq->mr_type;
2897 i->ifindex = mreq->mr_ifindex;
2898 i->alen = mreq->mr_alen;
2899 memcpy(i->addr, mreq->mr_address, i->alen);
2900 i->count = 1;
2901 i->next = po->mclist;
2902 po->mclist = i;
Wang Chen2aeb0b82008-07-14 20:49:46 -07002903 err = packet_dev_mc(dev, i, 1);
2904 if (err) {
2905 po->mclist = i->next;
2906 kfree(i);
2907 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908
2909done:
2910 rtnl_unlock();
2911 return err;
2912}
2913
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002914static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915{
2916 struct packet_mclist *ml, **mlp;
2917
2918 rtnl_lock();
2919
2920 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2921 if (ml->ifindex == mreq->mr_ifindex &&
2922 ml->type == mreq->mr_type &&
2923 ml->alen == mreq->mr_alen &&
2924 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2925 if (--ml->count == 0) {
2926 struct net_device *dev;
2927 *mlp = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00002928 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2929 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 kfree(ml);
2932 }
2933 rtnl_unlock();
2934 return 0;
2935 }
2936 }
2937 rtnl_unlock();
2938 return -EADDRNOTAVAIL;
2939}
2940
2941static void packet_flush_mclist(struct sock *sk)
2942{
2943 struct packet_sock *po = pkt_sk(sk);
2944 struct packet_mclist *ml;
2945
2946 if (!po->mclist)
2947 return;
2948
2949 rtnl_lock();
2950 while ((ml = po->mclist) != NULL) {
2951 struct net_device *dev;
2952
2953 po->mclist = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00002954 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2955 if (dev != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002956 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 kfree(ml);
2958 }
2959 rtnl_unlock();
2960}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961
2962static int
David S. Millerb7058842009-09-30 16:12:20 -07002963packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964{
2965 struct sock *sk = sock->sk;
Herbert Xu8dc41942007-02-04 23:31:32 -08002966 struct packet_sock *po = pkt_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 int ret;
2968
2969 if (level != SOL_PACKET)
2970 return -ENOPROTOOPT;
2971
Johann Baudy69e3c752009-05-18 22:11:22 -07002972 switch (optname) {
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002973 case PACKET_ADD_MEMBERSHIP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 case PACKET_DROP_MEMBERSHIP:
2975 {
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002976 struct packet_mreq_max mreq;
2977 int len = optlen;
2978 memset(&mreq, 0, sizeof(mreq));
2979 if (len < sizeof(struct packet_mreq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980 return -EINVAL;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002981 if (len > sizeof(mreq))
2982 len = sizeof(mreq);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002983 if (copy_from_user(&mreq, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984 return -EFAULT;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002985 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
2986 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 if (optname == PACKET_ADD_MEMBERSHIP)
2988 ret = packet_mc_add(sk, &mreq);
2989 else
2990 ret = packet_mc_drop(sk, &mreq);
2991 return ret;
2992 }
David S. Millera2efcfa2007-05-29 13:12:50 -07002993
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 case PACKET_RX_RING:
Johann Baudy69e3c752009-05-18 22:11:22 -07002995 case PACKET_TX_RING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 {
chetan lokef6fb8f102011-08-19 10:18:16 +00002997 union tpacket_req_u req_u;
2998 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999
chetan lokef6fb8f102011-08-19 10:18:16 +00003000 switch (po->tp_version) {
3001 case TPACKET_V1:
3002 case TPACKET_V2:
3003 len = sizeof(req_u.req);
3004 break;
3005 case TPACKET_V3:
3006 default:
3007 len = sizeof(req_u.req3);
3008 break;
3009 }
3010 if (optlen < len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 return -EINVAL;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003012 if (pkt_sk(sk)->has_vnet_hdr)
3013 return -EINVAL;
chetan lokef6fb8f102011-08-19 10:18:16 +00003014 if (copy_from_user(&req_u.req, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003015 return -EFAULT;
chetan lokef6fb8f102011-08-19 10:18:16 +00003016 return packet_set_ring(sk, &req_u, 0,
3017 optname == PACKET_TX_RING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018 }
3019 case PACKET_COPY_THRESH:
3020 {
3021 int val;
3022
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003023 if (optlen != sizeof(val))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003025 if (copy_from_user(&val, optval, sizeof(val)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 return -EFAULT;
3027
3028 pkt_sk(sk)->copy_thresh = val;
3029 return 0;
3030 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003031 case PACKET_VERSION:
3032 {
3033 int val;
3034
3035 if (optlen != sizeof(val))
3036 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003037 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003038 return -EBUSY;
3039 if (copy_from_user(&val, optval, sizeof(val)))
3040 return -EFAULT;
3041 switch (val) {
3042 case TPACKET_V1:
3043 case TPACKET_V2:
chetan lokef6fb8f102011-08-19 10:18:16 +00003044 case TPACKET_V3:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003045 po->tp_version = val;
3046 return 0;
3047 default:
3048 return -EINVAL;
3049 }
3050 }
Patrick McHardy8913336a2008-07-18 18:05:19 -07003051 case PACKET_RESERVE:
3052 {
3053 unsigned int val;
3054
3055 if (optlen != sizeof(val))
3056 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003057 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardy8913336a2008-07-18 18:05:19 -07003058 return -EBUSY;
3059 if (copy_from_user(&val, optval, sizeof(val)))
3060 return -EFAULT;
3061 po->tp_reserve = val;
3062 return 0;
3063 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003064 case PACKET_LOSS:
3065 {
3066 unsigned int val;
3067
3068 if (optlen != sizeof(val))
3069 return -EINVAL;
3070 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3071 return -EBUSY;
3072 if (copy_from_user(&val, optval, sizeof(val)))
3073 return -EFAULT;
3074 po->tp_loss = !!val;
3075 return 0;
3076 }
Herbert Xu8dc41942007-02-04 23:31:32 -08003077 case PACKET_AUXDATA:
3078 {
3079 int val;
3080
3081 if (optlen < sizeof(val))
3082 return -EINVAL;
3083 if (copy_from_user(&val, optval, sizeof(val)))
3084 return -EFAULT;
3085
3086 po->auxdata = !!val;
3087 return 0;
3088 }
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003089 case PACKET_ORIGDEV:
3090 {
3091 int val;
3092
3093 if (optlen < sizeof(val))
3094 return -EINVAL;
3095 if (copy_from_user(&val, optval, sizeof(val)))
3096 return -EFAULT;
3097
3098 po->origdev = !!val;
3099 return 0;
3100 }
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003101 case PACKET_VNET_HDR:
3102 {
3103 int val;
3104
3105 if (sock->type != SOCK_RAW)
3106 return -EINVAL;
3107 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3108 return -EBUSY;
3109 if (optlen < sizeof(val))
3110 return -EINVAL;
3111 if (copy_from_user(&val, optval, sizeof(val)))
3112 return -EFAULT;
3113
3114 po->has_vnet_hdr = !!val;
3115 return 0;
3116 }
Scott McMillan614f60f2010-06-02 05:53:56 -07003117 case PACKET_TIMESTAMP:
3118 {
3119 int val;
3120
3121 if (optlen != sizeof(val))
3122 return -EINVAL;
3123 if (copy_from_user(&val, optval, sizeof(val)))
3124 return -EFAULT;
3125
3126 po->tp_tstamp = val;
3127 return 0;
3128 }
David S. Millerdc99f602011-07-05 01:45:05 -07003129 case PACKET_FANOUT:
3130 {
3131 int val;
3132
3133 if (optlen != sizeof(val))
3134 return -EINVAL;
3135 if (copy_from_user(&val, optval, sizeof(val)))
3136 return -EFAULT;
3137
3138 return fanout_add(sk, val & 0xffff, val >> 16);
3139 }
Paul Chavent5920cd3a2012-11-06 23:10:47 +00003140 case PACKET_TX_HAS_OFF:
3141 {
3142 unsigned int val;
3143
3144 if (optlen != sizeof(val))
3145 return -EINVAL;
3146 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3147 return -EBUSY;
3148 if (copy_from_user(&val, optval, sizeof(val)))
3149 return -EFAULT;
3150 po->tp_tx_has_off = !!val;
3151 return 0;
3152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 default:
3154 return -ENOPROTOOPT;
3155 }
3156}
3157
3158static int packet_getsockopt(struct socket *sock, int level, int optname,
3159 char __user *optval, int __user *optlen)
3160{
3161 int len;
Eric Dumazetc06fff62012-04-19 21:56:11 +00003162 int val, lv = sizeof(val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 struct sock *sk = sock->sk;
3164 struct packet_sock *po = pkt_sk(sk);
Eric Dumazetc06fff62012-04-19 21:56:11 +00003165 void *data = &val;
Herbert Xu8dc41942007-02-04 23:31:32 -08003166 struct tpacket_stats st;
chetan lokef6fb8f102011-08-19 10:18:16 +00003167 union tpacket_stats_u st_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168
3169 if (level != SOL_PACKET)
3170 return -ENOPROTOOPT;
3171
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003172 if (get_user(len, optlen))
3173 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174
3175 if (len < 0)
3176 return -EINVAL;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003177
Johann Baudy69e3c752009-05-18 22:11:22 -07003178 switch (optname) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 case PACKET_STATISTICS:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 spin_lock_bh(&sk->sk_receive_queue.lock);
chetan lokef6fb8f102011-08-19 10:18:16 +00003181 if (po->tp_version == TPACKET_V3) {
Eric Dumazetc06fff62012-04-19 21:56:11 +00003182 lv = sizeof(struct tpacket_stats_v3);
chetan lokef6fb8f102011-08-19 10:18:16 +00003183 memcpy(&st_u.stats3, &po->stats,
Eric Dumazetc06fff62012-04-19 21:56:11 +00003184 sizeof(struct tpacket_stats));
chetan lokef6fb8f102011-08-19 10:18:16 +00003185 st_u.stats3.tp_freeze_q_cnt =
Eric Dumazetc06fff62012-04-19 21:56:11 +00003186 po->stats_u.stats3.tp_freeze_q_cnt;
chetan lokef6fb8f102011-08-19 10:18:16 +00003187 st_u.stats3.tp_packets += po->stats.tp_drops;
3188 data = &st_u.stats3;
3189 } else {
Eric Dumazetc06fff62012-04-19 21:56:11 +00003190 lv = sizeof(struct tpacket_stats);
chetan lokef6fb8f102011-08-19 10:18:16 +00003191 st = po->stats;
3192 st.tp_packets += st.tp_drops;
3193 data = &st;
3194 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 memset(&po->stats, 0, sizeof(st));
3196 spin_unlock_bh(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197 break;
Herbert Xu8dc41942007-02-04 23:31:32 -08003198 case PACKET_AUXDATA:
Herbert Xu8dc41942007-02-04 23:31:32 -08003199 val = po->auxdata;
Herbert Xu8dc41942007-02-04 23:31:32 -08003200 break;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003201 case PACKET_ORIGDEV:
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003202 val = po->origdev;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003203 break;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003204 case PACKET_VNET_HDR:
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003205 val = po->has_vnet_hdr;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003206 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003207 case PACKET_VERSION:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003208 val = po->tp_version;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003209 break;
3210 case PACKET_HDRLEN:
3211 if (len > sizeof(int))
3212 len = sizeof(int);
3213 if (copy_from_user(&val, optval, len))
3214 return -EFAULT;
3215 switch (val) {
3216 case TPACKET_V1:
3217 val = sizeof(struct tpacket_hdr);
3218 break;
3219 case TPACKET_V2:
3220 val = sizeof(struct tpacket2_hdr);
3221 break;
chetan lokef6fb8f102011-08-19 10:18:16 +00003222 case TPACKET_V3:
3223 val = sizeof(struct tpacket3_hdr);
3224 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003225 default:
3226 return -EINVAL;
3227 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003228 break;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003229 case PACKET_RESERVE:
Patrick McHardy8913336a2008-07-18 18:05:19 -07003230 val = po->tp_reserve;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003231 break;
Johann Baudy69e3c752009-05-18 22:11:22 -07003232 case PACKET_LOSS:
Johann Baudy69e3c752009-05-18 22:11:22 -07003233 val = po->tp_loss;
Johann Baudy69e3c752009-05-18 22:11:22 -07003234 break;
Scott McMillan614f60f2010-06-02 05:53:56 -07003235 case PACKET_TIMESTAMP:
Scott McMillan614f60f2010-06-02 05:53:56 -07003236 val = po->tp_tstamp;
Scott McMillan614f60f2010-06-02 05:53:56 -07003237 break;
David S. Millerdc99f602011-07-05 01:45:05 -07003238 case PACKET_FANOUT:
David S. Millerdc99f602011-07-05 01:45:05 -07003239 val = (po->fanout ?
3240 ((u32)po->fanout->id |
3241 ((u32)po->fanout->type << 16)) :
3242 0);
David S. Millerdc99f602011-07-05 01:45:05 -07003243 break;
Paul Chavent5920cd3a2012-11-06 23:10:47 +00003244 case PACKET_TX_HAS_OFF:
3245 val = po->tp_tx_has_off;
3246 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 default:
3248 return -ENOPROTOOPT;
3249 }
3250
Eric Dumazetc06fff62012-04-19 21:56:11 +00003251 if (len > lv)
3252 len = lv;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003253 if (put_user(len, optlen))
3254 return -EFAULT;
Herbert Xu8dc41942007-02-04 23:31:32 -08003255 if (copy_to_user(optval, data, len))
3256 return -EFAULT;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003257 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258}
3259
3260
3261static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3262{
3263 struct sock *sk;
3264 struct hlist_node *node;
Jason Lunzad930652007-02-20 23:19:54 -08003265 struct net_device *dev = data;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003266 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267
stephen hemminger808f5112010-02-22 07:57:18 +00003268 rcu_read_lock();
3269 sk_for_each_rcu(sk, node, &net->packet.sklist) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270 struct packet_sock *po = pkt_sk(sk);
3271
3272 switch (msg) {
3273 case NETDEV_UNREGISTER:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 if (po->mclist)
3275 packet_dev_mclist(dev, po->mclist, -1);
David S. Millera2efcfa2007-05-29 13:12:50 -07003276 /* fallthrough */
3277
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 case NETDEV_DOWN:
3279 if (dev->ifindex == po->ifindex) {
3280 spin_lock(&po->bind_lock);
3281 if (po->running) {
David S. Millerce06b032011-07-04 01:44:29 -07003282 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283 sk->sk_err = ENETDOWN;
3284 if (!sock_flag(sk, SOCK_DEAD))
3285 sk->sk_error_report(sk);
3286 }
3287 if (msg == NETDEV_UNREGISTER) {
3288 po->ifindex = -1;
Ben Greear160ff182011-06-01 07:18:52 +00003289 if (po->prot_hook.dev)
3290 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291 po->prot_hook.dev = NULL;
3292 }
3293 spin_unlock(&po->bind_lock);
3294 }
3295 break;
3296 case NETDEV_UP:
stephen hemminger808f5112010-02-22 07:57:18 +00003297 if (dev->ifindex == po->ifindex) {
3298 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003299 if (po->num)
3300 register_prot_hook(sk);
stephen hemminger808f5112010-02-22 07:57:18 +00003301 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 break;
3304 }
3305 }
stephen hemminger808f5112010-02-22 07:57:18 +00003306 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 return NOTIFY_DONE;
3308}
3309
3310
3311static int packet_ioctl(struct socket *sock, unsigned int cmd,
3312 unsigned long arg)
3313{
3314 struct sock *sk = sock->sk;
3315
Johann Baudy69e3c752009-05-18 22:11:22 -07003316 switch (cmd) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003317 case SIOCOUTQ:
3318 {
3319 int amount = sk_wmem_alloc_get(sk);
Eric Dumazet31e6d362009-06-17 19:05:41 -07003320
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003321 return put_user(amount, (int __user *)arg);
3322 }
3323 case SIOCINQ:
3324 {
3325 struct sk_buff *skb;
3326 int amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003328 spin_lock_bh(&sk->sk_receive_queue.lock);
3329 skb = skb_peek(&sk->sk_receive_queue);
3330 if (skb)
3331 amount = skb->len;
3332 spin_unlock_bh(&sk->sk_receive_queue.lock);
3333 return put_user(amount, (int __user *)arg);
3334 }
3335 case SIOCGSTAMP:
3336 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3337 case SIOCGSTAMPNS:
3338 return sock_get_timestampns(sk, (struct timespec __user *)arg);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003339
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340#ifdef CONFIG_INET
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003341 case SIOCADDRT:
3342 case SIOCDELRT:
3343 case SIOCDARP:
3344 case SIOCGARP:
3345 case SIOCSARP:
3346 case SIOCGIFADDR:
3347 case SIOCSIFADDR:
3348 case SIOCGIFBRDADDR:
3349 case SIOCSIFBRDADDR:
3350 case SIOCGIFNETMASK:
3351 case SIOCSIFNETMASK:
3352 case SIOCGIFDSTADDR:
3353 case SIOCSIFDSTADDR:
3354 case SIOCSIFFLAGS:
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003355 return inet_dgram_ops.ioctl(sock, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356#endif
3357
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003358 default:
3359 return -ENOIOCTLCMD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360 }
3361 return 0;
3362}
3363
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003364static unsigned int packet_poll(struct file *file, struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 poll_table *wait)
3366{
3367 struct sock *sk = sock->sk;
3368 struct packet_sock *po = pkt_sk(sk);
3369 unsigned int mask = datagram_poll(file, sock, wait);
3370
3371 spin_lock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003372 if (po->rx_ring.pg_vec) {
chetan lokef6fb8f102011-08-19 10:18:16 +00003373 if (!packet_previous_rx_frame(po, &po->rx_ring,
3374 TP_STATUS_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375 mask |= POLLIN | POLLRDNORM;
3376 }
3377 spin_unlock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003378 spin_lock_bh(&sk->sk_write_queue.lock);
3379 if (po->tx_ring.pg_vec) {
3380 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3381 mask |= POLLOUT | POLLWRNORM;
3382 }
3383 spin_unlock_bh(&sk->sk_write_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 return mask;
3385}
3386
3387
3388/* Dirty? Well, I still did not learn better way to account
3389 * for user mmaps.
3390 */
3391
3392static void packet_mm_open(struct vm_area_struct *vma)
3393{
3394 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003395 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003397
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 if (sk)
3399 atomic_inc(&pkt_sk(sk)->mapped);
3400}
3401
3402static void packet_mm_close(struct vm_area_struct *vma)
3403{
3404 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003405 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003407
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 if (sk)
3409 atomic_dec(&pkt_sk(sk)->mapped);
3410}
3411
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003412static const struct vm_operations_struct packet_mmap_ops = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003413 .open = packet_mm_open,
3414 .close = packet_mm_close,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415};
3416
Neil Horman0e3125c2010-11-16 10:26:47 -08003417static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3418 unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419{
3420 int i;
3421
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003422 for (i = 0; i < len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003423 if (likely(pg_vec[i].buffer)) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003424 if (is_vmalloc_addr(pg_vec[i].buffer))
Neil Horman0e3125c2010-11-16 10:26:47 -08003425 vfree(pg_vec[i].buffer);
3426 else
3427 free_pages((unsigned long)pg_vec[i].buffer,
3428 order);
3429 pg_vec[i].buffer = NULL;
3430 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 }
3432 kfree(pg_vec);
3433}
3434
Olof Johanssoneea49cc92011-11-02 11:00:49 +00003435static char *alloc_one_pg_vec_page(unsigned long order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003436{
Neil Horman0e3125c2010-11-16 10:26:47 -08003437 char *buffer = NULL;
3438 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3439 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
Eric Dumazet719bfea2009-04-15 03:39:52 -07003440
Neil Horman0e3125c2010-11-16 10:26:47 -08003441 buffer = (char *) __get_free_pages(gfp_flags, order);
3442
3443 if (buffer)
3444 return buffer;
3445
3446 /*
3447 * __get_free_pages failed, fall back to vmalloc
3448 */
Eric Dumazetbbce5a52010-11-20 07:31:54 +00003449 buffer = vzalloc((1 << order) * PAGE_SIZE);
Neil Horman0e3125c2010-11-16 10:26:47 -08003450
3451 if (buffer)
3452 return buffer;
3453
3454 /*
3455 * vmalloc failed, lets dig into swap here
3456 */
Neil Horman0e3125c2010-11-16 10:26:47 -08003457 gfp_flags &= ~__GFP_NORETRY;
3458 buffer = (char *)__get_free_pages(gfp_flags, order);
3459 if (buffer)
3460 return buffer;
3461
3462 /*
3463 * complete and utter failure
3464 */
3465 return NULL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003466}
3467
Neil Horman0e3125c2010-11-16 10:26:47 -08003468static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003469{
3470 unsigned int block_nr = req->tp_block_nr;
Neil Horman0e3125c2010-11-16 10:26:47 -08003471 struct pgv *pg_vec;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003472 int i;
3473
Neil Horman0e3125c2010-11-16 10:26:47 -08003474 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003475 if (unlikely(!pg_vec))
3476 goto out;
3477
3478 for (i = 0; i < block_nr; i++) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003479 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
Neil Horman0e3125c2010-11-16 10:26:47 -08003480 if (unlikely(!pg_vec[i].buffer))
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003481 goto out_free_pgvec;
3482 }
3483
3484out:
3485 return pg_vec;
3486
3487out_free_pgvec:
3488 free_pg_vec(pg_vec, order, block_nr);
3489 pg_vec = NULL;
3490 goto out;
3491}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492
chetan lokef6fb8f102011-08-19 10:18:16 +00003493static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
Johann Baudy69e3c752009-05-18 22:11:22 -07003494 int closing, int tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495{
Neil Horman0e3125c2010-11-16 10:26:47 -08003496 struct pgv *pg_vec = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 struct packet_sock *po = pkt_sk(sk);
Al Viro0e11c912006-11-08 00:26:29 -08003498 int was_running, order = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003499 struct packet_ring_buffer *rb;
3500 struct sk_buff_head *rb_queue;
Al Viro0e11c912006-11-08 00:26:29 -08003501 __be16 num;
chetan lokef6fb8f102011-08-19 10:18:16 +00003502 int err = -EINVAL;
3503 /* Added to avoid minimal code churn */
3504 struct tpacket_req *req = &req_u->req;
3505
3506 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3507 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3508 WARN(1, "Tx-ring is not supported.\n");
3509 goto out;
3510 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003511
3512 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3513 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3514
3515 err = -EBUSY;
3516 if (!closing) {
3517 if (atomic_read(&po->mapped))
3518 goto out;
3519 if (atomic_read(&rb->pending))
3520 goto out;
3521 }
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003522
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 if (req->tp_block_nr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 /* Sanity tests and some calculations */
Johann Baudy69e3c752009-05-18 22:11:22 -07003525 err = -EBUSY;
3526 if (unlikely(rb->pg_vec))
3527 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003529 switch (po->tp_version) {
3530 case TPACKET_V1:
3531 po->tp_hdrlen = TPACKET_HDRLEN;
3532 break;
3533 case TPACKET_V2:
3534 po->tp_hdrlen = TPACKET2_HDRLEN;
3535 break;
chetan lokef6fb8f102011-08-19 10:18:16 +00003536 case TPACKET_V3:
3537 po->tp_hdrlen = TPACKET3_HDRLEN;
3538 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003539 }
3540
Johann Baudy69e3c752009-05-18 22:11:22 -07003541 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003542 if (unlikely((int)req->tp_block_size <= 0))
Johann Baudy69e3c752009-05-18 22:11:22 -07003543 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003544 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003545 goto out;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003546 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
Johann Baudy69e3c752009-05-18 22:11:22 -07003547 po->tp_reserve))
3548 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003549 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003550 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551
Johann Baudy69e3c752009-05-18 22:11:22 -07003552 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3553 if (unlikely(rb->frames_per_block <= 0))
3554 goto out;
3555 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3556 req->tp_frame_nr))
3557 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558
3559 err = -ENOMEM;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003560 order = get_order(req->tp_block_size);
3561 pg_vec = alloc_pg_vec(req, order);
3562 if (unlikely(!pg_vec))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 goto out;
chetan lokef6fb8f102011-08-19 10:18:16 +00003564 switch (po->tp_version) {
3565 case TPACKET_V3:
3566 /* Transmit path is not supported. We checked
3567 * it above but just being paranoid
3568 */
3569 if (!tx_ring)
3570 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3571 break;
3572 default:
3573 break;
3574 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003575 }
3576 /* Done */
3577 else {
3578 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003579 if (unlikely(req->tp_frame_nr))
Johann Baudy69e3c752009-05-18 22:11:22 -07003580 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581 }
3582
3583 lock_sock(sk);
3584
3585 /* Detach socket from network */
3586 spin_lock(&po->bind_lock);
3587 was_running = po->running;
3588 num = po->num;
3589 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 po->num = 0;
David S. Millerce06b032011-07-04 01:44:29 -07003591 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592 }
3593 spin_unlock(&po->bind_lock);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003594
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 synchronize_net();
3596
3597 err = -EBUSY;
Herbert Xu905db442009-01-30 14:12:06 -08003598 mutex_lock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599 if (closing || atomic_read(&po->mapped) == 0) {
3600 err = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003601 spin_lock_bh(&rb_queue->lock);
Changli Gaoc053fd92010-12-10 16:02:20 -08003602 swap(rb->pg_vec, pg_vec);
Johann Baudy69e3c752009-05-18 22:11:22 -07003603 rb->frame_max = (req->tp_frame_nr - 1);
3604 rb->head = 0;
3605 rb->frame_size = req->tp_frame_size;
3606 spin_unlock_bh(&rb_queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
Changli Gaoc053fd92010-12-10 16:02:20 -08003608 swap(rb->pg_vec_order, order);
3609 swap(rb->pg_vec_len, req->tp_block_nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610
Johann Baudy69e3c752009-05-18 22:11:22 -07003611 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3612 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3613 tpacket_rcv : packet_rcv;
3614 skb_queue_purge(rb_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 if (atomic_read(&po->mapped))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003616 pr_err("packet_mmap: vma is busy: %d\n",
3617 atomic_read(&po->mapped));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 }
Herbert Xu905db442009-01-30 14:12:06 -08003619 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620
3621 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003622 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 po->num = num;
David S. Millerce06b032011-07-04 01:44:29 -07003624 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 }
3626 spin_unlock(&po->bind_lock);
chetan lokef6fb8f102011-08-19 10:18:16 +00003627 if (closing && (po->tp_version > TPACKET_V2)) {
3628 /* Because we don't support block-based V3 on tx-ring */
3629 if (!tx_ring)
3630 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3631 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 release_sock(sk);
3633
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 if (pg_vec)
3635 free_pg_vec(pg_vec, order, req->tp_block_nr);
3636out:
3637 return err;
3638}
3639
Johann Baudy69e3c752009-05-18 22:11:22 -07003640static int packet_mmap(struct file *file, struct socket *sock,
3641 struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642{
3643 struct sock *sk = sock->sk;
3644 struct packet_sock *po = pkt_sk(sk);
Johann Baudy69e3c752009-05-18 22:11:22 -07003645 unsigned long size, expected_size;
3646 struct packet_ring_buffer *rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647 unsigned long start;
3648 int err = -EINVAL;
3649 int i;
3650
3651 if (vma->vm_pgoff)
3652 return -EINVAL;
3653
Herbert Xu905db442009-01-30 14:12:06 -08003654 mutex_lock(&po->pg_vec_lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003655
3656 expected_size = 0;
3657 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3658 if (rb->pg_vec) {
3659 expected_size += rb->pg_vec_len
3660 * rb->pg_vec_pages
3661 * PAGE_SIZE;
3662 }
3663 }
3664
3665 if (expected_size == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07003667
3668 size = vma->vm_end - vma->vm_start;
3669 if (size != expected_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 goto out;
3671
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 start = vma->vm_start;
Johann Baudy69e3c752009-05-18 22:11:22 -07003673 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3674 if (rb->pg_vec == NULL)
3675 continue;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003676
Johann Baudy69e3c752009-05-18 22:11:22 -07003677 for (i = 0; i < rb->pg_vec_len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003678 struct page *page;
3679 void *kaddr = rb->pg_vec[i].buffer;
Johann Baudy69e3c752009-05-18 22:11:22 -07003680 int pg_num;
3681
Changli Gaoc56b4d92010-12-01 02:52:57 +00003682 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3683 page = pgv_to_page(kaddr);
Johann Baudy69e3c752009-05-18 22:11:22 -07003684 err = vm_insert_page(vma, start, page);
3685 if (unlikely(err))
3686 goto out;
3687 start += PAGE_SIZE;
Neil Horman0e3125c2010-11-16 10:26:47 -08003688 kaddr += PAGE_SIZE;
Johann Baudy69e3c752009-05-18 22:11:22 -07003689 }
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003690 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003692
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003693 atomic_inc(&po->mapped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694 vma->vm_ops = &packet_mmap_ops;
3695 err = 0;
3696
3697out:
Herbert Xu905db442009-01-30 14:12:06 -08003698 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 return err;
3700}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003702static const struct proto_ops packet_ops_spkt = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 .family = PF_PACKET,
3704 .owner = THIS_MODULE,
3705 .release = packet_release,
3706 .bind = packet_bind_spkt,
3707 .connect = sock_no_connect,
3708 .socketpair = sock_no_socketpair,
3709 .accept = sock_no_accept,
3710 .getname = packet_getname_spkt,
3711 .poll = datagram_poll,
3712 .ioctl = packet_ioctl,
3713 .listen = sock_no_listen,
3714 .shutdown = sock_no_shutdown,
3715 .setsockopt = sock_no_setsockopt,
3716 .getsockopt = sock_no_getsockopt,
3717 .sendmsg = packet_sendmsg_spkt,
3718 .recvmsg = packet_recvmsg,
3719 .mmap = sock_no_mmap,
3720 .sendpage = sock_no_sendpage,
3721};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003723static const struct proto_ops packet_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724 .family = PF_PACKET,
3725 .owner = THIS_MODULE,
3726 .release = packet_release,
3727 .bind = packet_bind,
3728 .connect = sock_no_connect,
3729 .socketpair = sock_no_socketpair,
3730 .accept = sock_no_accept,
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003731 .getname = packet_getname,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732 .poll = packet_poll,
3733 .ioctl = packet_ioctl,
3734 .listen = sock_no_listen,
3735 .shutdown = sock_no_shutdown,
3736 .setsockopt = packet_setsockopt,
3737 .getsockopt = packet_getsockopt,
3738 .sendmsg = packet_sendmsg,
3739 .recvmsg = packet_recvmsg,
3740 .mmap = packet_mmap,
3741 .sendpage = sock_no_sendpage,
3742};
3743
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003744static const struct net_proto_family packet_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745 .family = PF_PACKET,
3746 .create = packet_create,
3747 .owner = THIS_MODULE,
3748};
3749
3750static struct notifier_block packet_netdev_notifier = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003751 .notifier_call = packet_notifier,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752};
3753
3754#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755
3756static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
stephen hemminger808f5112010-02-22 07:57:18 +00003757 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758{
Denis V. Luneve372c412007-11-19 22:31:54 -08003759 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003760
3761 rcu_read_lock();
3762 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763}
3764
3765static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3766{
Herbert Xu1bf40952007-12-16 14:04:02 -08003767 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003768 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769}
3770
3771static void packet_seq_stop(struct seq_file *seq, void *v)
stephen hemminger808f5112010-02-22 07:57:18 +00003772 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773{
stephen hemminger808f5112010-02-22 07:57:18 +00003774 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003775}
3776
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003777static int packet_seq_show(struct seq_file *seq, void *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778{
3779 if (v == SEQ_START_TOKEN)
3780 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3781 else {
Li Zefanb7ceabd2010-02-08 23:19:29 +00003782 struct sock *s = sk_entry(v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 const struct packet_sock *po = pkt_sk(s);
3784
3785 seq_printf(seq,
Dan Rosenberg71338aa2011-05-23 12:17:35 +00003786 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787 s,
3788 atomic_read(&s->sk_refcnt),
3789 s->sk_type,
3790 ntohs(po->num),
3791 po->ifindex,
3792 po->running,
3793 atomic_read(&s->sk_rmem_alloc),
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06003794 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003795 sock_i_ino(s));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796 }
3797
3798 return 0;
3799}
3800
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003801static const struct seq_operations packet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802 .start = packet_seq_start,
3803 .next = packet_seq_next,
3804 .stop = packet_seq_stop,
3805 .show = packet_seq_show,
3806};
3807
3808static int packet_seq_open(struct inode *inode, struct file *file)
3809{
Denis V. Luneve372c412007-11-19 22:31:54 -08003810 return seq_open_net(inode, file, &packet_seq_ops,
3811 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812}
3813
Arjan van de Venda7071d2007-02-12 00:55:36 -08003814static const struct file_operations packet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815 .owner = THIS_MODULE,
3816 .open = packet_seq_open,
3817 .read = seq_read,
3818 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003819 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820};
3821
3822#endif
3823
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003824static int __net_init packet_net_init(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003825{
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00003826 mutex_init(&net->packet.sklist_lock);
Denis V. Lunev2aaef4e2007-12-11 04:19:54 -08003827 INIT_HLIST_HEAD(&net->packet.sklist);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003828
3829 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3830 return -ENOMEM;
3831
3832 return 0;
3833}
3834
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003835static void __net_exit packet_net_exit(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003836{
3837 proc_net_remove(net, "packet");
3838}
3839
3840static struct pernet_operations packet_net_ops = {
3841 .init = packet_net_init,
3842 .exit = packet_net_exit,
3843};
3844
3845
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846static void __exit packet_exit(void)
3847{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 unregister_netdevice_notifier(&packet_netdev_notifier);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003849 unregister_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003850 sock_unregister(PF_PACKET);
3851 proto_unregister(&packet_proto);
3852}
3853
3854static int __init packet_init(void)
3855{
3856 int rc = proto_register(&packet_proto, 0);
3857
3858 if (rc != 0)
3859 goto out;
3860
3861 sock_register(&packet_family_ops);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003862 register_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863 register_netdevice_notifier(&packet_netdev_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864out:
3865 return rc;
3866}
3867
3868module_init(packet_init);
3869module_exit(packet_exit);
3870MODULE_LICENSE("GPL");
3871MODULE_ALIAS_NETPROTO(PF_PACKET);