/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *	Eric Biederman		:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *	Johann Baudy		:	Added TX RING.
 *	Chetan Loke		:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/if_arp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

#include "internal.h"

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong because it introduces
		 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Resume
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

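/* For orientation, a minimal user-space sketch (illustrative only, not part
 * of this file) of how a TPACKET_V3 receive ring is typically requested;
 * the setsockopt(PACKET_RX_RING) call below is ultimately serviced by
 * packet_set_ring():
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size		= 1 << 20,
 *		.tp_block_nr		= 8,
 *		.tp_frame_size		= 2048,
 *		.tp_frame_nr		= 8 * ((1 << 20) / 2048),
 *		.tp_retire_blk_tov	= 60,	(block retire timeout, msec)
 *	};
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int v = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */
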
#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

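/* Everything inside a V3 block sits on V3_ALIGNMENT (8-byte) boundaries.
 * A worked example with an illustrative size: for a 13-byte private area,
 * ALIGN(13, 8) == 16, so BLK_PLUS_PRIV(13) == BLK_HDR_LEN + 16, and the
 * first frame of the block starts at that offset.
 */
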
#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

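/* tp_status is the per-frame ownership word shared with user space: the
 * kernel publishes a filled frame by setting TP_STATUS_USER, and user space
 * hands the frame back by storing TP_STATUS_KERNEL.  A rough TPACKET_V1/V2
 * consumer loop (illustrative only, details elided):
 *
 *	struct tpacket2_hdr *hdr = frame;
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	consume(frame);
 *	hdr->tp_status = TP_STATUS_KERNEL;
 */
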
static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps) {
		if ((flags & SOF_TIMESTAMPING_SYS_HARDWARE) &&
		    ktime_to_timespec_cond(shhwtstamps->syststamp, ts))
			return TP_STATUS_TS_SYS_HARDWARE;
		if ((flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
		    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
			return TP_STATUS_TS_RAW_HARDWARE;
	}

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

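/* Worked example of the lookup arithmetic above (illustrative sizes): with
 * tp_block_size == 4096 and tp_frame_size == 2048, frames_per_block == 2,
 * so position 5 resolves to pg_vec_pos == 2 and frame_offset == 1, i.e. the
 * second frame of the third block.
 */
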
static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}

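/* Worked example of prb_calc_retire_blk_tmo() above: a 1MB block on a
 * 1Gbps link gives msec = 1, div = 1 and mbits = (1MB * 8) / 2^20 = 8,
 * so tmo = 8 and the function returns tmo+1 == 9ms - roughly the time
 * the link needs to fill one block, as the timer-logic comment further
 * below also assumes.
 */
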
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	bool has_room;

	if (po->prot_hook.func != tpacket_rcv)
		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
			<= sk->sk_rcvbuf;

	spin_lock(&sk->sk_receive_queue.lock);
	if (po->tp_version == TPACKET_V3)
		has_room = prb_lookup_block(po, &po->rx_ring,
					    po->rx_ring.prb_bdqc.kactive_blk_num,
					    TP_STATUS_KERNEL);
	else
		has_room = packet_lookup_frame(po, &po->rx_ring,
					       po->rx_ring.head,
					       TP_STATUS_KERNEL);
	spin_unlock(&sk->sk_receive_queue.lock);

	return has_room;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return (((u64)skb->rxhash) * num) >> 32;
}

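/* fanout_demux_hash() above maps the 32-bit rxhash uniformly onto [0, num)
 * without a modulo: (hash * num) >> 32.  E.g. with num == 4, an rxhash of
 * 0x80000000 lands on member 2.
 */
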
static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return cur;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, unsigned int skip,
					  unsigned int num)
{
	unsigned int i, j;

	i = j = min_t(int, f->next[idx], num - 1);
	do {
		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
			if (i != j)
				f->next[idx] = i;
			return i;
		}
		if (++i == num)
			i = 0;
	} while (i != j);

	return idx;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

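/* The PACKET_FANOUT_FLAG_* constants occupy the high byte of the u16 fanout
 * type word, while f->flags stores them shifted down (see fanout_add());
 * hence the (flag >> 8) in fanout_has_flag() above.  E.g.
 * PACKET_FANOUT_FLAG_DEFRAG (0x8000) is tested as 0x80 against f->flags.
 */
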
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
		break;
	}

	po = pkt_sk(f->arr[idx]);
	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
	    unlikely(!packet_rcv_has_room(po, skb))) {
		idx = fanout_demux_rollover(f, skb, idx, idx, num);
		po = pkt_sk(f->arr[idx]);
	}

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}

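/* A minimal user-space sketch (illustrative, not part of this file) of
 * joining a fanout group via fanout_add() above: the low 16 bits of the
 * integer optval carry the group id, the high 16 bits the type and flags
 * that fanout_add() unpacks (fanout_group_id is a hypothetical variable):
 *
 *	int val = fanout_group_id |
 *		  ((PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_ROLLOVER) << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 */
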
1350static void fanout_release(struct sock *sk)
1351{
1352 struct packet_sock *po = pkt_sk(sk);
1353 struct packet_fanout *f;
1354
1355 f = po->fanout;
1356 if (!f)
1357 return;
1358
Pavel Emelyanovfff33212012-08-16 05:36:48 +00001359 mutex_lock(&fanout_mutex);
David S. Millerdc99f602011-07-05 01:45:05 -07001360 po->fanout = NULL;
1361
David S. Millerdc99f602011-07-05 01:45:05 -07001362 if (atomic_dec_and_test(&f->sk_ref)) {
1363 list_del(&f->list);
1364 dev_remove_pack(&f->prot_hook);
1365 kfree(f);
1366 }
1367 mutex_unlock(&fanout_mutex);
1368}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001370static const struct proto_ops packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08001372static const struct proto_ops packet_ops_spkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001374static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1375 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376{
1377 struct sock *sk;
1378 struct sockaddr_pkt *spkt;
1379
1380 /*
1381 * When we registered the protocol we saved the socket in the data
1382 * field for just this event.
1383 */
1384
1385 sk = pt->af_packet_priv;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001386
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 /*
1388 * Yank back the headers [hope the device set this
1389 * right or kerboom...]
1390 *
1391 * Incoming packets have ll header pulled,
1392 * push it back.
1393 *
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001394 * For outgoing ones skb->data == skb_mac_header(skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 * so that this procedure is noop.
1396 */
1397
1398 if (skb->pkt_type == PACKET_LOOPBACK)
1399 goto out;
1400
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001401 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001402 goto out;
1403
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001404 skb = skb_share_check(skb, GFP_ATOMIC);
1405 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 goto oom;
1407
1408 /* drop any routing info */
Eric Dumazetadf30902009-06-02 05:19:30 +00001409 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410
Phil Oester84531c22005-07-12 11:57:52 -07001411 /* drop conntrack reference */
1412 nf_reset(skb);
1413
Herbert Xuffbc6112007-02-04 23:33:10 -08001414 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001416 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
1418 /*
1419 * The SOCK_PACKET socket receives _all_ frames.
1420 */
1421
1422 spkt->spkt_family = dev->type;
1423 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1424 spkt->spkt_protocol = skb->protocol;
1425
1426 /*
1427	 * to prevent a socket from using up all the memory.
1428 * to prevent sockets using all the memory up.
1429 */
1430
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001431 if (sock_queue_rcv_skb(sk, skb) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 return 0;
1433
1434out:
1435 kfree_skb(skb);
1436oom:
1437 return 0;
1438}
1439
1440
1441/*
1442 * Output a raw packet to the device layer. This bypasses all the other
1443 * protocol layers, so you must supply it with a complete frame.
1444 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001445
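/*
 * A hedged userspace sketch (not part of this file) of driving this path
 * through the obsolete SOCK_PACKET interface; "frame", "frame_len" and
 * "eth0" are assumptions of the caller. Passing sizeof(struct
 * sockaddr_pkt) as the address length lets spkt_protocol be picked up:
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy(spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */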
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1447 struct msghdr *msg, size_t len)
1448{
1449 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001450 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001451 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 struct net_device *dev;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001453 __be16 proto = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 int err;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001455 int extra_len = 0;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001456
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001458 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 */
1460
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001461 if (saddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 if (msg->msg_namelen < sizeof(struct sockaddr))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001463 return -EINVAL;
1464 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1465 proto = saddr->spkt_protocol;
1466 } else
1467 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468
1469 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001470	 *	Find the device first, so we can size-check against it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 */
1472
danborkmann@iogearbox.netde74e922012-06-10 08:59:28 +00001473 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001474retry:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001475 rcu_read_lock();
1476 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 err = -ENODEV;
1478 if (dev == NULL)
1479 goto out_unlock;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001480
David S. Millerd5e76b02007-01-25 19:30:36 -08001481 err = -ENETDOWN;
1482 if (!(dev->flags & IFF_UP))
1483 goto out_unlock;
1484
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 /*
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001486	 * You may not queue a frame bigger than the MTU. This is the lowest level
1487 * raw protocol and you must do your own fragmentation at this level.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001489
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001490 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1491 if (!netif_supports_nofcs(dev)) {
1492 err = -EPROTONOSUPPORT;
1493 goto out_unlock;
1494 }
1495 extra_len = 4; /* We're doing our own CRC */
1496 }
1497
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001499 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 goto out_unlock;
1501
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001502 if (!skb) {
1503 size_t reserved = LL_RESERVED_SPACE(dev);
Herbert Xu4ce40912011-11-18 02:20:05 +00001504 int tlen = dev->needed_tailroom;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001505 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001507 rcu_read_unlock();
Herbert Xu4ce40912011-11-18 02:20:05 +00001508 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001509 if (skb == NULL)
1510 return -ENOBUFS;
1511 /* FIXME: Save some space for broken drivers that write a hard
1512 * header at transmission time by themselves. PPP is the notable
1513 * one here. This should really be fixed at the driver level.
1514 */
1515 skb_reserve(skb, reserved);
1516 skb_reset_network_header(skb);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001517
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001518 /* Try to align data part correctly */
1519 if (hhlen) {
1520 skb->data -= hhlen;
1521 skb->tail -= hhlen;
1522 if (len < hhlen)
1523 skb_reset_network_header(skb);
1524 }
1525 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1526 if (err)
1527 goto out_free;
1528 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 }
1530
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001531 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00001532 /* Earlier code assumed this would be a VLAN pkt,
1533 * double-check this now that we have the actual
1534 * packet in hand.
1535 */
1536 struct ethhdr *ehdr;
1537 skb_reset_mac_header(skb);
1538 ehdr = eth_hdr(skb);
1539 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1540 err = -EMSGSIZE;
1541 goto out_unlock;
1542 }
1543 }
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001544
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 skb->protocol = proto;
1546 skb->dev = dev;
1547 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001548 skb->mark = sk->sk_mark;
Daniel Borkmannbf84a012013-04-14 08:08:13 +00001549
1550 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001552 if (unlikely(extra_len == 4))
1553 skb->no_fcs = 1;
1554
Jason Wang40893fd2013-03-26 23:11:22 +00001555 skb_probe_transport_header(skb, 0);
Jason Wangc1aad272013-03-25 20:19:57 +00001556
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 dev_queue_xmit(skb);
Eric Dumazet654d1f82009-11-02 10:43:32 +01001558 rcu_read_unlock();
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001559 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561out_unlock:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001562 rcu_read_unlock();
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001563out_free:
1564 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 return err;
1566}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001568static unsigned int run_filter(const struct sk_buff *skb,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001569 const struct sock *sk,
David S. Millerdbcb5852007-01-24 15:21:02 -08001570 unsigned int res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571{
1572 struct sk_filter *filter;
1573
Eric Dumazet80f8f102011-01-18 07:46:52 +00001574 rcu_read_lock();
1575 filter = rcu_dereference(sk->sk_filter);
David S. Millerdbcb5852007-01-24 15:21:02 -08001576 if (filter != NULL)
Eric Dumazet0a148422011-04-20 09:27:32 +00001577 res = SK_RUN_FILTER(filter, skb);
Eric Dumazet80f8f102011-01-18 07:46:52 +00001578 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579
David S. Millerdbcb5852007-01-24 15:21:02 -08001580 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581}
1582
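/*
 * run_filter() above runs whatever classic BPF program userspace attached.
 * A hedged sketch of attaching an accept-all program (opcode 0x06 is
 * BPF_RET|BPF_K; the constant is the snapshot length to return):
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 0x0000ffff },
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */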
1583/*
Eric Dumazet62ab0812010-12-06 20:50:09 +00001584 * This function does lazy skb cloning, in the hope that most packets
1585 * are discarded by BPF.
1586 *
1587 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
1588 * and skb->cb are mangled. It works because (and until) packets
1589 * falling here are owned by the current CPU. Output packets are cloned
1590 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1591 * sequentially, so if we return the skb to its original state on exit,
1592 * we will not harm anyone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 */
1594
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001595static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1596 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597{
1598 struct sock *sk;
1599 struct sockaddr_ll *sll;
1600 struct packet_sock *po;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001601 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001603 unsigned int snaplen, res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
1605 if (skb->pkt_type == PACKET_LOOPBACK)
1606 goto drop;
1607
1608 sk = pt->af_packet_priv;
1609 po = pkt_sk(sk);
1610
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001611 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001612 goto drop;
1613
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 skb->dev = dev;
1615
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001616 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 /* The device has an explicit notion of ll header,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001618 * exported to higher levels.
1619 *
1620 * Otherwise, the device hides details of its frame
1621		 * structure, so that the corresponding packet head is
1622		 * never delivered to the user.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623 */
1624 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001625 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 else if (skb->pkt_type == PACKET_OUTGOING) {
1627 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001628 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 }
1630 }
1631
1632 snaplen = skb->len;
1633
David S. Millerdbcb5852007-01-24 15:21:02 -08001634 res = run_filter(skb, sk, snaplen);
1635 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001636 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001637 if (snaplen > res)
1638 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001640 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 goto drop_n_acct;
1642
1643 if (skb_shared(skb)) {
1644 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1645 if (nskb == NULL)
1646 goto drop_n_acct;
1647
1648 if (skb_head != skb->data) {
1649 skb->data = skb_head;
1650 skb->len = skb_len;
1651 }
Eric Dumazetabc4e4f2012-04-19 02:24:42 +00001652 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 skb = nskb;
1654 }
1655
Herbert Xuffbc6112007-02-04 23:33:10 -08001656 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1657 sizeof(skb->cb));
1658
1659 sll = &PACKET_SKB_CB(skb)->sa.ll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 sll->sll_family = AF_PACKET;
1661 sll->sll_hatype = dev->type;
1662 sll->sll_protocol = skb->protocol;
1663 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001664 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001665 sll->sll_ifindex = orig_dev->ifindex;
1666 else
1667 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001669 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670
Herbert Xuffbc6112007-02-04 23:33:10 -08001671 PACKET_SKB_CB(skb)->origlen = skb->len;
Herbert Xu8dc41942007-02-04 23:31:32 -08001672
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 if (pskb_trim(skb, snaplen))
1674 goto drop_n_acct;
1675
1676 skb_set_owner_r(skb, sk);
1677 skb->dev = NULL;
Eric Dumazetadf30902009-06-02 05:19:30 +00001678 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679
Phil Oester84531c22005-07-12 11:57:52 -07001680 /* drop conntrack reference */
1681 nf_reset(skb);
1682
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 spin_lock(&sk->sk_receive_queue.lock);
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00001684 po->stats.stats1.tp_packets++;
Neil Horman3b885782009-10-12 13:26:31 -07001685 skb->dropcount = atomic_read(&sk->sk_drops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 __skb_queue_tail(&sk->sk_receive_queue, skb);
1687 spin_unlock(&sk->sk_receive_queue.lock);
1688 sk->sk_data_ready(sk, skb->len);
1689 return 0;
1690
1691drop_n_acct:
Willem de Bruijn7091fbd2011-09-30 10:38:28 +00001692 spin_lock(&sk->sk_receive_queue.lock);
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00001693 po->stats.stats1.tp_drops++;
Willem de Bruijn7091fbd2011-09-30 10:38:28 +00001694 atomic_inc(&sk->sk_drops);
1695 spin_unlock(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
1697drop_n_restore:
1698 if (skb_head != skb->data && skb_shared(skb)) {
1699 skb->data = skb_head;
1700 skb->len = skb_len;
1701 }
1702drop:
Neil Hormanead2ceb2009-03-11 09:49:55 +00001703 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 return 0;
1705}
1706
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001707static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1708 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709{
1710 struct sock *sk;
1711 struct packet_sock *po;
1712 struct sockaddr_ll *sll;
Daniel Borkmann184f4892013-04-16 01:57:46 +00001713 union tpacket_uhdr h;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001714 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001716 unsigned int snaplen, res;
chetan lokef6fb8f12011-08-19 10:18:16 +00001717 unsigned long status = TP_STATUS_USER;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001718 unsigned short macoff, netoff, hdrlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 struct sk_buff *copy_skb = NULL;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001720 struct timespec ts;
Daniel Borkmannb9c32fb2013-04-23 00:39:31 +00001721 __u32 ts_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
1723 if (skb->pkt_type == PACKET_LOOPBACK)
1724 goto drop;
1725
1726 sk = pt->af_packet_priv;
1727 po = pkt_sk(sk);
1728
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001729 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001730 goto drop;
1731
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001732 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001734 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 else if (skb->pkt_type == PACKET_OUTGOING) {
1736 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001737 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 }
1739 }
1740
Herbert Xu8dc41942007-02-04 23:31:32 -08001741 if (skb->ip_summed == CHECKSUM_PARTIAL)
1742 status |= TP_STATUS_CSUMNOTREADY;
1743
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 snaplen = skb->len;
1745
David S. Millerdbcb5852007-01-24 15:21:02 -08001746 res = run_filter(skb, sk, snaplen);
1747 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001748 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001749 if (snaplen > res)
1750 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
1752 if (sk->sk_type == SOCK_DGRAM) {
Patrick McHardy8913336a2008-07-18 18:05:19 -07001753 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1754 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 } else {
Eric Dumazet95c96172012-04-15 05:58:06 +00001756 unsigned int maclen = skb_network_offset(skb);
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001757 netoff = TPACKET_ALIGN(po->tp_hdrlen +
Patrick McHardy8913336a2008-07-18 18:05:19 -07001758 (maclen < 16 ? 16 : maclen)) +
1759 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 macoff = netoff - maclen;
1761 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001762 if (po->tp_version <= TPACKET_V2) {
1763 if (macoff + snaplen > po->rx_ring.frame_size) {
1764 if (po->copy_thresh &&
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001765 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
chetan lokef6fb8f12011-08-19 10:18:16 +00001766 if (skb_shared(skb)) {
1767 copy_skb = skb_clone(skb, GFP_ATOMIC);
1768 } else {
1769 copy_skb = skb_get(skb);
1770 skb_head = skb->data;
1771 }
1772 if (copy_skb)
1773 skb_set_owner_r(copy_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001775 snaplen = po->rx_ring.frame_size - macoff;
1776 if ((int)snaplen < 0)
1777 snaplen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 spin_lock(&sk->sk_receive_queue.lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00001781 h.raw = packet_current_rx_frame(po, skb,
1782 TP_STATUS_KERNEL, (macoff+snaplen));
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001783 if (!h.raw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 goto ring_is_full;
chetan lokef6fb8f12011-08-19 10:18:16 +00001785 if (po->tp_version <= TPACKET_V2) {
1786 packet_increment_rx_head(po, &po->rx_ring);
1787 /*
1788		 * LOSING will be reported until you read the stats,
1789		 * because it's COR - Clear On Read.
1790		 * Anyway, we do this for V1/V2 only, as V3 doesn't need it
1791		 * at the packet level.
1792 */
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00001793 if (po->stats.stats1.tp_drops)
chetan lokef6fb8f12011-08-19 10:18:16 +00001794 status |= TP_STATUS_LOSING;
1795 }
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00001796 po->stats.stats1.tp_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 if (copy_skb) {
1798 status |= TP_STATUS_COPY;
1799 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 spin_unlock(&sk->sk_receive_queue.lock);
1802
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001803 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
Daniel Borkmannb9c32fb2013-04-23 00:39:31 +00001804
1805 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
Daniel Borkmann7a513842013-04-23 00:39:29 +00001806 getnstimeofday(&ts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
Daniel Borkmannb9c32fb2013-04-23 00:39:31 +00001808 status |= ts_status;
1809
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001810 switch (po->tp_version) {
1811 case TPACKET_V1:
1812 h.h1->tp_len = skb->len;
1813 h.h1->tp_snaplen = snaplen;
1814 h.h1->tp_mac = macoff;
1815 h.h1->tp_net = netoff;
Daniel Borkmann4b457bd2013-04-16 01:29:11 +00001816 h.h1->tp_sec = ts.tv_sec;
1817 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001818 hdrlen = sizeof(*h.h1);
1819 break;
1820 case TPACKET_V2:
1821 h.h2->tp_len = skb->len;
1822 h.h2->tp_snaplen = snaplen;
1823 h.h2->tp_mac = macoff;
1824 h.h2->tp_net = netoff;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001825 h.h2->tp_sec = ts.tv_sec;
1826 h.h2->tp_nsec = ts.tv_nsec;
Ben Greeara3bcc232011-06-01 06:49:10 +00001827 if (vlan_tx_tag_present(skb)) {
1828 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1829 status |= TP_STATUS_VLAN_VALID;
1830 } else {
1831 h.h2->tp_vlan_tci = 0;
1832 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07001833 h.h2->tp_padding = 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001834 hdrlen = sizeof(*h.h2);
1835 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00001836 case TPACKET_V3:
1837		/* tp_next_offset and vlan are already populated above,
1838		 * so don't clear those fields here.
1839 */
1840 h.h3->tp_status |= status;
1841 h.h3->tp_len = skb->len;
1842 h.h3->tp_snaplen = snaplen;
1843 h.h3->tp_mac = macoff;
1844 h.h3->tp_net = netoff;
chetan lokef6fb8f12011-08-19 10:18:16 +00001845 h.h3->tp_sec = ts.tv_sec;
1846 h.h3->tp_nsec = ts.tv_nsec;
1847 hdrlen = sizeof(*h.h3);
1848 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001849 default:
1850 BUG();
1851 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001853 sll = h.raw + TPACKET_ALIGN(hdrlen);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001854 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 sll->sll_family = AF_PACKET;
1856 sll->sll_hatype = dev->type;
1857 sll->sll_protocol = skb->protocol;
1858 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001859 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001860 sll->sll_ifindex = orig_dev->ifindex;
1861 else
1862 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Ralf Baechlee16aa202006-12-07 00:11:33 -08001864 smp_mb();
Changli Gaof6dafa92010-12-07 04:26:16 +00001865#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 {
Changli Gao0af55bb2010-12-01 02:52:20 +00001867 u8 *start, *end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868
chetan lokef6fb8f12011-08-19 10:18:16 +00001869 if (po->tp_version <= TPACKET_V2) {
1870 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1871 + macoff + snaplen);
1872 for (start = h.raw; start < end; start += PAGE_SIZE)
1873 flush_dcache_page(pgv_to_page(start));
1874 }
Chetan Lokecc9f01b2011-07-14 08:36:33 -07001875 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 }
Changli Gaof6dafa92010-12-07 04:26:16 +00001877#endif
chetan lokef6fb8f12011-08-19 10:18:16 +00001878 if (po->tp_version <= TPACKET_V2)
1879 __packet_set_status(po, h.raw, status);
1880 else
1881 prb_clear_blk_fill_status(&po->rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
1883 sk->sk_data_ready(sk, 0);
1884
1885drop_n_restore:
1886 if (skb_head != skb->data && skb_shared(skb)) {
1887 skb->data = skb_head;
1888 skb->len = skb_len;
1889 }
1890drop:
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001891 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 return 0;
1893
1894ring_is_full:
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00001895 po->stats.stats1.tp_drops++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 spin_unlock(&sk->sk_receive_queue.lock);
1897
1898 sk->sk_data_ready(sk, 0);
Wei Yongjunacb5d752009-02-25 00:36:42 +00001899 kfree_skb(copy_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 goto drop_n_restore;
1901}
1902
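/*
 * A hedged userspace sketch of draining the RX ring that tpacket_rcv()
 * above fills, assuming a TPACKET_V2 ring already set up with
 * PACKET_RX_RING and mmap(); "ring" (a char *), "i", "frame_size",
 * "frame_nr" and handle() are the caller's, not defined here:
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + i * frame_size);
 *
 *	while (hdr->tp_status & TP_STATUS_USER) {
 *		handle(ring + i * frame_size + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		i = (i + 1) % frame_nr;
 *		hdr = (void *)(ring + i * frame_size);
 *	}
 */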
Johann Baudy69e3c752009-05-18 22:11:22 -07001903static void tpacket_destruct_skb(struct sk_buff *skb)
1904{
1905 struct packet_sock *po = pkt_sk(skb->sk);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001906 void *ph;
Johann Baudy69e3c752009-05-18 22:11:22 -07001907
Johann Baudy69e3c752009-05-18 22:11:22 -07001908 if (likely(po->tx_ring.pg_vec)) {
Daniel Borkmannb9c32fb2013-04-23 00:39:31 +00001909 __u32 ts;
1910
Johann Baudy69e3c752009-05-18 22:11:22 -07001911 ph = skb_shinfo(skb)->destructor_arg;
Johann Baudy69e3c752009-05-18 22:11:22 -07001912 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1913 atomic_dec(&po->tx_ring.pending);
Daniel Borkmannb9c32fb2013-04-23 00:39:31 +00001914
1915 ts = __packet_set_timestamp(po, ph, skb);
1916 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
Johann Baudy69e3c752009-05-18 22:11:22 -07001917 }
1918
1919 sock_wfree(skb);
1920}
1921
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001922static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1923 void *frame, struct net_device *dev, int size_max,
Herbert Xuae641942011-11-18 02:20:04 +00001924 __be16 proto, unsigned char *addr, int hlen)
Johann Baudy69e3c752009-05-18 22:11:22 -07001925{
Daniel Borkmann184f4892013-04-16 01:57:46 +00001926 union tpacket_uhdr ph;
Phil Suttercbd89ac2013-08-02 11:37:40 +02001927 int to_write, offset, len, tp_len, nr_frags, len_max, max_frame_len;
Johann Baudy69e3c752009-05-18 22:11:22 -07001928 struct socket *sock = po->sk.sk_socket;
1929 struct page *page;
1930 void *data;
1931 int err;
1932
1933 ph.raw = frame;
1934
1935 skb->protocol = proto;
1936 skb->dev = dev;
1937 skb->priority = po->sk.sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001938 skb->mark = po->sk.sk_mark;
Willem de Bruijn2e313962013-04-23 00:39:28 +00001939 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
Johann Baudy69e3c752009-05-18 22:11:22 -07001940 skb_shinfo(skb)->destructor_arg = ph.raw;
1941
1942 switch (po->tp_version) {
1943 case TPACKET_V2:
1944 tp_len = ph.h2->tp_len;
1945 break;
1946 default:
1947 tp_len = ph.h1->tp_len;
1948 break;
1949 }
Johann Baudy69e3c752009-05-18 22:11:22 -07001950
Herbert Xuae641942011-11-18 02:20:04 +00001951 skb_reserve(skb, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07001952 skb_reset_network_header(skb);
Jason Wang40893fd2013-03-26 23:11:22 +00001953 skb_probe_transport_header(skb, 0);
Jason Wangc1aad272013-03-25 20:19:57 +00001954
Paul Chavent5920cd3a2012-11-06 23:10:47 +00001955 if (po->tp_tx_has_off) {
1956 int off_min, off_max, off;
1957 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
1958 off_max = po->tx_ring.frame_size - tp_len;
1959 if (sock->type == SOCK_DGRAM) {
1960 switch (po->tp_version) {
1961 case TPACKET_V2:
1962 off = ph.h2->tp_net;
1963 break;
1964 default:
1965 off = ph.h1->tp_net;
1966 break;
1967 }
1968 } else {
1969 switch (po->tp_version) {
1970 case TPACKET_V2:
1971 off = ph.h2->tp_mac;
1972 break;
1973 default:
1974 off = ph.h1->tp_mac;
1975 break;
1976 }
1977 }
1978 if (unlikely((off < off_min) || (off_max < off)))
1979 return -EINVAL;
1980 data = ph.raw + off;
1981 } else {
1982 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1983 }
Johann Baudy69e3c752009-05-18 22:11:22 -07001984 to_write = tp_len;
1985
1986 if (sock->type == SOCK_DGRAM) {
1987 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1988 NULL, tp_len);
1989 if (unlikely(err < 0))
1990 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001991 } else if (dev->hard_header_len) {
Johann Baudy69e3c752009-05-18 22:11:22 -07001992 /* net device doesn't like empty head */
1993 if (unlikely(tp_len <= dev->hard_header_len)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001994 pr_err("packet size is too short (%d < %d)\n",
1995 tp_len, dev->hard_header_len);
Johann Baudy69e3c752009-05-18 22:11:22 -07001996 return -EINVAL;
1997 }
1998
1999 skb_push(skb, dev->hard_header_len);
2000 err = skb_store_bits(skb, 0, data,
2001 dev->hard_header_len);
2002 if (unlikely(err))
2003 return err;
2004
Phil Sutter0f75b092013-08-02 11:37:39 +02002005 if (dev->type == ARPHRD_ETHER)
2006 skb->protocol = eth_type_trans(skb, dev);
2007
Johann Baudy69e3c752009-05-18 22:11:22 -07002008 data += dev->hard_header_len;
2009 to_write -= dev->hard_header_len;
2010 }
2011
Phil Suttercbd89ac2013-08-02 11:37:40 +02002012 max_frame_len = dev->mtu + dev->hard_header_len;
2013 if (skb->protocol == htons(ETH_P_8021Q))
2014 max_frame_len += VLAN_HLEN;
2015
2016 if (size_max > max_frame_len)
2017 size_max = max_frame_len;
2018
2019 if (unlikely(tp_len > size_max)) {
2020 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2021 return -EMSGSIZE;
2022 }
2023
Johann Baudy69e3c752009-05-18 22:11:22 -07002024 offset = offset_in_page(data);
2025 len_max = PAGE_SIZE - offset;
2026 len = ((to_write > len_max) ? len_max : to_write);
2027
2028 skb->data_len = to_write;
2029 skb->len += to_write;
2030 skb->truesize += to_write;
2031 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2032
2033 while (likely(to_write)) {
2034 nr_frags = skb_shinfo(skb)->nr_frags;
2035
2036 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002037			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2038 MAX_SKB_FRAGS);
Johann Baudy69e3c752009-05-18 22:11:22 -07002039 return -EFAULT;
2040 }
2041
Changli Gao0af55bb2010-12-01 02:52:20 +00002042 page = pgv_to_page(data);
2043 data += len;
Johann Baudy69e3c752009-05-18 22:11:22 -07002044 flush_dcache_page(page);
2045 get_page(page);
Changli Gao0af55bb2010-12-01 02:52:20 +00002046 skb_fill_page_desc(skb, nr_frags, page, offset, len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002047 to_write -= len;
2048 offset = 0;
2049 len_max = PAGE_SIZE;
2050 len = ((to_write > len_max) ? len_max : to_write);
2051 }
2052
2053 return tp_len;
2054}
2055
2056static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2057{
Johann Baudy69e3c752009-05-18 22:11:22 -07002058 struct sk_buff *skb;
2059 struct net_device *dev;
2060 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002061 bool need_rls_dev = false;
Phil Suttercbd89ac2013-08-02 11:37:40 +02002062 int err;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002063 void *ph;
2064 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Johann Baudy69e3c752009-05-18 22:11:22 -07002065 int tp_len, size_max;
2066 unsigned char *addr;
2067 int len_sum = 0;
danborkmann@iogearbox.net9e670302012-08-20 03:34:03 +00002068 int status = TP_STATUS_AVAILABLE;
Herbert Xuae641942011-11-18 02:20:04 +00002069 int hlen, tlen;
Johann Baudy69e3c752009-05-18 22:11:22 -07002070
Johann Baudy69e3c752009-05-18 22:11:22 -07002071 mutex_lock(&po->pg_vec_lock);
2072
Johann Baudy69e3c752009-05-18 22:11:22 -07002073 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002074 dev = po->prot_hook.dev;
Johann Baudy69e3c752009-05-18 22:11:22 -07002075 proto = po->num;
2076 addr = NULL;
2077 } else {
2078 err = -EINVAL;
2079 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2080 goto out;
2081 if (msg->msg_namelen < (saddr->sll_halen
2082 + offsetof(struct sockaddr_ll,
2083 sll_addr)))
2084 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07002085 proto = saddr->sll_protocol;
2086 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002087 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2088 need_rls_dev = true;
Johann Baudy69e3c752009-05-18 22:11:22 -07002089 }
2090
Johann Baudy69e3c752009-05-18 22:11:22 -07002091 err = -ENXIO;
2092 if (unlikely(dev == NULL))
2093 goto out;
2094
Johann Baudy69e3c752009-05-18 22:11:22 -07002095 err = -ENETDOWN;
2096 if (unlikely(!(dev->flags & IFF_UP)))
2097 goto out_put;
2098
2099 size_max = po->tx_ring.frame_size
Gabor Gombasb5dd8842009-10-29 03:19:11 -07002100 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
Johann Baudy69e3c752009-05-18 22:11:22 -07002101
Johann Baudy69e3c752009-05-18 22:11:22 -07002102 do {
2103 ph = packet_current_frame(po, &po->tx_ring,
2104 TP_STATUS_SEND_REQUEST);
2105
2106 if (unlikely(ph == NULL)) {
2107 schedule();
2108 continue;
2109 }
2110
2111 status = TP_STATUS_SEND_REQUEST;
Herbert Xuae641942011-11-18 02:20:04 +00002112 hlen = LL_RESERVED_SPACE(dev);
2113 tlen = dev->needed_tailroom;
Johann Baudy69e3c752009-05-18 22:11:22 -07002114 skb = sock_alloc_send_skb(&po->sk,
Herbert Xuae641942011-11-18 02:20:04 +00002115 hlen + tlen + sizeof(struct sockaddr_ll),
Johann Baudy69e3c752009-05-18 22:11:22 -07002116 0, &err);
2117
2118 if (unlikely(skb == NULL))
2119 goto out_status;
2120
2121 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
Herbert Xuae641942011-11-18 02:20:04 +00002122 addr, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07002123
2124 if (unlikely(tp_len < 0)) {
2125 if (po->tp_loss) {
2126 __packet_set_status(po, ph,
2127 TP_STATUS_AVAILABLE);
2128 packet_increment_head(&po->tx_ring);
2129 kfree_skb(skb);
2130 continue;
2131 } else {
2132 status = TP_STATUS_WRONG_FORMAT;
2133 err = tp_len;
2134 goto out_status;
2135 }
2136 }
2137
2138 skb->destructor = tpacket_destruct_skb;
2139 __packet_set_status(po, ph, TP_STATUS_SENDING);
2140 atomic_inc(&po->tx_ring.pending);
2141
2142 status = TP_STATUS_SEND_REQUEST;
2143 err = dev_queue_xmit(skb);
Jarek Poplawskieb70df12010-01-10 22:04:19 +00002144 if (unlikely(err > 0)) {
2145 err = net_xmit_errno(err);
2146 if (err && __packet_get_status(po, ph) ==
2147 TP_STATUS_AVAILABLE) {
2148 /* skb was destructed already */
2149 skb = NULL;
2150 goto out_status;
2151 }
2152 /*
2153 * skb was dropped but not destructed yet;
2154 * let's treat it like congestion or err < 0
2155 */
2156 err = 0;
2157 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002158 packet_increment_head(&po->tx_ring);
2159 len_sum += tp_len;
Joe Perchesf64f9e72009-11-29 16:55:45 -08002160 } while (likely((ph != NULL) ||
2161 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2162 (atomic_read(&po->tx_ring.pending))))
2163 );
Johann Baudy69e3c752009-05-18 22:11:22 -07002164
2165 err = len_sum;
2166 goto out_put;
2167
Johann Baudy69e3c752009-05-18 22:11:22 -07002168out_status:
2169 __packet_set_status(po, ph, status);
2170 kfree_skb(skb);
2171out_put:
Ben Greear827d9782011-06-01 07:18:53 +00002172 if (need_rls_dev)
2173 dev_put(dev);
Johann Baudy69e3c752009-05-18 22:11:22 -07002174out:
2175 mutex_unlock(&po->pg_vec_lock);
2176 return err;
2177}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178
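/*
 * A hedged userspace sketch of the TX-ring side that tpacket_snd() above
 * consumes, assuming a TPACKET_V2 ring set up with PACKET_TX_RING and
 * mmap(); "ring", "i", "frame_size", "frame" and "frame_len" are the
 * caller's. The data offset matches tpacket_fill_skb() without
 * PACKET_TX_HAS_OFF:
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + i * frame_size);
 *	void *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 */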
Olof Johanssoneea49cc92011-11-02 11:00:49 +00002179static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2180 size_t reserve, size_t len,
2181 size_t linear, int noblock,
2182 int *err)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002183{
2184 struct sk_buff *skb;
2185
2186 /* Under a page? Don't bother with paged skb. */
2187 if (prepad + len < PAGE_SIZE || !linear)
2188 linear = len;
2189
2190 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2191 err);
2192 if (!skb)
2193 return NULL;
2194
2195 skb_reserve(skb, reserve);
2196 skb_put(skb, linear);
2197 skb->data_len = len - linear;
2198 skb->len += len - linear;
2199
2200 return skb;
2201}
2202
Johann Baudy69e3c752009-05-18 22:11:22 -07002203static int packet_snd(struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 struct msghdr *msg, size_t len)
2205{
2206 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002207 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 struct sk_buff *skb;
2209 struct net_device *dev;
Al Viro0e11c912006-11-08 00:26:29 -08002210 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002211 bool need_rls_dev = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 unsigned char *addr;
Ben Greear827d9782011-06-01 07:18:53 +00002213 int err, reserve = 0;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002214 struct virtio_net_hdr vnet_hdr = { 0 };
2215 int offset = 0;
2216 int vnet_hdr_len;
2217 struct packet_sock *po = pkt_sk(sk);
2218 unsigned short gso_type = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002219 int hlen, tlen;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002220 int extra_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221
2222 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002223 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002225
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002227 dev = po->prot_hook.dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 proto = po->num;
2229 addr = NULL;
2230 } else {
2231 err = -EINVAL;
2232 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2233 goto out;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002234 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2235 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 proto = saddr->sll_protocol;
2237 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002238 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2239 need_rls_dev = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 }
2241
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 err = -ENXIO;
2243 if (dev == NULL)
2244 goto out_unlock;
2245 if (sock->type == SOCK_RAW)
2246 reserve = dev->hard_header_len;
2247
David S. Millerd5e76b02007-01-25 19:30:36 -08002248 err = -ENETDOWN;
2249 if (!(dev->flags & IFF_UP))
2250 goto out_unlock;
2251
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002252 if (po->has_vnet_hdr) {
2253 vnet_hdr_len = sizeof(vnet_hdr);
2254
2255 err = -EINVAL;
2256 if (len < vnet_hdr_len)
2257 goto out_unlock;
2258
2259 len -= vnet_hdr_len;
2260
2261 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2262 vnet_hdr_len);
2263 if (err < 0)
2264 goto out_unlock;
2265
2266 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2267 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2268 vnet_hdr.hdr_len))
2269 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2270 vnet_hdr.csum_offset + 2;
2271
2272 err = -EINVAL;
2273 if (vnet_hdr.hdr_len > len)
2274 goto out_unlock;
2275
2276 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2277 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2278 case VIRTIO_NET_HDR_GSO_TCPV4:
2279 gso_type = SKB_GSO_TCPV4;
2280 break;
2281 case VIRTIO_NET_HDR_GSO_TCPV6:
2282 gso_type = SKB_GSO_TCPV6;
2283 break;
2284 case VIRTIO_NET_HDR_GSO_UDP:
2285 gso_type = SKB_GSO_UDP;
2286 break;
2287 default:
2288 goto out_unlock;
2289 }
2290
2291 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2292 gso_type |= SKB_GSO_TCP_ECN;
2293
2294 if (vnet_hdr.gso_size == 0)
2295 goto out_unlock;
2296
2297 }
2298 }
2299
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002300 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2301 if (!netif_supports_nofcs(dev)) {
2302 err = -EPROTONOSUPPORT;
2303 goto out_unlock;
2304 }
2305 extra_len = 4; /* We're doing our own CRC */
2306 }
2307
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002309 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 goto out_unlock;
2311
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002312 err = -ENOBUFS;
Herbert Xuae641942011-11-18 02:20:04 +00002313 hlen = LL_RESERVED_SPACE(dev);
2314 tlen = dev->needed_tailroom;
2315 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002316 msg->msg_flags & MSG_DONTWAIT, &err);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002317 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 goto out_unlock;
2319
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002320 skb_set_network_header(skb, reserve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002322 err = -EINVAL;
2323 if (sock->type == SOCK_DGRAM &&
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002324 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002325 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326
2327 /* Returns -EFAULT on error */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002328 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 if (err)
2330 goto out_free;
Daniel Borkmannbf84a012013-04-14 08:08:13 +00002331
2332 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333
Phil Sutter0f75b092013-08-02 11:37:39 +02002334 if (dev->type == ARPHRD_ETHER) {
2335 skb->protocol = eth_type_trans(skb, dev);
Phil Sutterc483e022013-08-02 11:37:41 +02002336 if (skb->protocol == htons(ETH_P_8021Q))
2337 reserve += VLAN_HLEN;
Phil Sutter0f75b092013-08-02 11:37:39 +02002338 } else {
2339 skb->protocol = proto;
2340 skb->dev = dev;
2341 }
2342
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002343 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
Phil Sutterc483e022013-08-02 11:37:41 +02002344 err = -EMSGSIZE;
2345 goto out_free;
Ben Greear57f89bf2011-02-11 09:35:18 +00002346 }
2347
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00002349 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002351 if (po->has_vnet_hdr) {
2352 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2353 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2354 vnet_hdr.csum_offset)) {
2355 err = -EINVAL;
2356 goto out_free;
2357 }
2358 }
2359
2360 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2361 skb_shinfo(skb)->gso_type = gso_type;
2362
2363 /* Header must be checked, and gso_segs computed. */
2364 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2365 skb_shinfo(skb)->gso_segs = 0;
2366
2367 len += vnet_hdr_len;
2368 }
2369
Jason Wang40893fd2013-03-26 23:11:22 +00002370 skb_probe_transport_header(skb, reserve);
Jason Wangc1aad272013-03-25 20:19:57 +00002371
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002372 if (unlikely(extra_len == 4))
2373 skb->no_fcs = 1;
2374
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 /*
2376 * Now send it
2377 */
2378
2379 err = dev_queue_xmit(skb);
2380 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2381 goto out_unlock;
2382
Ben Greear827d9782011-06-01 07:18:53 +00002383 if (need_rls_dev)
2384 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002386 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
2388out_free:
2389 kfree_skb(skb);
2390out_unlock:
Ben Greear827d9782011-06-01 07:18:53 +00002391 if (dev && need_rls_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392 dev_put(dev);
2393out:
2394 return err;
2395}
2396
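/*
 * A hedged sketch of what packet_snd() above expects when PACKET_VNET_HDR
 * is enabled: every write starts with a struct virtio_net_hdr. "frame"
 * and "frame_len" are assumptions; GSO and checksum offload are off:
 *
 *	struct virtio_net_hdr vh = { .gso_type = VIRTIO_NET_HDR_GSO_NONE };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &vh,   .iov_len = sizeof(vh) },
 *		{ .iov_base = frame, .iov_len = frame_len },
 *	};
 *	struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *	sendmsg(fd, &mh, 0);
 */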
Johann Baudy69e3c752009-05-18 22:11:22 -07002397static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2398 struct msghdr *msg, size_t len)
2399{
Johann Baudy69e3c752009-05-18 22:11:22 -07002400 struct sock *sk = sock->sk;
2401 struct packet_sock *po = pkt_sk(sk);
2402 if (po->tx_ring.pg_vec)
2403 return tpacket_snd(po, msg);
2404 else
Johann Baudy69e3c752009-05-18 22:11:22 -07002405 return packet_snd(sock, msg, len);
2406}
2407
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408/*
2409 * Close a PACKET socket. This is fairly simple. We immediately go
2410 * to 'closed' state and remove our protocol entry in the device list.
2411 */
2412
2413static int packet_release(struct socket *sock)
2414{
2415 struct sock *sk = sock->sk;
2416 struct packet_sock *po;
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08002417 struct net *net;
chetan lokef6fb8f12011-08-19 10:18:16 +00002418 union tpacket_req_u req_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
2420 if (!sk)
2421 return 0;
2422
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002423 net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 po = pkt_sk(sk);
2425
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002426 mutex_lock(&net->packet.sklist_lock);
stephen hemminger808f5112010-02-22 07:57:18 +00002427 sk_del_node_init_rcu(sk);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002428 mutex_unlock(&net->packet.sklist_lock);
2429
2430 preempt_disable();
Eric Dumazet920de802008-11-24 00:09:29 -08002431 sock_prot_inuse_add(net, sk->sk_prot, -1);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002432 preempt_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
stephen hemminger808f5112010-02-22 07:57:18 +00002434 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002435 unregister_prot_hook(sk, false);
Ben Greear160ff182011-06-01 07:18:52 +00002436 if (po->prot_hook.dev) {
2437 dev_put(po->prot_hook.dev);
2438 po->prot_hook.dev = NULL;
2439 }
stephen hemminger808f5112010-02-22 07:57:18 +00002440 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 packet_flush_mclist(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
Phil Sutter9665d5d2013-02-01 07:21:41 +00002444 if (po->rx_ring.pg_vec) {
2445 memset(&req_u, 0, sizeof(req_u));
chetan lokef6fb8f12011-08-19 10:18:16 +00002446 packet_set_ring(sk, &req_u, 1, 0);
Phil Sutter9665d5d2013-02-01 07:21:41 +00002447 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002448
Phil Sutter9665d5d2013-02-01 07:21:41 +00002449 if (po->tx_ring.pg_vec) {
2450 memset(&req_u, 0, sizeof(req_u));
chetan lokef6fb8f12011-08-19 10:18:16 +00002451 packet_set_ring(sk, &req_u, 1, 1);
Phil Sutter9665d5d2013-02-01 07:21:41 +00002452 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
David S. Millerdc99f602011-07-05 01:45:05 -07002454 fanout_release(sk);
2455
stephen hemminger808f5112010-02-22 07:57:18 +00002456 synchronize_net();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 /*
2458 * Now the socket is dead. No more input will appear.
2459 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 sock_orphan(sk);
2461 sock->sk = NULL;
2462
2463 /* Purge queues */
2464
2465 skb_queue_purge(&sk->sk_receive_queue);
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002466 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467
2468 sock_put(sk);
2469 return 0;
2470}
2471
2472/*
2473 * Attach a packet hook.
2474 */
2475
Al Viro0e11c912006-11-08 00:26:29 -08002476static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477{
2478 struct packet_sock *po = pkt_sk(sk);
David S. Millerdc99f602011-07-05 01:45:05 -07002479
Wei Yongjunaef950b2011-12-27 22:32:41 -05002480 if (po->fanout) {
2481 if (dev)
2482 dev_put(dev);
2483
David S. Millerdc99f602011-07-05 01:45:05 -07002484 return -EINVAL;
Wei Yongjunaef950b2011-12-27 22:32:41 -05002485 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486
2487 lock_sock(sk);
2488
2489 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002490 unregister_prot_hook(sk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 po->num = protocol;
2492 po->prot_hook.type = protocol;
Ben Greear160ff182011-06-01 07:18:52 +00002493 if (po->prot_hook.dev)
2494 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 po->prot_hook.dev = dev;
2496
2497 po->ifindex = dev ? dev->ifindex : 0;
2498
2499 if (protocol == 0)
2500 goto out_unlock;
2501
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002502 if (!dev || (dev->flags & IFF_UP)) {
David S. Millerce06b032011-07-04 01:44:29 -07002503 register_prot_hook(sk);
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002504 } else {
2505 sk->sk_err = ENETDOWN;
2506 if (!sock_flag(sk, SOCK_DEAD))
2507 sk->sk_error_report(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 }
2509
2510out_unlock:
2511 spin_unlock(&po->bind_lock);
2512 release_sock(sk);
2513 return 0;
2514}
2515
2516/*
2517 * Bind a packet socket to a device
2518 */
2519
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002520static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2521 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002523 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 char name[15];
2525 struct net_device *dev;
2526 int err = -ENODEV;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002527
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 /*
2529 * Check legality
2530 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002531
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002532 if (addr_len != sizeof(struct sockaddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002534 strlcpy(name, uaddr->sa_data, sizeof(name));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002536 dev = dev_get_by_name(sock_net(sk), name);
Ben Greear160ff182011-06-01 07:18:52 +00002537 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 return err;
2540}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541
2542static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2543{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002544 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2545 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 struct net_device *dev = NULL;
2547 int err;
2548
2549
2550 /*
2551 * Check legality
2552 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002553
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 if (addr_len < sizeof(struct sockaddr_ll))
2555 return -EINVAL;
2556 if (sll->sll_family != AF_PACKET)
2557 return -EINVAL;
2558
2559 if (sll->sll_ifindex) {
2560 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002561 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562 if (dev == NULL)
2563 goto out;
2564 }
2565 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566
2567out:
2568 return err;
2569}
2570
2571static struct proto packet_proto = {
2572 .name = "PACKET",
2573 .owner = THIS_MODULE,
2574 .obj_size = sizeof(struct packet_sock),
2575};
2576
2577/*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002578 *	Create a packet socket of type SOCK_RAW, SOCK_DGRAM or the obsolete SOCK_PACKET.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 */
2580
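/*
 * A hedged sketch of the three socket() invocations packet_create() below
 * accepts; all of them require CAP_NET_RAW:
 *
 *	socket(AF_PACKET, SOCK_RAW,    htons(ETH_P_ALL));   keeps ll header
 *	socket(AF_PACKET, SOCK_DGRAM,  htons(ETH_P_IP));    strips ll header
 *	socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));   obsolete variant
 */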
Eric Paris3f378b62009-11-05 22:18:14 -08002581static int packet_create(struct net *net, struct socket *sock, int protocol,
2582 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583{
2584 struct sock *sk;
2585 struct packet_sock *po;
Al Viro0e11c912006-11-08 00:26:29 -08002586 __be16 proto = (__force __be16)protocol; /* weird, but documented */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587 int err;
2588
Eric W. Biedermandf008c92012-11-16 03:03:07 +00002589 if (!ns_capable(net->user_ns, CAP_NET_RAW))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 return -EPERM;
David S. Millerbe020972007-05-29 13:16:31 -07002591 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2592 sock->type != SOCK_PACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 return -ESOCKTNOSUPPORT;
2594
2595 sock->state = SS_UNCONNECTED;
2596
2597 err = -ENOBUFS;
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07002598 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 if (sk == NULL)
2600 goto out;
2601
2602 sock->ops = &packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 if (sock->type == SOCK_PACKET)
2604 sock->ops = &packet_ops_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002605
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 sock_init_data(sock, sk);
2607
2608 po = pkt_sk(sk);
2609 sk->sk_family = PF_PACKET;
Al Viro0e11c912006-11-08 00:26:29 -08002610 po->num = proto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611
2612 sk->sk_destruct = packet_sock_destruct;
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002613 sk_refcnt_debug_inc(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
2615 /*
2616 * Attach a protocol block
2617 */
2618
2619 spin_lock_init(&po->bind_lock);
Herbert Xu905db442009-01-30 14:12:06 -08002620 mutex_init(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 po->prot_hook.func = packet_rcv;
David S. Millerbe020972007-05-29 13:16:31 -07002622
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 if (sock->type == SOCK_PACKET)
2624 po->prot_hook.func = packet_rcv_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002625
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 po->prot_hook.af_packet_priv = sk;
2627
Al Viro0e11c912006-11-08 00:26:29 -08002628 if (proto) {
2629 po->prot_hook.type = proto;
David S. Millerce06b032011-07-04 01:44:29 -07002630 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631 }
2632
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002633 mutex_lock(&net->packet.sklist_lock);
stephen hemminger808f5112010-02-22 07:57:18 +00002634 sk_add_node_rcu(sk, &net->packet.sklist);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002635 mutex_unlock(&net->packet.sklist_lock);
2636
2637 preempt_disable();
Eric Dumazet36804532008-11-19 14:25:35 -08002638 sock_prot_inuse_add(net, &packet_proto, 1);
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00002639 preempt_enable();
stephen hemminger808f5112010-02-22 07:57:18 +00002640
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002641 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642out:
2643 return err;
2644}
2645
2646/*
2647 * Pull a packet from our receive queue and hand it to the user.
2648 * If necessary we block.
2649 */
2650
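/*
 * A hedged userspace sketch of picking up the PACKET_AUXDATA control
 * message this path emits once the option is enabled; "fd" and a msghdr
 * "msg" with iovec and cmsg buffer are assumed set up by the caller:
 *
 *	struct tpacket_auxdata *aux;
 *	struct cmsghdr *cmsg;
 *
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA)
 *			aux = (struct tpacket_auxdata *)CMSG_DATA(cmsg);
 */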
2651static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2652 struct msghdr *msg, size_t len, int flags)
2653{
2654 struct sock *sk = sock->sk;
2655 struct sk_buff *skb;
2656 int copied, err;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002657 struct sockaddr_ll *sll;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002658 int vnet_hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659
2660 err = -EINVAL;
Richard Cochraned85b562010-04-07 22:41:28 +00002661 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 goto out;
2663
2664#if 0
2665 /* What error should we return now? EUNATTACH? */
2666 if (pkt_sk(sk)->ifindex < 0)
2667 return -ENODEV;
2668#endif
2669
Richard Cochraned85b562010-04-07 22:41:28 +00002670 if (flags & MSG_ERRQUEUE) {
Richard Cochrancb820f82013-07-19 19:40:09 +02002671 err = sock_recv_errqueue(sk, msg, len,
2672 SOL_PACKET, PACKET_TX_TIMESTAMP);
Richard Cochraned85b562010-04-07 22:41:28 +00002673 goto out;
2674 }
2675
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 * Call the generic datagram receiver. This handles all sorts
2678 * of horrible races and re-entrancy so we can forget about it
2679 * in the protocol layers.
2680 *
2681	 * Now it will return ENETDOWN if the device has just gone down,
2682 * but then it will block.
2683 */
2684
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002685 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686
2687 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002688	 *	An error occurred, so return it. Because skb_recv_datagram()
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689	 *	handles the blocking, we don't have to see or worry about
2690	 *	blocking retries.
2691 */
2692
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002693 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694 goto out;
2695
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002696 if (pkt_sk(sk)->has_vnet_hdr) {
2697 struct virtio_net_hdr vnet_hdr = { 0 };
2698
2699 err = -EINVAL;
2700 vnet_hdr_len = sizeof(vnet_hdr);
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002701 if (len < vnet_hdr_len)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002702 goto out_free;
2703
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002704 len -= vnet_hdr_len;
2705
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002706 if (skb_is_gso(skb)) {
2707 struct skb_shared_info *sinfo = skb_shinfo(skb);
2708
2709 /* This is a hint as to how much should be linear. */
2710 vnet_hdr.hdr_len = skb_headlen(skb);
2711 vnet_hdr.gso_size = sinfo->gso_size;
2712 if (sinfo->gso_type & SKB_GSO_TCPV4)
2713 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2714 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2715 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2716 else if (sinfo->gso_type & SKB_GSO_UDP)
2717 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2718 else if (sinfo->gso_type & SKB_GSO_FCOE)
2719 goto out_free;
2720 else
2721 BUG();
2722 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2723 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2724 } else
2725 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2726
2727 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2728 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
Michał Mirosław55508d62010-12-14 15:24:08 +00002729 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002730 vnet_hdr.csum_offset = skb->csum_offset;
Jason Wang10a8d942011-06-10 00:56:17 +00002731 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2732 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002733 } /* else everything is zero */
2734
2735 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2736 vnet_hdr_len);
2737 if (err < 0)
2738 goto out_free;
2739 }
2740
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 /*
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002742 * If the address length field is there to be filled in, we fill
2743 * it in now.
2744 */
2745
Herbert Xuffbc6112007-02-04 23:33:10 -08002746 sll = &PACKET_SKB_CB(skb)->sa.ll;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002747 if (sock->type == SOCK_PACKET)
2748 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2749 else
2750 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2751
2752 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753	 * You lose any data beyond the buffer you gave. If this worries
 2754	 * a user program, it can ask the device for its MTU anyway.
2755 */
2756
2757 copied = skb->len;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002758 if (copied > len) {
2759 copied = len;
2760 msg->msg_flags |= MSG_TRUNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 }
2762
2763 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2764 if (err)
2765 goto out_free;
2766
Neil Horman3b885782009-10-12 13:26:31 -07002767 sock_recv_ts_and_drops(msg, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768
2769 if (msg->msg_name)
Herbert Xuffbc6112007-02-04 23:33:10 -08002770 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2771 msg->msg_namelen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772
Herbert Xu8dc41942007-02-04 23:31:32 -08002773 if (pkt_sk(sk)->auxdata) {
Herbert Xuffbc6112007-02-04 23:33:10 -08002774 struct tpacket_auxdata aux;
2775
2776 aux.tp_status = TP_STATUS_USER;
2777 if (skb->ip_summed == CHECKSUM_PARTIAL)
2778 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2779 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2780 aux.tp_snaplen = skb->len;
2781 aux.tp_mac = 0;
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002782 aux.tp_net = skb_network_offset(skb);
Ben Greeara3bcc232011-06-01 06:49:10 +00002783 if (vlan_tx_tag_present(skb)) {
2784 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2785 aux.tp_status |= TP_STATUS_VLAN_VALID;
2786 } else {
2787 aux.tp_vlan_tci = 0;
2788 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07002789 aux.tp_padding = 0;
Herbert Xuffbc6112007-02-04 23:33:10 -08002790 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
Herbert Xu8dc41942007-02-04 23:31:32 -08002791 }
2792
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 /*
2794 * Free or return the buffer as appropriate. Again this
2795 * hides all the races and re-entrancy issues from us.
2796 */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002797 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798
2799out_free:
2800 skb_free_datagram(sk, skb);
2801out:
2802 return err;
2803}
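/*
 * Example (userspace sketch, not kernel code): consuming the
 * PACKET_AUXDATA control message that packet_recvmsg() emits above.
 * "fd" is assumed to be an open AF_PACKET socket; error handling is
 * elided.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	char frame[2048];
 *	char ctl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *	};
 *	int on = 1;
 *	struct cmsghdr *c;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux =
 *				(struct tpacket_auxdata *)CMSG_DATA(c);
 *			... aux->tp_snaplen, aux->tp_vlan_tci ...
 *		}
 */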
2804
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2806 int *uaddr_len, int peer)
2807{
2808 struct net_device *dev;
2809 struct sock *sk = sock->sk;
2810
2811 if (peer)
2812 return -EOPNOTSUPP;
2813
2814 uaddr->sa_family = AF_PACKET;
Daniel Borkmann2dc85bf2013-06-12 16:02:27 +02002815 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
Eric Dumazet654d1f82009-11-02 10:43:32 +01002816 rcu_read_lock();
2817 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2818 if (dev)
Daniel Borkmann2dc85bf2013-06-12 16:02:27 +02002819 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
Eric Dumazet654d1f82009-11-02 10:43:32 +01002820 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 *uaddr_len = sizeof(*uaddr);
2822
2823 return 0;
2824}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825
2826static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2827 int *uaddr_len, int peer)
2828{
2829 struct net_device *dev;
2830 struct sock *sk = sock->sk;
2831 struct packet_sock *po = pkt_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00002832 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833
2834 if (peer)
2835 return -EOPNOTSUPP;
2836
2837 sll->sll_family = AF_PACKET;
2838 sll->sll_ifindex = po->ifindex;
2839 sll->sll_protocol = po->num;
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002840 sll->sll_pkttype = 0;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002841 rcu_read_lock();
2842 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 if (dev) {
2844 sll->sll_hatype = dev->type;
2845 sll->sll_halen = dev->addr_len;
2846 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 } else {
2848 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2849 sll->sll_halen = 0;
2850 }
Eric Dumazet654d1f82009-11-02 10:43:32 +01002851 rcu_read_unlock();
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002852 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853
2854 return 0;
2855}
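/*
 * Example (userspace sketch): reading back what packet_getname()
 * fills in. Assumes "fd" is an AF_PACKET socket already bound to a
 * device.
 *
 *	struct sockaddr_ll sll;
 *	socklen_t alen = sizeof(sll);
 *
 *	getsockname(fd, (struct sockaddr *)&sll, &alen);
 *	... sll.sll_ifindex identifies the device, sll.sll_hatype is
 *	its ARPHRD_* type, and sll.sll_addr[0..sll_halen-1] holds its
 *	hardware address ...
 */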
2856
Wang Chen2aeb0b82008-07-14 20:49:46 -07002857static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2858 int what)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859{
2860 switch (i->type) {
2861 case PACKET_MR_MULTICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002862 if (i->alen != dev->addr_len)
2863 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 if (what > 0)
Jiri Pirko22bedad32010-04-01 21:22:57 +00002865 return dev_mc_add(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 else
Jiri Pirko22bedad32010-04-01 21:22:57 +00002867 return dev_mc_del(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 break;
2869 case PACKET_MR_PROMISC:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002870 return dev_set_promiscuity(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 break;
2872 case PACKET_MR_ALLMULTI:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002873 return dev_set_allmulti(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 break;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002875 case PACKET_MR_UNICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002876 if (i->alen != dev->addr_len)
2877 return -EINVAL;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002878 if (what > 0)
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002879 return dev_uc_add(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002880 else
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002881 return dev_uc_del(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002882 break;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002883 default:
2884 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 }
Wang Chen2aeb0b82008-07-14 20:49:46 -07002886 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887}
2888
2889static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2890{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002891 for ( ; i; i = i->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892 if (i->ifindex == dev->ifindex)
2893 packet_dev_mc(dev, i, what);
2894 }
2895}
2896
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002897static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898{
2899 struct packet_sock *po = pkt_sk(sk);
2900 struct packet_mclist *ml, *i;
2901 struct net_device *dev;
2902 int err;
2903
2904 rtnl_lock();
2905
2906 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002907 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 if (!dev)
2909 goto done;
2910
2911 err = -EINVAL;
Jiri Pirko11625632010-03-02 20:40:01 +00002912 if (mreq->mr_alen > dev->addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 goto done;
2914
2915 err = -ENOBUFS;
Kris Katterjohn8b3a7002006-01-11 15:56:43 -08002916 i = kmalloc(sizeof(*i), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917 if (i == NULL)
2918 goto done;
2919
2920 err = 0;
2921 for (ml = po->mclist; ml; ml = ml->next) {
2922 if (ml->ifindex == mreq->mr_ifindex &&
2923 ml->type == mreq->mr_type &&
2924 ml->alen == mreq->mr_alen &&
2925 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2926 ml->count++;
2927 /* Free the new element ... */
2928 kfree(i);
2929 goto done;
2930 }
2931 }
2932
2933 i->type = mreq->mr_type;
2934 i->ifindex = mreq->mr_ifindex;
2935 i->alen = mreq->mr_alen;
2936 memcpy(i->addr, mreq->mr_address, i->alen);
2937 i->count = 1;
2938 i->next = po->mclist;
2939 po->mclist = i;
Wang Chen2aeb0b82008-07-14 20:49:46 -07002940 err = packet_dev_mc(dev, i, 1);
2941 if (err) {
2942 po->mclist = i->next;
2943 kfree(i);
2944 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
2946done:
2947 rtnl_unlock();
2948 return err;
2949}
2950
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002951static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952{
2953 struct packet_mclist *ml, **mlp;
2954
2955 rtnl_lock();
2956
2957 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2958 if (ml->ifindex == mreq->mr_ifindex &&
2959 ml->type == mreq->mr_type &&
2960 ml->alen == mreq->mr_alen &&
2961 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2962 if (--ml->count == 0) {
2963 struct net_device *dev;
2964 *mlp = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00002965 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2966 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 kfree(ml);
2969 }
2970 rtnl_unlock();
2971 return 0;
2972 }
2973 }
2974 rtnl_unlock();
2975 return -EADDRNOTAVAIL;
2976}
2977
2978static void packet_flush_mclist(struct sock *sk)
2979{
2980 struct packet_sock *po = pkt_sk(sk);
2981 struct packet_mclist *ml;
2982
2983 if (!po->mclist)
2984 return;
2985
2986 rtnl_lock();
2987 while ((ml = po->mclist) != NULL) {
2988 struct net_device *dev;
2989
2990 po->mclist = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00002991 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
2992 if (dev != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 kfree(ml);
2995 }
2996 rtnl_unlock();
2997}
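/*
 * Example (userspace sketch): the membership helpers above implement
 * PACKET_ADD_MEMBERSHIP/PACKET_DROP_MEMBERSHIP. A minimal sketch that
 * enables promiscuous mode through the refcounted mclist instead of
 * toggling IFF_PROMISC directly ("eth0" is illustrative):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 *	... capture ...
 *	setsockopt(fd, SOL_PACKET, PACKET_DROP_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */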
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998
2999static int
David S. Millerb7058842009-09-30 16:12:20 -07003000packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001{
3002 struct sock *sk = sock->sk;
Herbert Xu8dc41942007-02-04 23:31:32 -08003003 struct packet_sock *po = pkt_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 int ret;
3005
3006 if (level != SOL_PACKET)
3007 return -ENOPROTOOPT;
3008
Johann Baudy69e3c752009-05-18 22:11:22 -07003009 switch (optname) {
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003010 case PACKET_ADD_MEMBERSHIP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 case PACKET_DROP_MEMBERSHIP:
3012 {
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003013 struct packet_mreq_max mreq;
3014 int len = optlen;
3015 memset(&mreq, 0, sizeof(mreq));
3016 if (len < sizeof(struct packet_mreq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017 return -EINVAL;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003018 if (len > sizeof(mreq))
3019 len = sizeof(mreq);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003020 if (copy_from_user(&mreq, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 return -EFAULT;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003022 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3023 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 if (optname == PACKET_ADD_MEMBERSHIP)
3025 ret = packet_mc_add(sk, &mreq);
3026 else
3027 ret = packet_mc_drop(sk, &mreq);
3028 return ret;
3029 }
David S. Millera2efcfa2007-05-29 13:12:50 -07003030
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 case PACKET_RX_RING:
Johann Baudy69e3c752009-05-18 22:11:22 -07003032 case PACKET_TX_RING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 {
chetan lokef6fb8f12011-08-19 10:18:16 +00003034 union tpacket_req_u req_u;
3035 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036
chetan lokef6fb8f12011-08-19 10:18:16 +00003037 switch (po->tp_version) {
3038 case TPACKET_V1:
3039 case TPACKET_V2:
3040 len = sizeof(req_u.req);
3041 break;
3042 case TPACKET_V3:
3043 default:
3044 len = sizeof(req_u.req3);
3045 break;
3046 }
3047 if (optlen < len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 return -EINVAL;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003049 if (pkt_sk(sk)->has_vnet_hdr)
3050 return -EINVAL;
chetan lokef6fb8f12011-08-19 10:18:16 +00003051 if (copy_from_user(&req_u.req, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 return -EFAULT;
chetan lokef6fb8f12011-08-19 10:18:16 +00003053 return packet_set_ring(sk, &req_u, 0,
3054 optname == PACKET_TX_RING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 }
3056 case PACKET_COPY_THRESH:
3057 {
3058 int val;
3059
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003060 if (optlen != sizeof(val))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003062 if (copy_from_user(&val, optval, sizeof(val)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 return -EFAULT;
3064
3065 pkt_sk(sk)->copy_thresh = val;
3066 return 0;
3067 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003068 case PACKET_VERSION:
3069 {
3070 int val;
3071
3072 if (optlen != sizeof(val))
3073 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003074 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003075 return -EBUSY;
3076 if (copy_from_user(&val, optval, sizeof(val)))
3077 return -EFAULT;
3078 switch (val) {
3079 case TPACKET_V1:
3080 case TPACKET_V2:
chetan lokef6fb8f12011-08-19 10:18:16 +00003081 case TPACKET_V3:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003082 po->tp_version = val;
3083 return 0;
3084 default:
3085 return -EINVAL;
3086 }
3087 }
Patrick McHardy8913336a2008-07-18 18:05:19 -07003088 case PACKET_RESERVE:
3089 {
3090 unsigned int val;
3091
3092 if (optlen != sizeof(val))
3093 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003094 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardy8913336a2008-07-18 18:05:19 -07003095 return -EBUSY;
3096 if (copy_from_user(&val, optval, sizeof(val)))
3097 return -EFAULT;
3098 po->tp_reserve = val;
3099 return 0;
3100 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003101 case PACKET_LOSS:
3102 {
3103 unsigned int val;
3104
3105 if (optlen != sizeof(val))
3106 return -EINVAL;
3107 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3108 return -EBUSY;
3109 if (copy_from_user(&val, optval, sizeof(val)))
3110 return -EFAULT;
3111 po->tp_loss = !!val;
3112 return 0;
3113 }
Herbert Xu8dc41942007-02-04 23:31:32 -08003114 case PACKET_AUXDATA:
3115 {
3116 int val;
3117
3118 if (optlen < sizeof(val))
3119 return -EINVAL;
3120 if (copy_from_user(&val, optval, sizeof(val)))
3121 return -EFAULT;
3122
3123 po->auxdata = !!val;
3124 return 0;
3125 }
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003126 case PACKET_ORIGDEV:
3127 {
3128 int val;
3129
3130 if (optlen < sizeof(val))
3131 return -EINVAL;
3132 if (copy_from_user(&val, optval, sizeof(val)))
3133 return -EFAULT;
3134
3135 po->origdev = !!val;
3136 return 0;
3137 }
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003138 case PACKET_VNET_HDR:
3139 {
3140 int val;
3141
3142 if (sock->type != SOCK_RAW)
3143 return -EINVAL;
3144 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3145 return -EBUSY;
3146 if (optlen < sizeof(val))
3147 return -EINVAL;
3148 if (copy_from_user(&val, optval, sizeof(val)))
3149 return -EFAULT;
3150
3151 po->has_vnet_hdr = !!val;
3152 return 0;
3153 }
Scott McMillan614f60f2010-06-02 05:53:56 -07003154 case PACKET_TIMESTAMP:
3155 {
3156 int val;
3157
3158 if (optlen != sizeof(val))
3159 return -EINVAL;
3160 if (copy_from_user(&val, optval, sizeof(val)))
3161 return -EFAULT;
3162
3163 po->tp_tstamp = val;
3164 return 0;
3165 }
David S. Millerdc99f602011-07-05 01:45:05 -07003166 case PACKET_FANOUT:
3167 {
3168 int val;
3169
3170 if (optlen != sizeof(val))
3171 return -EINVAL;
3172 if (copy_from_user(&val, optval, sizeof(val)))
3173 return -EFAULT;
3174
3175 return fanout_add(sk, val & 0xffff, val >> 16);
3176 }
Paul Chavent5920cd3a2012-11-06 23:10:47 +00003177 case PACKET_TX_HAS_OFF:
3178 {
3179 unsigned int val;
3180
3181 if (optlen != sizeof(val))
3182 return -EINVAL;
3183 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3184 return -EBUSY;
3185 if (copy_from_user(&val, optval, sizeof(val)))
3186 return -EFAULT;
3187 po->tp_tx_has_off = !!val;
3188 return 0;
3189 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 default:
3191 return -ENOPROTOOPT;
3192 }
3193}
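/*
 * Example (userspace sketch): option ordering implied by the -EBUSY
 * checks above. PACKET_VERSION and PACKET_RESERVE are rejected once a
 * ring exists, so set them before PACKET_RX_RING/PACKET_TX_RING:
 *
 *	int version = TPACKET_V2;
 *	unsigned int reserve = 16;	(size is illustrative)
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION,
 *		   &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RESERVE,
 *		   &reserve, sizeof(reserve));
 *	... then request the ring, see packet_set_ring() below ...
 */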
3194
3195static int packet_getsockopt(struct socket *sock, int level, int optname,
3196 char __user *optval, int __user *optlen)
3197{
3198 int len;
Eric Dumazetc06fff62012-04-19 21:56:11 +00003199 int val, lv = sizeof(val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200 struct sock *sk = sock->sk;
3201 struct packet_sock *po = pkt_sk(sk);
Eric Dumazetc06fff62012-04-19 21:56:11 +00003202 void *data = &val;
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00003203 union tpacket_stats_u st;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204
3205 if (level != SOL_PACKET)
3206 return -ENOPROTOOPT;
3207
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003208 if (get_user(len, optlen))
3209 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210
3211 if (len < 0)
3212 return -EINVAL;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003213
Johann Baudy69e3c752009-05-18 22:11:22 -07003214 switch (optname) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 case PACKET_STATISTICS:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216 spin_lock_bh(&sk->sk_receive_queue.lock);
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00003217 memcpy(&st, &po->stats, sizeof(st));
3218 memset(&po->stats, 0, sizeof(po->stats));
3219 spin_unlock_bh(&sk->sk_receive_queue.lock);
3220
chetan lokef6fb8f12011-08-19 10:18:16 +00003221 if (po->tp_version == TPACKET_V3) {
Eric Dumazetc06fff62012-04-19 21:56:11 +00003222 lv = sizeof(struct tpacket_stats_v3);
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00003223 data = &st.stats3;
chetan lokef6fb8f12011-08-19 10:18:16 +00003224 } else {
Eric Dumazetc06fff62012-04-19 21:56:11 +00003225 lv = sizeof(struct tpacket_stats);
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00003226 data = &st.stats1;
chetan lokef6fb8f12011-08-19 10:18:16 +00003227 }
Daniel Borkmannee80fbf2013-04-19 06:12:29 +00003228
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 break;
Herbert Xu8dc41942007-02-04 23:31:32 -08003230 case PACKET_AUXDATA:
Herbert Xu8dc41942007-02-04 23:31:32 -08003231 val = po->auxdata;
Herbert Xu8dc41942007-02-04 23:31:32 -08003232 break;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003233 case PACKET_ORIGDEV:
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003234 val = po->origdev;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003235 break;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003236 case PACKET_VNET_HDR:
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003237 val = po->has_vnet_hdr;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003238 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003239 case PACKET_VERSION:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003240 val = po->tp_version;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003241 break;
3242 case PACKET_HDRLEN:
3243 if (len > sizeof(int))
3244 len = sizeof(int);
3245 if (copy_from_user(&val, optval, len))
3246 return -EFAULT;
3247 switch (val) {
3248 case TPACKET_V1:
3249 val = sizeof(struct tpacket_hdr);
3250 break;
3251 case TPACKET_V2:
3252 val = sizeof(struct tpacket2_hdr);
3253 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00003254 case TPACKET_V3:
3255 val = sizeof(struct tpacket3_hdr);
3256 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003257 default:
3258 return -EINVAL;
3259 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003260 break;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003261 case PACKET_RESERVE:
Patrick McHardy8913336a2008-07-18 18:05:19 -07003262 val = po->tp_reserve;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003263 break;
Johann Baudy69e3c752009-05-18 22:11:22 -07003264 case PACKET_LOSS:
Johann Baudy69e3c752009-05-18 22:11:22 -07003265 val = po->tp_loss;
Johann Baudy69e3c752009-05-18 22:11:22 -07003266 break;
Scott McMillan614f60f2010-06-02 05:53:56 -07003267 case PACKET_TIMESTAMP:
Scott McMillan614f60f2010-06-02 05:53:56 -07003268 val = po->tp_tstamp;
Scott McMillan614f60f2010-06-02 05:53:56 -07003269 break;
David S. Millerdc99f602011-07-05 01:45:05 -07003270 case PACKET_FANOUT:
David S. Millerdc99f602011-07-05 01:45:05 -07003271 val = (po->fanout ?
3272 ((u32)po->fanout->id |
Willem de Bruijn77f65eb2013-03-19 10:18:11 +00003273 ((u32)po->fanout->type << 16) |
3274 ((u32)po->fanout->flags << 24)) :
David S. Millerdc99f602011-07-05 01:45:05 -07003275 0);
David S. Millerdc99f602011-07-05 01:45:05 -07003276 break;
Paul Chavent5920cd3a2012-11-06 23:10:47 +00003277 case PACKET_TX_HAS_OFF:
3278 val = po->tp_tx_has_off;
3279 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 default:
3281 return -ENOPROTOOPT;
3282 }
3283
Eric Dumazetc06fff62012-04-19 21:56:11 +00003284 if (len > lv)
3285 len = lv;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003286 if (put_user(len, optlen))
3287 return -EFAULT;
Herbert Xu8dc41942007-02-04 23:31:32 -08003288 if (copy_to_user(optval, data, len))
3289 return -EFAULT;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003290 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291}
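/*
 * Example (userspace sketch): draining PACKET_STATISTICS. The kernel
 * zeroes its counters on every read (memcpy then memset above), so
 * each call returns deltas since the previous one:
 *
 *	struct tpacket_stats st;	(tpacket_stats_v3 for TPACKET_V3)
 *	socklen_t len = sizeof(st);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *	... st.tp_packets received, st.tp_drops dropped ...
 */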
3292
3293
Jiri Pirko351638e2013-05-28 01:30:21 +00003294static int packet_notifier(struct notifier_block *this,
3295 unsigned long msg, void *ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296{
3297 struct sock *sk;
Jiri Pirko351638e2013-05-28 01:30:21 +00003298 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003299 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300
stephen hemminger808f5112010-02-22 07:57:18 +00003301 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08003302 sk_for_each_rcu(sk, &net->packet.sklist) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 struct packet_sock *po = pkt_sk(sk);
3304
3305 switch (msg) {
3306 case NETDEV_UNREGISTER:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 if (po->mclist)
3308 packet_dev_mclist(dev, po->mclist, -1);
David S. Millera2efcfa2007-05-29 13:12:50 -07003309 /* fallthrough */
3310
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311 case NETDEV_DOWN:
3312 if (dev->ifindex == po->ifindex) {
3313 spin_lock(&po->bind_lock);
3314 if (po->running) {
David S. Millerce06b032011-07-04 01:44:29 -07003315 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 sk->sk_err = ENETDOWN;
3317 if (!sock_flag(sk, SOCK_DEAD))
3318 sk->sk_error_report(sk);
3319 }
3320 if (msg == NETDEV_UNREGISTER) {
3321 po->ifindex = -1;
Ben Greear160ff182011-06-01 07:18:52 +00003322 if (po->prot_hook.dev)
3323 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324 po->prot_hook.dev = NULL;
3325 }
3326 spin_unlock(&po->bind_lock);
3327 }
3328 break;
3329 case NETDEV_UP:
stephen hemminger808f5112010-02-22 07:57:18 +00003330 if (dev->ifindex == po->ifindex) {
3331 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003332 if (po->num)
3333 register_prot_hook(sk);
stephen hemminger808f5112010-02-22 07:57:18 +00003334 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 break;
3337 }
3338 }
stephen hemminger808f5112010-02-22 07:57:18 +00003339 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 return NOTIFY_DONE;
3341}
3342
3343
3344static int packet_ioctl(struct socket *sock, unsigned int cmd,
3345 unsigned long arg)
3346{
3347 struct sock *sk = sock->sk;
3348
Johann Baudy69e3c752009-05-18 22:11:22 -07003349 switch (cmd) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003350 case SIOCOUTQ:
3351 {
3352 int amount = sk_wmem_alloc_get(sk);
Eric Dumazet31e6d362009-06-17 19:05:41 -07003353
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003354 return put_user(amount, (int __user *)arg);
3355 }
3356 case SIOCINQ:
3357 {
3358 struct sk_buff *skb;
3359 int amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003361 spin_lock_bh(&sk->sk_receive_queue.lock);
3362 skb = skb_peek(&sk->sk_receive_queue);
3363 if (skb)
3364 amount = skb->len;
3365 spin_unlock_bh(&sk->sk_receive_queue.lock);
3366 return put_user(amount, (int __user *)arg);
3367 }
3368 case SIOCGSTAMP:
3369 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3370 case SIOCGSTAMPNS:
3371 return sock_get_timestampns(sk, (struct timespec __user *)arg);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003372
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373#ifdef CONFIG_INET
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003374 case SIOCADDRT:
3375 case SIOCDELRT:
3376 case SIOCDARP:
3377 case SIOCGARP:
3378 case SIOCSARP:
3379 case SIOCGIFADDR:
3380 case SIOCSIFADDR:
3381 case SIOCGIFBRDADDR:
3382 case SIOCSIFBRDADDR:
3383 case SIOCGIFNETMASK:
3384 case SIOCSIFNETMASK:
3385 case SIOCGIFDSTADDR:
3386 case SIOCSIFDSTADDR:
3387 case SIOCSIFFLAGS:
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003388 return inet_dgram_ops.ioctl(sock, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389#endif
3390
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003391 default:
3392 return -ENOIOCTLCMD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 }
3394 return 0;
3395}
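/*
 * Example (userspace sketch): the SIOCINQ branch above reports the
 * length of the *next* queued frame, not the total queue size:
 *
 *	int next_len = 0;
 *	struct timeval tv;
 *
 *	ioctl(fd, SIOCINQ, &next_len);
 *	ioctl(fd, SIOCGSTAMP, &tv);	(timestamp of last received frame)
 */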
3396
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003397static unsigned int packet_poll(struct file *file, struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 poll_table *wait)
3399{
3400 struct sock *sk = sock->sk;
3401 struct packet_sock *po = pkt_sk(sk);
3402 unsigned int mask = datagram_poll(file, sock, wait);
3403
3404 spin_lock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003405 if (po->rx_ring.pg_vec) {
chetan lokef6fb8f12011-08-19 10:18:16 +00003406 if (!packet_previous_rx_frame(po, &po->rx_ring,
3407 TP_STATUS_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408 mask |= POLLIN | POLLRDNORM;
3409 }
3410 spin_unlock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003411 spin_lock_bh(&sk->sk_write_queue.lock);
3412 if (po->tx_ring.pg_vec) {
3413 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3414 mask |= POLLOUT | POLLWRNORM;
3415 }
3416 spin_unlock_bh(&sk->sk_write_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 return mask;
3418}
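/*
 * Example (userspace sketch): event loop against the poll semantics
 * above; POLLIN fires once the frame at the RX ring head has been
 * handed to user space:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		... process frames whose tp_status has TP_STATUS_USER
 *		set, then store TP_STATUS_KERNEL back to return each
 *		frame to the kernel ...
 *	}
 */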
3419
3420
 3421/* Dirty? Well, I still have not learned a better way to account
 3422 * for user mmaps.
3423 */
3424
3425static void packet_mm_open(struct vm_area_struct *vma)
3426{
3427 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003428 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003430
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 if (sk)
3432 atomic_inc(&pkt_sk(sk)->mapped);
3433}
3434
3435static void packet_mm_close(struct vm_area_struct *vma)
3436{
3437 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003438 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003440
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441 if (sk)
3442 atomic_dec(&pkt_sk(sk)->mapped);
3443}
3444
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003445static const struct vm_operations_struct packet_mmap_ops = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003446 .open = packet_mm_open,
3447 .close = packet_mm_close,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448};
3449
Neil Horman0e3125c2010-11-16 10:26:47 -08003450static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3451 unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452{
3453 int i;
3454
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003455 for (i = 0; i < len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003456 if (likely(pg_vec[i].buffer)) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003457 if (is_vmalloc_addr(pg_vec[i].buffer))
Neil Horman0e3125c2010-11-16 10:26:47 -08003458 vfree(pg_vec[i].buffer);
3459 else
3460 free_pages((unsigned long)pg_vec[i].buffer,
3461 order);
3462 pg_vec[i].buffer = NULL;
3463 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464 }
3465 kfree(pg_vec);
3466}
3467
Olof Johanssoneea49cc92011-11-02 11:00:49 +00003468static char *alloc_one_pg_vec_page(unsigned long order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003469{
Neil Horman0e3125c2010-11-16 10:26:47 -08003470 char *buffer = NULL;
3471 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3472 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
Eric Dumazet719bfea2009-04-15 03:39:52 -07003473
Neil Horman0e3125c2010-11-16 10:26:47 -08003474 buffer = (char *) __get_free_pages(gfp_flags, order);
3475
3476 if (buffer)
3477 return buffer;
3478
3479 /*
3480 * __get_free_pages failed, fall back to vmalloc
3481 */
Eric Dumazetbbce5a52010-11-20 07:31:54 +00003482 buffer = vzalloc((1 << order) * PAGE_SIZE);
Neil Horman0e3125c2010-11-16 10:26:47 -08003483
3484 if (buffer)
3485 return buffer;
3486
3487 /*
 3488	 * vmalloc failed, let's dig into swap here
3489 */
Neil Horman0e3125c2010-11-16 10:26:47 -08003490 gfp_flags &= ~__GFP_NORETRY;
3491 buffer = (char *)__get_free_pages(gfp_flags, order);
3492 if (buffer)
3493 return buffer;
3494
3495 /*
3496 * complete and utter failure
3497 */
3498 return NULL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003499}
3500
Neil Horman0e3125c2010-11-16 10:26:47 -08003501static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003502{
3503 unsigned int block_nr = req->tp_block_nr;
Neil Horman0e3125c2010-11-16 10:26:47 -08003504 struct pgv *pg_vec;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003505 int i;
3506
Neil Horman0e3125c2010-11-16 10:26:47 -08003507 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003508 if (unlikely(!pg_vec))
3509 goto out;
3510
3511 for (i = 0; i < block_nr; i++) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003512 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
Neil Horman0e3125c2010-11-16 10:26:47 -08003513 if (unlikely(!pg_vec[i].buffer))
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003514 goto out_free_pgvec;
3515 }
3516
3517out:
3518 return pg_vec;
3519
3520out_free_pgvec:
3521 free_pg_vec(pg_vec, order, block_nr);
3522 pg_vec = NULL;
3523 goto out;
3524}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525
chetan lokef6fb8f12011-08-19 10:18:16 +00003526static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
Johann Baudy69e3c752009-05-18 22:11:22 -07003527 int closing, int tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528{
Neil Horman0e3125c2010-11-16 10:26:47 -08003529 struct pgv *pg_vec = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 struct packet_sock *po = pkt_sk(sk);
Al Viro0e11c912006-11-08 00:26:29 -08003531 int was_running, order = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003532 struct packet_ring_buffer *rb;
3533 struct sk_buff_head *rb_queue;
Al Viro0e11c912006-11-08 00:26:29 -08003534 __be16 num;
chetan lokef6fb8f12011-08-19 10:18:16 +00003535 int err = -EINVAL;
3536 /* Added to avoid minimal code churn */
3537 struct tpacket_req *req = &req_u->req;
3538
3539 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3540 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3541 WARN(1, "Tx-ring is not supported.\n");
3542 goto out;
3543 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003544
3545 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3546 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3547
3548 err = -EBUSY;
3549 if (!closing) {
3550 if (atomic_read(&po->mapped))
3551 goto out;
3552 if (atomic_read(&rb->pending))
3553 goto out;
3554 }
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003555
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556 if (req->tp_block_nr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 /* Sanity tests and some calculations */
Johann Baudy69e3c752009-05-18 22:11:22 -07003558 err = -EBUSY;
3559 if (unlikely(rb->pg_vec))
3560 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003562 switch (po->tp_version) {
3563 case TPACKET_V1:
3564 po->tp_hdrlen = TPACKET_HDRLEN;
3565 break;
3566 case TPACKET_V2:
3567 po->tp_hdrlen = TPACKET2_HDRLEN;
3568 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00003569 case TPACKET_V3:
3570 po->tp_hdrlen = TPACKET3_HDRLEN;
3571 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003572 }
3573
Johann Baudy69e3c752009-05-18 22:11:22 -07003574 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003575 if (unlikely((int)req->tp_block_size <= 0))
Johann Baudy69e3c752009-05-18 22:11:22 -07003576 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003577 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003578 goto out;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003579 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
Johann Baudy69e3c752009-05-18 22:11:22 -07003580 po->tp_reserve))
3581 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003582 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003583 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584
Johann Baudy69e3c752009-05-18 22:11:22 -07003585 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3586 if (unlikely(rb->frames_per_block <= 0))
3587 goto out;
3588 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3589 req->tp_frame_nr))
3590 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591
3592 err = -ENOMEM;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003593 order = get_order(req->tp_block_size);
3594 pg_vec = alloc_pg_vec(req, order);
3595 if (unlikely(!pg_vec))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 goto out;
chetan lokef6fb8f12011-08-19 10:18:16 +00003597 switch (po->tp_version) {
3598 case TPACKET_V3:
 3599		/* Transmit path is not supported. We checked
 3600		 * it above, but stay paranoid and check again.
3601 */
3602 if (!tx_ring)
3603 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3604 break;
3605 default:
3606 break;
3607 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003608 }
3609 /* Done */
3610 else {
3611 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003612 if (unlikely(req->tp_frame_nr))
Johann Baudy69e3c752009-05-18 22:11:22 -07003613 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 }
3615
3616 lock_sock(sk);
3617
3618 /* Detach socket from network */
3619 spin_lock(&po->bind_lock);
3620 was_running = po->running;
3621 num = po->num;
3622 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 po->num = 0;
David S. Millerce06b032011-07-04 01:44:29 -07003624 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003625 }
3626 spin_unlock(&po->bind_lock);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003627
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628 synchronize_net();
3629
3630 err = -EBUSY;
Herbert Xu905db442009-01-30 14:12:06 -08003631 mutex_lock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 if (closing || atomic_read(&po->mapped) == 0) {
3633 err = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003634 spin_lock_bh(&rb_queue->lock);
Changli Gaoc053fd92010-12-10 16:02:20 -08003635 swap(rb->pg_vec, pg_vec);
Johann Baudy69e3c752009-05-18 22:11:22 -07003636 rb->frame_max = (req->tp_frame_nr - 1);
3637 rb->head = 0;
3638 rb->frame_size = req->tp_frame_size;
3639 spin_unlock_bh(&rb_queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640
Changli Gaoc053fd92010-12-10 16:02:20 -08003641 swap(rb->pg_vec_order, order);
3642 swap(rb->pg_vec_len, req->tp_block_nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643
Johann Baudy69e3c752009-05-18 22:11:22 -07003644 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3645 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3646 tpacket_rcv : packet_rcv;
3647 skb_queue_purge(rb_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 if (atomic_read(&po->mapped))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003649 pr_err("packet_mmap: vma is busy: %d\n",
3650 atomic_read(&po->mapped));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 }
Herbert Xu905db442009-01-30 14:12:06 -08003652 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653
3654 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003655 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656 po->num = num;
David S. Millerce06b032011-07-04 01:44:29 -07003657 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658 }
3659 spin_unlock(&po->bind_lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00003660 if (closing && (po->tp_version > TPACKET_V2)) {
3661 /* Because we don't support block-based V3 on tx-ring */
3662 if (!tx_ring)
3663 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3664 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665 release_sock(sk);
3666
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667 if (pg_vec)
3668 free_pg_vec(pg_vec, order, req->tp_block_nr);
3669out:
3670 return err;
3671}
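/*
 * Example (userspace sketch): a TPACKET_V2 RX ring request satisfying
 * the sanity checks above on a 4 KiB page system: block size a
 * multiple of PAGE_SIZE, frame size TPACKET_ALIGNMENT-aligned and at
 * least tp_hdrlen + tp_reserve, frames dividing blocks exactly:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 1 << 16,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 1 << 11,
 *		.tp_frame_nr   = 64 * ((1 << 16) / (1 << 11)),
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */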
3672
Johann Baudy69e3c752009-05-18 22:11:22 -07003673static int packet_mmap(struct file *file, struct socket *sock,
3674 struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675{
3676 struct sock *sk = sock->sk;
3677 struct packet_sock *po = pkt_sk(sk);
Johann Baudy69e3c752009-05-18 22:11:22 -07003678 unsigned long size, expected_size;
3679 struct packet_ring_buffer *rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680 unsigned long start;
3681 int err = -EINVAL;
3682 int i;
3683
3684 if (vma->vm_pgoff)
3685 return -EINVAL;
3686
Herbert Xu905db442009-01-30 14:12:06 -08003687 mutex_lock(&po->pg_vec_lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003688
3689 expected_size = 0;
3690 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3691 if (rb->pg_vec) {
3692 expected_size += rb->pg_vec_len
3693 * rb->pg_vec_pages
3694 * PAGE_SIZE;
3695 }
3696 }
3697
3698 if (expected_size == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07003700
3701 size = vma->vm_end - vma->vm_start;
3702 if (size != expected_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 goto out;
3704
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 start = vma->vm_start;
Johann Baudy69e3c752009-05-18 22:11:22 -07003706 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3707 if (rb->pg_vec == NULL)
3708 continue;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003709
Johann Baudy69e3c752009-05-18 22:11:22 -07003710 for (i = 0; i < rb->pg_vec_len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003711 struct page *page;
3712 void *kaddr = rb->pg_vec[i].buffer;
Johann Baudy69e3c752009-05-18 22:11:22 -07003713 int pg_num;
3714
Changli Gaoc56b4d92010-12-01 02:52:57 +00003715 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3716 page = pgv_to_page(kaddr);
Johann Baudy69e3c752009-05-18 22:11:22 -07003717 err = vm_insert_page(vma, start, page);
3718 if (unlikely(err))
3719 goto out;
3720 start += PAGE_SIZE;
Neil Horman0e3125c2010-11-16 10:26:47 -08003721 kaddr += PAGE_SIZE;
Johann Baudy69e3c752009-05-18 22:11:22 -07003722 }
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003723 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003725
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003726 atomic_inc(&po->mapped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727 vma->vm_ops = &packet_mmap_ops;
3728 err = 0;
3729
3730out:
Herbert Xu905db442009-01-30 14:12:06 -08003731 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732 return err;
3733}
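/*
 * Example (userspace sketch): mapping the ring(s) configured above.
 * The mapping must start at offset 0 and cover the RX and TX rings
 * exactly; with both present, the TX ring follows the RX ring:
 *
 *	size_t sz = (size_t)req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 *	... frame i then starts at ring + i * req.tp_frame_size with a
 *	struct tpacket2_hdr at its head ...
 */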
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003735static const struct proto_ops packet_ops_spkt = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736 .family = PF_PACKET,
3737 .owner = THIS_MODULE,
3738 .release = packet_release,
3739 .bind = packet_bind_spkt,
3740 .connect = sock_no_connect,
3741 .socketpair = sock_no_socketpair,
3742 .accept = sock_no_accept,
3743 .getname = packet_getname_spkt,
3744 .poll = datagram_poll,
3745 .ioctl = packet_ioctl,
3746 .listen = sock_no_listen,
3747 .shutdown = sock_no_shutdown,
3748 .setsockopt = sock_no_setsockopt,
3749 .getsockopt = sock_no_getsockopt,
3750 .sendmsg = packet_sendmsg_spkt,
3751 .recvmsg = packet_recvmsg,
3752 .mmap = sock_no_mmap,
3753 .sendpage = sock_no_sendpage,
3754};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003756static const struct proto_ops packet_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757 .family = PF_PACKET,
3758 .owner = THIS_MODULE,
3759 .release = packet_release,
3760 .bind = packet_bind,
3761 .connect = sock_no_connect,
3762 .socketpair = sock_no_socketpair,
3763 .accept = sock_no_accept,
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003764 .getname = packet_getname,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 .poll = packet_poll,
3766 .ioctl = packet_ioctl,
3767 .listen = sock_no_listen,
3768 .shutdown = sock_no_shutdown,
3769 .setsockopt = packet_setsockopt,
3770 .getsockopt = packet_getsockopt,
3771 .sendmsg = packet_sendmsg,
3772 .recvmsg = packet_recvmsg,
3773 .mmap = packet_mmap,
3774 .sendpage = sock_no_sendpage,
3775};
3776
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003777static const struct net_proto_family packet_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778 .family = PF_PACKET,
3779 .create = packet_create,
3780 .owner = THIS_MODULE,
3781};
3782
3783static struct notifier_block packet_netdev_notifier = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003784 .notifier_call = packet_notifier,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785};
3786
3787#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788
3789static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
stephen hemminger808f5112010-02-22 07:57:18 +00003790 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791{
Denis V. Luneve372c412007-11-19 22:31:54 -08003792 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003793
3794 rcu_read_lock();
3795 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796}
3797
3798static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3799{
Herbert Xu1bf40952007-12-16 14:04:02 -08003800 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003801 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802}
3803
3804static void packet_seq_stop(struct seq_file *seq, void *v)
stephen hemminger808f5112010-02-22 07:57:18 +00003805 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806{
stephen hemminger808f5112010-02-22 07:57:18 +00003807 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808}
3809
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003810static int packet_seq_show(struct seq_file *seq, void *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811{
3812 if (v == SEQ_START_TOKEN)
3813 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3814 else {
Li Zefanb7ceabd2010-02-08 23:19:29 +00003815 struct sock *s = sk_entry(v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 const struct packet_sock *po = pkt_sk(s);
3817
3818 seq_printf(seq,
Dan Rosenberg71338aa2011-05-23 12:17:35 +00003819 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 s,
3821 atomic_read(&s->sk_refcnt),
3822 s->sk_type,
3823 ntohs(po->num),
3824 po->ifindex,
3825 po->running,
3826 atomic_read(&s->sk_rmem_alloc),
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06003827 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003828 sock_i_ino(s));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829 }
3830
3831 return 0;
3832}
3833
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003834static const struct seq_operations packet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 .start = packet_seq_start,
3836 .next = packet_seq_next,
3837 .stop = packet_seq_stop,
3838 .show = packet_seq_show,
3839};
3840
3841static int packet_seq_open(struct inode *inode, struct file *file)
3842{
Denis V. Luneve372c412007-11-19 22:31:54 -08003843 return seq_open_net(inode, file, &packet_seq_ops,
3844 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845}
3846
Arjan van de Venda7071d2007-02-12 00:55:36 -08003847static const struct file_operations packet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 .owner = THIS_MODULE,
3849 .open = packet_seq_open,
3850 .read = seq_read,
3851 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003852 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853};
3854
3855#endif
3856
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003857static int __net_init packet_net_init(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003858{
Pavel Emelyanov0fa7fa92012-08-21 01:06:47 +00003859 mutex_init(&net->packet.sklist_lock);
Denis V. Lunev2aaef4e2007-12-11 04:19:54 -08003860 INIT_HLIST_HEAD(&net->packet.sklist);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003861
Gao fengd4beaa62013-02-18 01:34:54 +00003862 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003863 return -ENOMEM;
3864
3865 return 0;
3866}
3867
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003868static void __net_exit packet_net_exit(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003869{
Gao fengece31ff2013-02-18 01:34:56 +00003870 remove_proc_entry("packet", net->proc_net);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003871}
3872
3873static struct pernet_operations packet_net_ops = {
3874 .init = packet_net_init,
3875 .exit = packet_net_exit,
3876};
3877
3878
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879static void __exit packet_exit(void)
3880{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 unregister_netdevice_notifier(&packet_netdev_notifier);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003882 unregister_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 sock_unregister(PF_PACKET);
3884 proto_unregister(&packet_proto);
3885}
3886
3887static int __init packet_init(void)
3888{
3889 int rc = proto_register(&packet_proto, 0);
3890
3891 if (rc != 0)
3892 goto out;
3893
3894 sock_register(&packet_family_ops);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003895 register_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 register_netdevice_notifier(&packet_netdev_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897out:
3898 return rc;
3899}
3900
3901module_init(packet_init);
3902module_exit(packet_exit);
3903MODULE_LICENSE("GPL");
3904MODULE_ALIAS_NETPROTO(PF_PACKET);