/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman :	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov :	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - If a device has no dev->hard_header routine, it adds and removes the ll
     header itself. In this case the ll header is invisible outside the
     device, but higher levels should still reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit in the reserved space (tunnel); others are silly
     (PPP).
   - A packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It very likely points to the ll header.
		 PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary
  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by the device, we cannot
			  control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */
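
/* Illustration (userspace, not part of this file's build): the two access
 * modes described above as seen from user level.  A minimal sketch; error
 * handling and the CAP_NET_RAW requirement are elided.
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	// recv(raw, ...)  - buffer begins with the ll (e.g. ethernet) header
 *	// recv(dgrm, ...) - ll header stripped; addressing via sockaddr_ll
 */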

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
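
/* Illustration (userspace, not part of this file's build): how a
 * packet_mreq reaches this code, via the PACKET_ADD_MEMBERSHIP socket
 * option.  A sketch; 'fd' is an AF_PACKET socket and 'ifindex' a valid
 * interface index, error handling elided.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */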

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
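
/* Worked example: with V3_ALIGNMENT == 8, a tp_sizeof_priv of 13 gives
 * ALIGN(13, 8) == 16, so BLK_PLUS_PRIV(13) == BLK_HDR_LEN + 16; the first
 * frame in a block therefore always starts 8-byte aligned.
 */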

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

#define PGV_FROM_VMALLOC 1
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	struct tpacket_kbdq_core	prb_bdqc;
	atomic_t		pending;
};

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

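/* Illustration (userspace, not part of this file's build): a minimal
 * TPACKET_V3 ring setup matching the block geometry managed below.  The
 * sizes are arbitrary and error handling is elided.
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 20,	// 1 MiB per block
 *		.tp_block_nr	   = 8,
 *		.tp_frame_size	   = 2048,
 *		.tp_frame_nr	   = 8 * ((1 << 20) / 2048),
 *		.tp_retire_blk_tov = 60,	// ms; 0 lets the kernel pick
 *	};
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */
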
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	union  tpacket_stats_u	stats_u;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device		*/
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}
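
/* Illustration (userspace, not part of this file's build): the consumer
 * side of the tp_status handshake above, for a TPACKET_V2 ring.  'frame'
 * points into the mmap()ed ring and consume() is a hypothetical helper;
 * the smp_wmb()/smp_rmb() pair in the kernel orders the frame contents
 * against this status word.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct tpacket2_hdr *hdr = frame;
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel
 *	consume(hdr);				// read the packet
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 */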

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}
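
/* Worked example of the lookup arithmetic above: with frames_per_block = 4
 * and frame_size = 2048, position 10 lands in pg_vec[10 / 4 = 2] at byte
 * offset (10 % 4) * 2048 = 4096 within that block's buffer.
 */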

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}


static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		switch (ecmd.speed) {
		case SPEED_10000:
			msec = 1;
			div = 10000/1000;
			break;
		case SPEED_1000:
			msec = 1;
			div = 1000/1000;
			break;
		/*
		 * If the link speed is this slow you don't really
		 * need to worry about perf anyway
		 */
		case SPEED_100:
		case SPEED_10:
		default:
			return DEFAULT_PRB_RETIRE_TOV;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
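
/* Worked example of the timeout derivation above: a 1 MiB block on a
 * 1 Gbit/s link gives mbits = (1048576 * 8) / (1024 * 1024) = 8,
 * div = 1 and msec = 1, so tmo = 8 and the function returns 9 ms --
 * just above the ~8 ms line-rate traffic needs to fill the block.
 */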

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = (char *)pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. The queue was frozen, user-space
				 * caught up, then the link went idle && the
				 * timer fired. We don't have a block to
				 * close, so we open this block and restart
				 * the timer. Opening a block thaws the queue
				 * and refreshes the timer as side effects.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */
	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	getnstimeofday(&ts);
	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
				    BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens the Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space
 *    left, it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 * 5.1) Since block-0 is currently in use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) The link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) The link is busy and keeps on receiving packets. This is the
 *         simple case: __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}
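
/* Illustration (userspace, not part of this file's build): the consumer
 * side of the freeze/thaw handshake sketched above.  A hypothetical V3
 * reader walks a finished block and hands it back so the queue can thaw;
 * 'ring' and 'req' come from a PACKET_RX_RING setup like the example
 * earlier in this file, and walk_block() is a hypothetical helper.
 *
 *	struct tpacket_block_desc *pbd =
 *		(void *)((char *)ring + i * req.tp_block_size);
 *	if (pbd->hdr.bh1.block_status & TP_STATUS_USER) {
 *		walk_block(pbd);				// consume pkts
 *		pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// release
 *		i = (i + 1) % req.tp_block_nr;
 *	}
 */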

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				      struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes the caller holds the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *) ((char *)pbd + pkc->kblk_size);

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user-space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int previous,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes the caller holds the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		WARN(1, "Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	u32 idx, hash = skb->rxhash;

	idx = ((u64)hash * num) >> 32;

	return f->arr[idx];
}
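
/* The multiply-shift above maps a 32-bit hash uniformly onto [0, num)
 * without a modulo: e.g. with num = 4 and hash = 0xC0000000,
 * idx = (0xC0000000ULL * 4) >> 32 = 3.
 */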

static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return f->arr[cur];
}

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !atomic_read(&match->sk_ref)) {
		list_del(&match->list);
		kfree(match);
	}

out:
	mutex_unlock(&fanout_mutex);
	return err;
}
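
/* Illustration (userspace, not part of this file's build): joining a
 * fanout group from each worker socket.  'fd' is an already-bound
 * AF_PACKET socket; the group id 42 is arbitrary.  The low 16 bits of
 * the option value carry the id, the high 16 bits the type flags.
 *
 *	int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */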

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (atomic_dec_and_test(&f->sk_ref)) {
			list_del(&f->list);
			dev_remove_pack(&f->prot_hook);
			kfree(f);
		}
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have the ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using up all the memory.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
1450
1451
1452/*
1453 * Output a raw packet to a device layer. This bypasses all the other
1454 * protocol layers and you must therefore supply it with a complete frame
1455 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001456
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1458 struct msghdr *msg, size_t len)
1459{
1460 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001461 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001462 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 struct net_device *dev;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001464 __be16 proto = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 int err;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001466 int extra_len = 0;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001467
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001469 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 */
1471
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001472 if (saddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 if (msg->msg_namelen < sizeof(struct sockaddr))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001474 return -EINVAL;
1475 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1476 proto = saddr->spkt_protocol;
1477 } else
1478 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479
1480 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001481 * Find the device first to size check it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 */
1483
1484 saddr->spkt_device[13] = 0;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001485retry:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001486 rcu_read_lock();
1487 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 err = -ENODEV;
1489 if (dev == NULL)
1490 goto out_unlock;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001491
David S. Millerd5e76b02007-01-25 19:30:36 -08001492 err = -ENETDOWN;
1493 if (!(dev->flags & IFF_UP))
1494 goto out_unlock;
1495
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 /*
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001497	 * You may not queue a frame bigger than the MTU. This is the lowest-level
1498 * raw protocol and you must do your own fragmentation at this level.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001500
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001501 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1502 if (!netif_supports_nofcs(dev)) {
1503 err = -EPROTONOSUPPORT;
1504 goto out_unlock;
1505 }
1506 extra_len = 4; /* We're doing our own CRC */
1507 }
1508
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001510 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 goto out_unlock;
1512
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001513 if (!skb) {
1514 size_t reserved = LL_RESERVED_SPACE(dev);
Herbert Xu4ce40912011-11-18 02:20:05 +00001515 int tlen = dev->needed_tailroom;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001516 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001518 rcu_read_unlock();
Herbert Xu4ce40912011-11-18 02:20:05 +00001519 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001520 if (skb == NULL)
1521 return -ENOBUFS;
1522 /* FIXME: Save some space for broken drivers that write a hard
1523 * header at transmission time by themselves. PPP is the notable
1524 * one here. This should really be fixed at the driver level.
1525 */
1526 skb_reserve(skb, reserved);
1527 skb_reset_network_header(skb);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001528
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001529 /* Try to align data part correctly */
1530 if (hhlen) {
1531 skb->data -= hhlen;
1532 skb->tail -= hhlen;
1533 if (len < hhlen)
1534 skb_reset_network_header(skb);
1535 }
1536 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1537 if (err)
1538 goto out_free;
1539 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 }
1541
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001542 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00001543 /* Earlier code assumed this would be a VLAN pkt,
1544 * double-check this now that we have the actual
1545 * packet in hand.
1546 */
1547 struct ethhdr *ehdr;
1548 skb_reset_mac_header(skb);
1549 ehdr = eth_hdr(skb);
1550 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1551 err = -EMSGSIZE;
1552 goto out_unlock;
1553 }
1554 }
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001555
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 skb->protocol = proto;
1557 skb->dev = dev;
1558 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001559 skb->mark = sk->sk_mark;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00001560 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00001561 if (err < 0)
1562 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001564 if (unlikely(extra_len == 4))
1565 skb->no_fcs = 1;
1566
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 dev_queue_xmit(skb);
Eric Dumazet654d1f82009-11-02 10:43:32 +01001568 rcu_read_unlock();
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001569 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571out_unlock:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001572 rcu_read_unlock();
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001573out_free:
1574 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 return err;
1576}
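/*
 * A minimal userspace sketch (not part of this file) of the sendmsg path
 * above: sendto() on an obsolete SOCK_PACKET socket.  The interface name
 * "eth0" and the caller-supplied frame are assumptions; as the comment
 * above says, a complete link-layer frame must be supplied.
 */
#if 0	/* userspace example */
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static int send_spkt_example(const void *frame, size_t len)
{
	struct sockaddr_pkt spkt;
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));

	if (fd < 0)
		return -1;
	memset(&spkt, 0, sizeof(spkt));
	spkt.spkt_family = AF_PACKET;
	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
	spkt.spkt_protocol = htons(ETH_P_ALL);
	/* msg_namelen == sizeof(sockaddr_pkt) lets the kernel pick up
	 * spkt_protocol, matching the check at the top of the function.
	 */
	return sendto(fd, frame, len, 0,
		      (struct sockaddr *)&spkt, sizeof(spkt));
}
#endif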
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001578static unsigned int run_filter(const struct sk_buff *skb,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001579 const struct sock *sk,
David S. Millerdbcb5852007-01-24 15:21:02 -08001580 unsigned int res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581{
1582 struct sk_filter *filter;
1583
Eric Dumazet80f8f102011-01-18 07:46:52 +00001584 rcu_read_lock();
1585 filter = rcu_dereference(sk->sk_filter);
David S. Millerdbcb5852007-01-24 15:21:02 -08001586 if (filter != NULL)
Eric Dumazet0a148422011-04-20 09:27:32 +00001587 res = SK_RUN_FILTER(filter, skb);
Eric Dumazet80f8f102011-01-18 07:46:52 +00001588 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
David S. Millerdbcb5852007-01-24 15:21:02 -08001590 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591}
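/*
 * A minimal userspace sketch (not part of this file): run_filter() above
 * executes whatever classic BPF program was attached via SO_ATTACH_FILTER,
 * and its return value caps snaplen.  Assumption for the example: we only
 * want the first 96 bytes of every frame.
 */
#if 0	/* userspace example */
#include <sys/socket.h>
#include <linux/filter.h>

static int attach_trunc_filter(int fd)
{
	/* Unconditionally "return 96": accept every packet but let the
	 * kernel trim it to at most 96 bytes.
	 */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 96),
	};
	struct sock_fprog prog = {
		.len	= 1,
		.filter	= insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
#endif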
1592
1593/*
Eric Dumazet62ab0812010-12-06 20:50:09 +00001594 * This function does lazy skb cloning in the hope that most packets
 1595 * are discarded by BPF.
 1596 *
 1597 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
 1598 * and skb->cb are mangled. It works because (and until) packets
 1599 * falling here are owned by the current CPU. Output packets are cloned
 1600 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 1601 * sequentially, so if we return the skb to its original state on exit,
 1602 * we will not harm anyone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 */
1604
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001605static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1606 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607{
1608 struct sock *sk;
1609 struct sockaddr_ll *sll;
1610 struct packet_sock *po;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001611 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001613 unsigned int snaplen, res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614
1615 if (skb->pkt_type == PACKET_LOOPBACK)
1616 goto drop;
1617
1618 sk = pt->af_packet_priv;
1619 po = pkt_sk(sk);
1620
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001621 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001622 goto drop;
1623
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 skb->dev = dev;
1625
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001626 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 /* The device has an explicit notion of ll header,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001628 * exported to higher levels.
1629 *
1630 * Otherwise, the device hides details of its frame
 1631		 * structure, so the corresponding packet head is
 1632		 * never delivered to the user.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 */
1634 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001635 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636 else if (skb->pkt_type == PACKET_OUTGOING) {
1637 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001638 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 }
1640 }
1641
1642 snaplen = skb->len;
1643
David S. Millerdbcb5852007-01-24 15:21:02 -08001644 res = run_filter(skb, sk, snaplen);
1645 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001646 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001647 if (snaplen > res)
1648 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001650 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 goto drop_n_acct;
1652
1653 if (skb_shared(skb)) {
1654 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1655 if (nskb == NULL)
1656 goto drop_n_acct;
1657
1658 if (skb_head != skb->data) {
1659 skb->data = skb_head;
1660 skb->len = skb_len;
1661 }
1662 kfree_skb(skb);
1663 skb = nskb;
1664 }
1665
Herbert Xuffbc6112007-02-04 23:33:10 -08001666 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1667 sizeof(skb->cb));
1668
1669 sll = &PACKET_SKB_CB(skb)->sa.ll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 sll->sll_family = AF_PACKET;
1671 sll->sll_hatype = dev->type;
1672 sll->sll_protocol = skb->protocol;
1673 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001674 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001675 sll->sll_ifindex = orig_dev->ifindex;
1676 else
1677 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001679 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
Herbert Xuffbc6112007-02-04 23:33:10 -08001681 PACKET_SKB_CB(skb)->origlen = skb->len;
Herbert Xu8dc41942007-02-04 23:31:32 -08001682
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 if (pskb_trim(skb, snaplen))
1684 goto drop_n_acct;
1685
1686 skb_set_owner_r(skb, sk);
1687 skb->dev = NULL;
Eric Dumazetadf30902009-06-02 05:19:30 +00001688 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
Phil Oester84531c22005-07-12 11:57:52 -07001690 /* drop conntrack reference */
1691 nf_reset(skb);
1692
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 spin_lock(&sk->sk_receive_queue.lock);
1694 po->stats.tp_packets++;
Neil Horman3b885782009-10-12 13:26:31 -07001695 skb->dropcount = atomic_read(&sk->sk_drops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 __skb_queue_tail(&sk->sk_receive_queue, skb);
1697 spin_unlock(&sk->sk_receive_queue.lock);
1698 sk->sk_data_ready(sk, skb->len);
1699 return 0;
1700
1701drop_n_acct:
Willem de Bruijn7091fbd2011-09-30 10:38:28 +00001702 spin_lock(&sk->sk_receive_queue.lock);
1703 po->stats.tp_drops++;
1704 atomic_inc(&sk->sk_drops);
1705 spin_unlock(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
1707drop_n_restore:
1708 if (skb_head != skb->data && skb_shared(skb)) {
1709 skb->data = skb_head;
1710 skb->len = skb_len;
1711 }
1712drop:
Neil Hormanead2ceb2009-03-11 09:49:55 +00001713 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 return 0;
1715}
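/*
 * A minimal userspace sketch (not part of this file): packet_rcv() above
 * queues the frame together with the sockaddr_ll it built in skb->cb, and
 * recvfrom() hands both back to userspace.  Assumption: a SOCK_RAW socket
 * receiving all protocols.
 */
#if 0	/* userspace example */
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static void rcv_example(void)
{
	unsigned char buf[2048];
	struct sockaddr_ll sll;
	socklen_t slen = sizeof(sll);
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
			     (struct sockaddr *)&sll, &slen);

	if (n >= 0)
		printf("ifindex=%d hatype=%u pkttype=%u len=%zd\n",
		       sll.sll_ifindex, sll.sll_hatype,
		       sll.sll_pkttype, n);
}
#endif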
1716
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001717static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1718 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719{
1720 struct sock *sk;
1721 struct packet_sock *po;
1722 struct sockaddr_ll *sll;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001723 union {
1724 struct tpacket_hdr *h1;
1725 struct tpacket2_hdr *h2;
chetan lokef6fb8f12011-08-19 10:18:16 +00001726 struct tpacket3_hdr *h3;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001727 void *raw;
1728 } h;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001729 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001731 unsigned int snaplen, res;
chetan lokef6fb8f12011-08-19 10:18:16 +00001732 unsigned long status = TP_STATUS_USER;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001733 unsigned short macoff, netoff, hdrlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 struct sk_buff *copy_skb = NULL;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001735 struct timeval tv;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001736 struct timespec ts;
Scott McMillan614f60f2010-06-02 05:53:56 -07001737 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
1739 if (skb->pkt_type == PACKET_LOOPBACK)
1740 goto drop;
1741
1742 sk = pt->af_packet_priv;
1743 po = pkt_sk(sk);
1744
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001745 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001746 goto drop;
1747
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001748 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001750 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 else if (skb->pkt_type == PACKET_OUTGOING) {
1752 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001753 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 }
1755 }
1756
Herbert Xu8dc41942007-02-04 23:31:32 -08001757 if (skb->ip_summed == CHECKSUM_PARTIAL)
1758 status |= TP_STATUS_CSUMNOTREADY;
1759
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 snaplen = skb->len;
1761
David S. Millerdbcb5852007-01-24 15:21:02 -08001762 res = run_filter(skb, sk, snaplen);
1763 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001764 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001765 if (snaplen > res)
1766 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
1768 if (sk->sk_type == SOCK_DGRAM) {
Patrick McHardy89133362008-07-18 18:05:19 -07001769 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1770 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 } else {
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001772 unsigned maclen = skb_network_offset(skb);
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001773 netoff = TPACKET_ALIGN(po->tp_hdrlen +
Patrick McHardy89133362008-07-18 18:05:19 -07001774 (maclen < 16 ? 16 : maclen)) +
1775 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 macoff = netoff - maclen;
1777 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001778 if (po->tp_version <= TPACKET_V2) {
1779 if (macoff + snaplen > po->rx_ring.frame_size) {
1780 if (po->copy_thresh &&
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001781 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
chetan lokef6fb8f12011-08-19 10:18:16 +00001782 if (skb_shared(skb)) {
1783 copy_skb = skb_clone(skb, GFP_ATOMIC);
1784 } else {
1785 copy_skb = skb_get(skb);
1786 skb_head = skb->data;
1787 }
1788 if (copy_skb)
1789 skb_set_owner_r(copy_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 }
chetan lokef6fb8f12011-08-19 10:18:16 +00001791 snaplen = po->rx_ring.frame_size - macoff;
1792 if ((int)snaplen < 0)
1793 snaplen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 }
Eric Dumazet6ac4e552014-08-15 09:16:04 -07001795 } else if (unlikely(macoff + snaplen >
1796 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
1797 u32 nval;
1798
1799 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
1800 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
1801 snaplen, nval, macoff);
1802 snaplen = nval;
1803 if (unlikely((int)snaplen < 0)) {
1804 snaplen = 0;
1805 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
1806 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 spin_lock(&sk->sk_receive_queue.lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00001809 h.raw = packet_current_rx_frame(po, skb,
1810 TP_STATUS_KERNEL, (macoff+snaplen));
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001811 if (!h.raw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 goto ring_is_full;
chetan lokef6fb8f12011-08-19 10:18:16 +00001813 if (po->tp_version <= TPACKET_V2) {
1814 packet_increment_rx_head(po, &po->rx_ring);
1815 /*
 1816		 * LOSING will be reported until you read the stats,
 1817		 * because it's COR - Clear On Read.
 1818		 * Anyway, we move it for V1/V2 only, as V3 doesn't need this
 1819		 * at the packet level.
1820 */
1821 if (po->stats.tp_drops)
1822 status |= TP_STATUS_LOSING;
1823 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 po->stats.tp_packets++;
1825 if (copy_skb) {
1826 status |= TP_STATUS_COPY;
1827 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 spin_unlock(&sk->sk_receive_queue.lock);
1830
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001831 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001833 switch (po->tp_version) {
1834 case TPACKET_V1:
1835 h.h1->tp_len = skb->len;
1836 h.h1->tp_snaplen = snaplen;
1837 h.h1->tp_mac = macoff;
1838 h.h1->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001839 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1840 && shhwtstamps->syststamp.tv64)
1841 tv = ktime_to_timeval(shhwtstamps->syststamp);
1842 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1843 && shhwtstamps->hwtstamp.tv64)
1844 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1845 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001846 tv = ktime_to_timeval(skb->tstamp);
1847 else
1848 do_gettimeofday(&tv);
1849 h.h1->tp_sec = tv.tv_sec;
1850 h.h1->tp_usec = tv.tv_usec;
1851 hdrlen = sizeof(*h.h1);
1852 break;
1853 case TPACKET_V2:
1854 h.h2->tp_len = skb->len;
1855 h.h2->tp_snaplen = snaplen;
1856 h.h2->tp_mac = macoff;
1857 h.h2->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001858 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1859 && shhwtstamps->syststamp.tv64)
1860 ts = ktime_to_timespec(shhwtstamps->syststamp);
1861 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1862 && shhwtstamps->hwtstamp.tv64)
1863 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1864 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001865 ts = ktime_to_timespec(skb->tstamp);
1866 else
1867 getnstimeofday(&ts);
1868 h.h2->tp_sec = ts.tv_sec;
1869 h.h2->tp_nsec = ts.tv_nsec;
Ben Greeara3bcc232011-06-01 06:49:10 +00001870 if (vlan_tx_tag_present(skb)) {
1871 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1872 status |= TP_STATUS_VLAN_VALID;
1873 } else {
1874 h.h2->tp_vlan_tci = 0;
1875 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07001876 h.h2->tp_padding = 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001877 hdrlen = sizeof(*h.h2);
1878 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00001879 case TPACKET_V3:
1880 /* tp_nxt_offset,vlan are already populated above.
1881 * So DONT clear those fields here
1882 */
1883 h.h3->tp_status |= status;
1884 h.h3->tp_len = skb->len;
1885 h.h3->tp_snaplen = snaplen;
1886 h.h3->tp_mac = macoff;
1887 h.h3->tp_net = netoff;
1888 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1889 && shhwtstamps->syststamp.tv64)
1890 ts = ktime_to_timespec(shhwtstamps->syststamp);
1891 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1892 && shhwtstamps->hwtstamp.tv64)
1893 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1894 else if (skb->tstamp.tv64)
1895 ts = ktime_to_timespec(skb->tstamp);
1896 else
1897 getnstimeofday(&ts);
1898 h.h3->tp_sec = ts.tv_sec;
1899 h.h3->tp_nsec = ts.tv_nsec;
1900 hdrlen = sizeof(*h.h3);
1901 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001902 default:
1903 BUG();
1904 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001906 sll = h.raw + TPACKET_ALIGN(hdrlen);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001907 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 sll->sll_family = AF_PACKET;
1909 sll->sll_hatype = dev->type;
1910 sll->sll_protocol = skb->protocol;
1911 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001912 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001913 sll->sll_ifindex = orig_dev->ifindex;
1914 else
1915 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
Ralf Baechlee16aa202006-12-07 00:11:33 -08001917 smp_mb();
Changli Gaof6dafa92010-12-07 04:26:16 +00001918#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 {
Changli Gao0af55bb2010-12-01 02:52:20 +00001920 u8 *start, *end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
chetan lokef6fb8f12011-08-19 10:18:16 +00001922 if (po->tp_version <= TPACKET_V2) {
1923 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1924 + macoff + snaplen);
1925 for (start = h.raw; start < end; start += PAGE_SIZE)
1926 flush_dcache_page(pgv_to_page(start));
1927 }
Chetan Lokecc9f01b2011-07-14 08:36:33 -07001928 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 }
Changli Gaof6dafa92010-12-07 04:26:16 +00001930#endif
chetan lokef6fb8f12011-08-19 10:18:16 +00001931 if (po->tp_version <= TPACKET_V2)
1932 __packet_set_status(po, h.raw, status);
1933 else
1934 prb_clear_blk_fill_status(&po->rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
1936 sk->sk_data_ready(sk, 0);
1937
1938drop_n_restore:
1939 if (skb_head != skb->data && skb_shared(skb)) {
1940 skb->data = skb_head;
1941 skb->len = skb_len;
1942 }
1943drop:
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001944 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 return 0;
1946
1947ring_is_full:
1948 po->stats.tp_drops++;
1949 spin_unlock(&sk->sk_receive_queue.lock);
1950
1951 sk->sk_data_ready(sk, 0);
Wei Yongjunacb5d752009-02-25 00:36:42 +00001952 kfree_skb(copy_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 goto drop_n_restore;
1954}
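/*
 * A minimal userspace sketch (not part of this file): tpacket_rcv() above
 * fills a slot of the mmap()ed RX ring and, for V1/V2, hands it over by
 * setting TP_STATUS_USER.  A matching TPACKET_V2 reader loop; the ring
 * geometry is assumed to have been configured with PACKET_RX_RING
 * beforehand, and process_frame() is a placeholder.
 */
#if 0	/* userspace example */
#include <poll.h>
#include <linux/if_packet.h>

extern void process_frame(const void *data, unsigned int len);

static void rx_ring_reader(int fd, char *ring, unsigned int frame_nr,
			   unsigned int frame_size)
{
	unsigned int i = 0;

	for (;;) {
		struct tpacket2_hdr *hdr =
			(struct tpacket2_hdr *)(ring + i * frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			struct pollfd pfd = { .fd = fd, .events = POLLIN };

			poll(&pfd, 1, -1);
			continue;
		}
		/* Payload starts tp_mac bytes into the slot. */
		process_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
		hdr->tp_status = TP_STATUS_KERNEL;	/* return the slot */
		i = (i + 1) % frame_nr;
	}
}
#endif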
1955
Johann Baudy69e3c752009-05-18 22:11:22 -07001956static void tpacket_destruct_skb(struct sk_buff *skb)
1957{
1958 struct packet_sock *po = pkt_sk(skb->sk);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001959 void *ph;
Johann Baudy69e3c752009-05-18 22:11:22 -07001960
Johann Baudy69e3c752009-05-18 22:11:22 -07001961 if (likely(po->tx_ring.pg_vec)) {
1962 ph = skb_shinfo(skb)->destructor_arg;
Johann Baudy69e3c752009-05-18 22:11:22 -07001963 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1964 atomic_dec(&po->tx_ring.pending);
1965 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1966 }
1967
1968 sock_wfree(skb);
1969}
1970
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001971static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1972 void *frame, struct net_device *dev, int size_max,
Herbert Xuae641942011-11-18 02:20:04 +00001973 __be16 proto, unsigned char *addr, int hlen)
Johann Baudy69e3c752009-05-18 22:11:22 -07001974{
1975 union {
1976 struct tpacket_hdr *h1;
1977 struct tpacket2_hdr *h2;
1978 void *raw;
1979 } ph;
1980 int to_write, offset, len, tp_len, nr_frags, len_max;
1981 struct socket *sock = po->sk.sk_socket;
1982 struct page *page;
1983 void *data;
1984 int err;
1985
1986 ph.raw = frame;
1987
1988 skb->protocol = proto;
1989 skb->dev = dev;
1990 skb->priority = po->sk.sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001991 skb->mark = po->sk.sk_mark;
Johann Baudy69e3c752009-05-18 22:11:22 -07001992 skb_shinfo(skb)->destructor_arg = ph.raw;
1993
1994 switch (po->tp_version) {
1995 case TPACKET_V2:
1996 tp_len = ph.h2->tp_len;
1997 break;
1998 default:
1999 tp_len = ph.h1->tp_len;
2000 break;
2001 }
2002 if (unlikely(tp_len > size_max)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002003 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
Johann Baudy69e3c752009-05-18 22:11:22 -07002004 return -EMSGSIZE;
2005 }
2006
Herbert Xuae641942011-11-18 02:20:04 +00002007 skb_reserve(skb, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07002008 skb_reset_network_header(skb);
2009
2010 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2011 to_write = tp_len;
2012
2013 if (sock->type == SOCK_DGRAM) {
2014 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2015 NULL, tp_len);
2016 if (unlikely(err < 0))
2017 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002018 } else if (dev->hard_header_len) {
Johann Baudy69e3c752009-05-18 22:11:22 -07002019 /* net device doesn't like empty head */
2020 if (unlikely(tp_len <= dev->hard_header_len)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002021 pr_err("packet size is too short (%d < %d)\n",
2022 tp_len, dev->hard_header_len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002023 return -EINVAL;
2024 }
2025
2026 skb_push(skb, dev->hard_header_len);
2027 err = skb_store_bits(skb, 0, data,
2028 dev->hard_header_len);
2029 if (unlikely(err))
2030 return err;
2031
2032 data += dev->hard_header_len;
2033 to_write -= dev->hard_header_len;
2034 }
2035
2036 err = -EFAULT;
Johann Baudy69e3c752009-05-18 22:11:22 -07002037 offset = offset_in_page(data);
2038 len_max = PAGE_SIZE - offset;
2039 len = ((to_write > len_max) ? len_max : to_write);
2040
2041 skb->data_len = to_write;
2042 skb->len += to_write;
2043 skb->truesize += to_write;
2044 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2045
2046 while (likely(to_write)) {
2047 nr_frags = skb_shinfo(skb)->nr_frags;
2048
2049 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002050			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2051 MAX_SKB_FRAGS);
Johann Baudy69e3c752009-05-18 22:11:22 -07002052 return -EFAULT;
2053 }
2054
Changli Gao0af55bb2010-12-01 02:52:20 +00002055 page = pgv_to_page(data);
2056 data += len;
Johann Baudy69e3c752009-05-18 22:11:22 -07002057 flush_dcache_page(page);
2058 get_page(page);
Changli Gao0af55bb2010-12-01 02:52:20 +00002059 skb_fill_page_desc(skb, nr_frags, page, offset, len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002060 to_write -= len;
2061 offset = 0;
2062 len_max = PAGE_SIZE;
2063 len = ((to_write > len_max) ? len_max : to_write);
2064 }
2065
2066 return tp_len;
2067}
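/*
 * A minimal userspace sketch (not part of this file): tpacket_fill_skb()
 * above reads tp_len and the packet bytes from a TX ring slot, with the
 * data starting at tp_hdrlen - sizeof(struct sockaddr_ll).  This is how a
 * TPACKET_V2 producer fills the matching slot.
 */
#if 0	/* userspace example */
#include <string.h>
#include <linux/if_packet.h>

static void fill_tx_frame(void *slot, const void *pkt, unsigned int len)
{
	struct tpacket2_hdr *hdr = slot;
	void *data = (char *)slot + TPACKET2_HDRLEN -
		     sizeof(struct sockaddr_ll);

	memcpy(data, pkt, len);
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;	/* hand to kernel */
}
#endif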
2068
2069static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2070{
Johann Baudy69e3c752009-05-18 22:11:22 -07002071 struct sk_buff *skb;
2072 struct net_device *dev;
2073 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002074 bool need_rls_dev = false;
2075 int err, reserve = 0;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002076 void *ph;
2077 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Johann Baudy69e3c752009-05-18 22:11:22 -07002078 int tp_len, size_max;
2079 unsigned char *addr;
2080 int len_sum = 0;
2081 int status = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002082 int hlen, tlen;
Johann Baudy69e3c752009-05-18 22:11:22 -07002083
Johann Baudy69e3c752009-05-18 22:11:22 -07002084 mutex_lock(&po->pg_vec_lock);
2085
2086 err = -EBUSY;
2087 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002088 dev = po->prot_hook.dev;
Johann Baudy69e3c752009-05-18 22:11:22 -07002089 proto = po->num;
2090 addr = NULL;
2091 } else {
2092 err = -EINVAL;
2093 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2094 goto out;
2095 if (msg->msg_namelen < (saddr->sll_halen
2096 + offsetof(struct sockaddr_ll,
2097 sll_addr)))
2098 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07002099 proto = saddr->sll_protocol;
2100 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002101 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2102 need_rls_dev = true;
Johann Baudy69e3c752009-05-18 22:11:22 -07002103 }
2104
Johann Baudy69e3c752009-05-18 22:11:22 -07002105 err = -ENXIO;
2106 if (unlikely(dev == NULL))
2107 goto out;
2108
2109 reserve = dev->hard_header_len;
2110
2111 err = -ENETDOWN;
2112 if (unlikely(!(dev->flags & IFF_UP)))
2113 goto out_put;
2114
2115 size_max = po->tx_ring.frame_size
Gabor Gombasb5dd8842009-10-29 03:19:11 -07002116 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
Johann Baudy69e3c752009-05-18 22:11:22 -07002117
2118 if (size_max > dev->mtu + reserve)
2119 size_max = dev->mtu + reserve;
2120
2121 do {
2122 ph = packet_current_frame(po, &po->tx_ring,
2123 TP_STATUS_SEND_REQUEST);
2124
2125 if (unlikely(ph == NULL)) {
2126 schedule();
2127 continue;
2128 }
2129
2130 status = TP_STATUS_SEND_REQUEST;
Herbert Xuae641942011-11-18 02:20:04 +00002131 hlen = LL_RESERVED_SPACE(dev);
2132 tlen = dev->needed_tailroom;
Johann Baudy69e3c752009-05-18 22:11:22 -07002133 skb = sock_alloc_send_skb(&po->sk,
Herbert Xuae641942011-11-18 02:20:04 +00002134 hlen + tlen + sizeof(struct sockaddr_ll),
Johann Baudy69e3c752009-05-18 22:11:22 -07002135 0, &err);
2136
2137 if (unlikely(skb == NULL))
2138 goto out_status;
2139
2140 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
Herbert Xuae641942011-11-18 02:20:04 +00002141 addr, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07002142
2143 if (unlikely(tp_len < 0)) {
2144 if (po->tp_loss) {
2145 __packet_set_status(po, ph,
2146 TP_STATUS_AVAILABLE);
2147 packet_increment_head(&po->tx_ring);
2148 kfree_skb(skb);
2149 continue;
2150 } else {
2151 status = TP_STATUS_WRONG_FORMAT;
2152 err = tp_len;
2153 goto out_status;
2154 }
2155 }
2156
2157 skb->destructor = tpacket_destruct_skb;
2158 __packet_set_status(po, ph, TP_STATUS_SENDING);
2159 atomic_inc(&po->tx_ring.pending);
2160
2161 status = TP_STATUS_SEND_REQUEST;
2162 err = dev_queue_xmit(skb);
Jarek Poplawskieb70df12010-01-10 22:04:19 +00002163 if (unlikely(err > 0)) {
2164 err = net_xmit_errno(err);
2165 if (err && __packet_get_status(po, ph) ==
2166 TP_STATUS_AVAILABLE) {
2167 /* skb was destructed already */
2168 skb = NULL;
2169 goto out_status;
2170 }
2171 /*
2172 * skb was dropped but not destructed yet;
2173 * let's treat it like congestion or err < 0
2174 */
2175 err = 0;
2176 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002177 packet_increment_head(&po->tx_ring);
2178 len_sum += tp_len;
Joe Perchesf64f9e72009-11-29 16:55:45 -08002179 } while (likely((ph != NULL) ||
2180 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2181 (atomic_read(&po->tx_ring.pending))))
2182 );
Johann Baudy69e3c752009-05-18 22:11:22 -07002183
2184 err = len_sum;
2185 goto out_put;
2186
Johann Baudy69e3c752009-05-18 22:11:22 -07002187out_status:
2188 __packet_set_status(po, ph, status);
2189 kfree_skb(skb);
2190out_put:
Ben Greear827d9782011-06-01 07:18:53 +00002191 if (need_rls_dev)
2192 dev_put(dev);
Johann Baudy69e3c752009-05-18 22:11:22 -07002193out:
2194 mutex_unlock(&po->pg_vec_lock);
2195 return err;
2196}
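/*
 * A minimal userspace sketch (not part of this file): tpacket_snd() above
 * drains every slot marked TP_STATUS_SEND_REQUEST, so after filling slots
 * (as in fill_tx_frame() above) userspace needs only one zero-length send
 * to flush the whole TX ring.
 */
#if 0	/* userspace example */
#include <sys/socket.h>

static int flush_tx_ring(int fd)
{
	/* No buffer needed: the frames come from the mmap()ed TX ring. */
	return sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}
#endif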
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197
Olof Johanssoneea49cc92011-11-02 11:00:49 +00002198static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2199 size_t reserve, size_t len,
2200 size_t linear, int noblock,
2201 int *err)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002202{
2203 struct sk_buff *skb;
2204
2205 /* Under a page? Don't bother with paged skb. */
2206 if (prepad + len < PAGE_SIZE || !linear)
2207 linear = len;
2208
2209 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2210 err);
2211 if (!skb)
2212 return NULL;
2213
2214 skb_reserve(skb, reserve);
2215 skb_put(skb, linear);
2216 skb->data_len = len - linear;
2217 skb->len += len - linear;
2218
2219 return skb;
2220}
2221
Johann Baudy69e3c752009-05-18 22:11:22 -07002222static int packet_snd(struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 struct msghdr *msg, size_t len)
2224{
2225 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002226 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 struct sk_buff *skb;
2228 struct net_device *dev;
Al Viro0e11c912006-11-08 00:26:29 -08002229 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002230 bool need_rls_dev = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 unsigned char *addr;
Ben Greear827d9782011-06-01 07:18:53 +00002232 int err, reserve = 0;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002233 struct virtio_net_hdr vnet_hdr = { 0 };
2234 int offset = 0;
2235 int vnet_hdr_len;
2236 struct packet_sock *po = pkt_sk(sk);
2237 unsigned short gso_type = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002238 int hlen, tlen;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002239 int extra_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240
2241 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002242 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002244
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002246 dev = po->prot_hook.dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 proto = po->num;
2248 addr = NULL;
2249 } else {
2250 err = -EINVAL;
2251 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2252 goto out;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002253 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2254 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 proto = saddr->sll_protocol;
2256 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002257 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2258 need_rls_dev = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 }
2260
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 err = -ENXIO;
2262 if (dev == NULL)
2263 goto out_unlock;
2264 if (sock->type == SOCK_RAW)
2265 reserve = dev->hard_header_len;
2266
David S. Millerd5e76b02007-01-25 19:30:36 -08002267 err = -ENETDOWN;
2268 if (!(dev->flags & IFF_UP))
2269 goto out_unlock;
2270
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002271 if (po->has_vnet_hdr) {
2272 vnet_hdr_len = sizeof(vnet_hdr);
2273
2274 err = -EINVAL;
2275 if (len < vnet_hdr_len)
2276 goto out_unlock;
2277
2278 len -= vnet_hdr_len;
2279
2280 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2281 vnet_hdr_len);
2282 if (err < 0)
2283 goto out_unlock;
2284
2285 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2286 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2287 vnet_hdr.hdr_len))
2288 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2289 vnet_hdr.csum_offset + 2;
2290
2291 err = -EINVAL;
2292 if (vnet_hdr.hdr_len > len)
2293 goto out_unlock;
2294
2295 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2296 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2297 case VIRTIO_NET_HDR_GSO_TCPV4:
2298 gso_type = SKB_GSO_TCPV4;
2299 break;
2300 case VIRTIO_NET_HDR_GSO_TCPV6:
2301 gso_type = SKB_GSO_TCPV6;
2302 break;
2303 case VIRTIO_NET_HDR_GSO_UDP:
2304 gso_type = SKB_GSO_UDP;
2305 break;
2306 default:
2307 goto out_unlock;
2308 }
2309
2310 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2311 gso_type |= SKB_GSO_TCP_ECN;
2312
2313 if (vnet_hdr.gso_size == 0)
2314 goto out_unlock;
2315
2316 }
2317 }
2318
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002319 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2320 if (!netif_supports_nofcs(dev)) {
2321 err = -EPROTONOSUPPORT;
2322 goto out_unlock;
2323 }
2324 extra_len = 4; /* We're doing our own CRC */
2325 }
2326
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002328 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 goto out_unlock;
2330
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002331 err = -ENOBUFS;
Herbert Xuae641942011-11-18 02:20:04 +00002332 hlen = LL_RESERVED_SPACE(dev);
2333 tlen = dev->needed_tailroom;
2334 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002335 msg->msg_flags & MSG_DONTWAIT, &err);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002336 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337 goto out_unlock;
2338
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002339 skb_set_network_header(skb, reserve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002341 err = -EINVAL;
2342 if (sock->type == SOCK_DGRAM &&
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002343 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002344 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345
2346 /* Returns -EFAULT on error */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002347 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 if (err)
2349 goto out_free;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002350 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00002351 if (err < 0)
2352 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002354 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00002355 /* Earlier code assumed this would be a VLAN pkt,
2356 * double-check this now that we have the actual
2357 * packet in hand.
2358 */
2359 struct ethhdr *ehdr;
2360 skb_reset_mac_header(skb);
2361 ehdr = eth_hdr(skb);
2362 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2363 err = -EMSGSIZE;
2364 goto out_free;
2365 }
2366 }
2367
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 skb->protocol = proto;
2369 skb->dev = dev;
2370 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00002371 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002373 if (po->has_vnet_hdr) {
2374 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2375 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2376 vnet_hdr.csum_offset)) {
2377 err = -EINVAL;
2378 goto out_free;
2379 }
2380 }
2381
2382 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2383 skb_shinfo(skb)->gso_type = gso_type;
2384
2385 /* Header must be checked, and gso_segs computed. */
2386 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2387 skb_shinfo(skb)->gso_segs = 0;
2388
2389 len += vnet_hdr_len;
2390 }
2391
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002392 if (unlikely(extra_len == 4))
2393 skb->no_fcs = 1;
2394
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 /*
2396 * Now send it
2397 */
2398
2399 err = dev_queue_xmit(skb);
2400 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2401 goto out_unlock;
2402
Ben Greear827d9782011-06-01 07:18:53 +00002403 if (need_rls_dev)
2404 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002406 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
2408out_free:
2409 kfree_skb(skb);
2410out_unlock:
Ben Greear827d9782011-06-01 07:18:53 +00002411 if (dev && need_rls_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 dev_put(dev);
2413out:
2414 return err;
2415}
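/*
 * A minimal userspace sketch (not part of this file): with PACKET_VNET_HDR
 * enabled, packet_snd() above expects every message to begin with a
 * struct virtio_net_hdr.  Assumption for the example: one frame with no
 * offloads requested (the all-zero header means VIRTIO_NET_HDR_GSO_NONE).
 */
#if 0	/* userspace example */
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/virtio_net.h>

static ssize_t send_with_vnet_hdr(int fd, const void *frame, size_t len)
{
	struct virtio_net_hdr vnet = { 0 };	/* no GSO, no csum */
	struct iovec iov[2] = {
		{ .iov_base = &vnet,		.iov_len = sizeof(vnet) },
		{ .iov_base = (void *)frame,	.iov_len = len },
	};
	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };

	return sendmsg(fd, &msg, 0);
}
#endif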
2416
Johann Baudy69e3c752009-05-18 22:11:22 -07002417static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2418 struct msghdr *msg, size_t len)
2419{
Johann Baudy69e3c752009-05-18 22:11:22 -07002420 struct sock *sk = sock->sk;
2421 struct packet_sock *po = pkt_sk(sk);
2422 if (po->tx_ring.pg_vec)
2423 return tpacket_snd(po, msg);
2424 else
Johann Baudy69e3c752009-05-18 22:11:22 -07002425 return packet_snd(sock, msg, len);
2426}
2427
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428/*
2429 * Close a PACKET socket. This is fairly simple. We immediately go
2430 * to 'closed' state and remove our protocol entry in the device list.
2431 */
2432
2433static int packet_release(struct socket *sock)
2434{
2435 struct sock *sk = sock->sk;
2436 struct packet_sock *po;
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08002437 struct net *net;
chetan lokef6fb8f12011-08-19 10:18:16 +00002438 union tpacket_req_u req_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439
2440 if (!sk)
2441 return 0;
2442
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002443 net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 po = pkt_sk(sk);
2445
stephen hemminger808f5112010-02-22 07:57:18 +00002446 spin_lock_bh(&net->packet.sklist_lock);
2447 sk_del_node_init_rcu(sk);
Eric Dumazet920de802008-11-24 00:09:29 -08002448 sock_prot_inuse_add(net, sk->sk_prot, -1);
stephen hemminger808f5112010-02-22 07:57:18 +00002449 spin_unlock_bh(&net->packet.sklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450
stephen hemminger808f5112010-02-22 07:57:18 +00002451 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002452 unregister_prot_hook(sk, false);
Ben Greear160ff182011-06-01 07:18:52 +00002453 if (po->prot_hook.dev) {
2454 dev_put(po->prot_hook.dev);
2455 po->prot_hook.dev = NULL;
2456 }
stephen hemminger808f5112010-02-22 07:57:18 +00002457 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 packet_flush_mclist(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460
Phil Sutter330d6332013-02-01 07:21:41 +00002461 if (po->rx_ring.pg_vec) {
2462 memset(&req_u, 0, sizeof(req_u));
chetan lokef6fb8f12011-08-19 10:18:16 +00002463 packet_set_ring(sk, &req_u, 1, 0);
Phil Sutter330d6332013-02-01 07:21:41 +00002464 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002465
Phil Sutter330d6332013-02-01 07:21:41 +00002466 if (po->tx_ring.pg_vec) {
2467 memset(&req_u, 0, sizeof(req_u));
chetan lokef6fb8f12011-08-19 10:18:16 +00002468 packet_set_ring(sk, &req_u, 1, 1);
Phil Sutter330d6332013-02-01 07:21:41 +00002469 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470
David S. Millerdc99f602011-07-05 01:45:05 -07002471 fanout_release(sk);
2472
stephen hemminger808f5112010-02-22 07:57:18 +00002473 synchronize_net();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 /*
2475 * Now the socket is dead. No more input will appear.
2476 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 sock_orphan(sk);
2478 sock->sk = NULL;
2479
2480 /* Purge queues */
2481
2482 skb_queue_purge(&sk->sk_receive_queue);
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002483 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484
2485 sock_put(sk);
2486 return 0;
2487}
2488
2489/*
2490 * Attach a packet hook.
2491 */
2492
Al Viro0e11c912006-11-08 00:26:29 -08002493static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494{
2495 struct packet_sock *po = pkt_sk(sk);
Marissa Walle6401772017-11-16 16:56:10 -08002496 int ret = 0;
2497
2498 lock_sock(sk);
2499
2500 spin_lock(&po->bind_lock);
David S. Millerdc99f602011-07-05 01:45:05 -07002501
Wei Yongjunaef950b2011-12-27 22:32:41 -05002502 if (po->fanout) {
2503 if (dev)
2504 dev_put(dev);
2505
Marissa Walle6401772017-11-16 16:56:10 -08002506 ret = -EINVAL;
2507 goto out_unlock;
Wei Yongjunaef950b2011-12-27 22:32:41 -05002508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
David S. Millerce06b032011-07-04 01:44:29 -07002510 unregister_prot_hook(sk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511 po->num = protocol;
2512 po->prot_hook.type = protocol;
Ben Greear160ff182011-06-01 07:18:52 +00002513 if (po->prot_hook.dev)
2514 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 po->prot_hook.dev = dev;
2516
2517 po->ifindex = dev ? dev->ifindex : 0;
2518
2519 if (protocol == 0)
2520 goto out_unlock;
2521
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002522 if (!dev || (dev->flags & IFF_UP)) {
David S. Millerce06b032011-07-04 01:44:29 -07002523 register_prot_hook(sk);
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002524 } else {
2525 sk->sk_err = ENETDOWN;
2526 if (!sock_flag(sk, SOCK_DEAD))
2527 sk->sk_error_report(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 }
2529
2530out_unlock:
2531 spin_unlock(&po->bind_lock);
2532 release_sock(sk);
Marissa Walle6401772017-11-16 16:56:10 -08002533 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534}
2535
2536/*
2537 * Bind a packet socket to a device
2538 */
2539
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002540static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2541 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002543 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 char name[15];
2545 struct net_device *dev;
2546 int err = -ENODEV;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002547
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 /*
2549 * Check legality
2550 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002551
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002552 if (addr_len != sizeof(struct sockaddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002554 strlcpy(name, uaddr->sa_data, sizeof(name));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002556 dev = dev_get_by_name(sock_net(sk), name);
Ben Greear160ff182011-06-01 07:18:52 +00002557 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 return err;
2560}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561
2562static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2563{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002564 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2565 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 struct net_device *dev = NULL;
2567 int err;
2568
2569
2570 /*
2571 * Check legality
2572 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002573
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 if (addr_len < sizeof(struct sockaddr_ll))
2575 return -EINVAL;
2576 if (sll->sll_family != AF_PACKET)
2577 return -EINVAL;
2578
2579 if (sll->sll_ifindex) {
2580 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002581 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 if (dev == NULL)
2583 goto out;
2584 }
2585 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586
2587out:
2588 return err;
2589}
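/*
 * A minimal userspace sketch (not part of this file): packet_bind() above
 * takes a sockaddr_ll, resolves sll_ifindex and rebinds the protocol
 * hook.  The interface name "eth0" is an assumption.
 */
#if 0	/* userspace example */
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static int bind_example(int fd)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_IP);	/* 0 keeps the old proto */
	sll.sll_ifindex = if_nametoindex("eth0");
	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
#endif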
2590
2591static struct proto packet_proto = {
2592 .name = "PACKET",
2593 .owner = THIS_MODULE,
2594 .obj_size = sizeof(struct packet_sock),
2595};
2596
2597/*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002598 *	Create a packet socket (SOCK_RAW, SOCK_DGRAM or the obsolete SOCK_PACKET).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 */
2600
Eric Paris3f378b62009-11-05 22:18:14 -08002601static int packet_create(struct net *net, struct socket *sock, int protocol,
2602 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603{
2604 struct sock *sk;
2605 struct packet_sock *po;
Al Viro0e11c912006-11-08 00:26:29 -08002606 __be16 proto = (__force __be16)protocol; /* weird, but documented */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 int err;
2608
2609 if (!capable(CAP_NET_RAW))
2610 return -EPERM;
David S. Millerbe020972007-05-29 13:16:31 -07002611 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2612 sock->type != SOCK_PACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 return -ESOCKTNOSUPPORT;
2614
2615 sock->state = SS_UNCONNECTED;
2616
2617 err = -ENOBUFS;
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07002618 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 if (sk == NULL)
2620 goto out;
2621
2622 sock->ops = &packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002623 if (sock->type == SOCK_PACKET)
2624 sock->ops = &packet_ops_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002625
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 sock_init_data(sock, sk);
2627
2628 po = pkt_sk(sk);
2629 sk->sk_family = PF_PACKET;
Al Viro0e11c912006-11-08 00:26:29 -08002630 po->num = proto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631
2632 sk->sk_destruct = packet_sock_destruct;
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002633 sk_refcnt_debug_inc(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634
2635 /*
2636 * Attach a protocol block
2637 */
2638
2639 spin_lock_init(&po->bind_lock);
Herbert Xu905db442009-01-30 14:12:06 -08002640 mutex_init(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 po->prot_hook.func = packet_rcv;
David S. Millerbe020972007-05-29 13:16:31 -07002642
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 if (sock->type == SOCK_PACKET)
2644 po->prot_hook.func = packet_rcv_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002645
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 po->prot_hook.af_packet_priv = sk;
2647
Al Viro0e11c912006-11-08 00:26:29 -08002648 if (proto) {
2649 po->prot_hook.type = proto;
David S. Millerce06b032011-07-04 01:44:29 -07002650 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 }
2652
stephen hemminger808f5112010-02-22 07:57:18 +00002653 spin_lock_bh(&net->packet.sklist_lock);
2654 sk_add_node_rcu(sk, &net->packet.sklist);
Eric Dumazet36804532008-11-19 14:25:35 -08002655 sock_prot_inuse_add(net, &packet_proto, 1);
stephen hemminger808f5112010-02-22 07:57:18 +00002656 spin_unlock_bh(&net->packet.sklist_lock);
2657
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002658 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659out:
2660 return err;
2661}
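/*
 * A minimal userspace sketch (not part of this file): packet_create()
 * above is reached via socket(2); it requires CAP_NET_RAW and accepts
 * SOCK_RAW, SOCK_DGRAM or the obsolete SOCK_PACKET.  Passing protocol 0
 * defers packet delivery until bind() supplies one.
 */
#if 0	/* userspace example */
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>

static int open_packet_sockets(int *raw, int *dgram)
{
	*raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	*dgram = socket(AF_PACKET, SOCK_DGRAM, 0); /* no rx until bind */
	return (*raw < 0 || *dgram < 0) ? -1 : 0;
}
#endif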
2662
Richard Cochraned85b562010-04-07 22:41:28 +00002663static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2664{
2665 struct sock_exterr_skb *serr;
2666 struct sk_buff *skb, *skb2;
2667 int copied, err;
2668
2669 err = -EAGAIN;
2670 skb = skb_dequeue(&sk->sk_error_queue);
2671 if (skb == NULL)
2672 goto out;
2673
2674 copied = skb->len;
2675 if (copied > len) {
2676 msg->msg_flags |= MSG_TRUNC;
2677 copied = len;
2678 }
2679 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2680 if (err)
2681 goto out_free_skb;
2682
2683 sock_recv_timestamp(msg, sk, skb);
2684
2685 serr = SKB_EXT_ERR(skb);
2686 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2687 sizeof(serr->ee), &serr->ee);
2688
2689 msg->msg_flags |= MSG_ERRQUEUE;
2690 err = copied;
2691
2692 /* Reset and regenerate socket error */
2693 spin_lock_bh(&sk->sk_error_queue.lock);
2694 sk->sk_err = 0;
2695 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2696 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2697 spin_unlock_bh(&sk->sk_error_queue.lock);
2698 sk->sk_error_report(sk);
2699 } else
2700 spin_unlock_bh(&sk->sk_error_queue.lock);
2701
2702out_free_skb:
2703 kfree_skb(skb);
2704out:
2705 return err;
2706}
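/*
 * A minimal userspace sketch (not part of this file): packet_recv_error()
 * above is what a recvmsg(MSG_ERRQUEUE) call lands in; the
 * PACKET_TX_TIMESTAMP cmsg carries the sock_extended_err.  Error
 * handling is elided.
 */
#if 0	/* userspace example */
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/errqueue.h>
#include <linux/if_packet.h>

static void read_tx_error(int fd)
{
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov	= &iov, .msg_iovlen = 1,
		.msg_control	= ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == SOL_PACKET &&
		    cm->cmsg_type == PACKET_TX_TIMESTAMP) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);
			/* ee->ee_errno describes the queued event */
			(void)ee;
		}
}
#endif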
2707
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708/*
2709 * Pull a packet from our receive queue and hand it to the user.
2710 * If necessary we block.
2711 */
2712
2713static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2714 struct msghdr *msg, size_t len, int flags)
2715{
2716 struct sock *sk = sock->sk;
2717 struct sk_buff *skb;
2718 int copied, err;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002719 int vnet_hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720
2721 err = -EINVAL;
Richard Cochraned85b562010-04-07 22:41:28 +00002722 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 goto out;
2724
2725#if 0
2726 /* What error should we return now? EUNATTACH? */
2727 if (pkt_sk(sk)->ifindex < 0)
2728 return -ENODEV;
2729#endif
2730
Richard Cochraned85b562010-04-07 22:41:28 +00002731 if (flags & MSG_ERRQUEUE) {
2732 err = packet_recv_error(sk, msg, len);
2733 goto out;
2734 }
2735
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 * Call the generic datagram receiver. This handles all sorts
2738 * of horrible races and re-entrancy so we can forget about it
2739 * in the protocol layers.
2740 *
 2741	 *	Now it will return ENETDOWN if the device has just gone down,
2742 * but then it will block.
2743 */
2744
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002745 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746
2747 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002748	 *	If an error occurred, return it. skb_recv_datagram()
 2749	 *	handles the blocking for us, so we don't have to see or
 2750	 *	worry about blocking retries.
2751 */
2752
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002753 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002754 goto out;
2755
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002756 if (pkt_sk(sk)->has_vnet_hdr) {
2757 struct virtio_net_hdr vnet_hdr = { 0 };
2758
2759 err = -EINVAL;
2760 vnet_hdr_len = sizeof(vnet_hdr);
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002761 if (len < vnet_hdr_len)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002762 goto out_free;
2763
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002764 len -= vnet_hdr_len;
2765
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002766 if (skb_is_gso(skb)) {
2767 struct skb_shared_info *sinfo = skb_shinfo(skb);
2768
2769 /* This is a hint as to how much should be linear. */
2770 vnet_hdr.hdr_len = skb_headlen(skb);
2771 vnet_hdr.gso_size = sinfo->gso_size;
2772 if (sinfo->gso_type & SKB_GSO_TCPV4)
2773 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2774 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2775 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2776 else if (sinfo->gso_type & SKB_GSO_UDP)
2777 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2778 else if (sinfo->gso_type & SKB_GSO_FCOE)
2779 goto out_free;
2780 else
2781 BUG();
2782 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2783 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2784 } else
2785 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2786
2787 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2788 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
Michał Mirosław55508d62010-12-14 15:24:08 +00002789 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002790 vnet_hdr.csum_offset = skb->csum_offset;
Jason Wang10a8d942011-06-10 00:56:17 +00002791 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2792 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002793 } /* else everything is zero */
2794
2795 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2796 vnet_hdr_len);
2797 if (err < 0)
2798 goto out_free;
2799 }
2800
Hannes Frederic Sowaa7ef3862013-11-21 03:14:22 +01002801	/* You lose any data beyond the buffer you gave. If this worries
 2802	 * a user program, it can ask the device for its MTU
 2803	 * anyway.
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002804 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 copied = skb->len;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002806 if (copied > len) {
2807 copied = len;
2808 msg->msg_flags |= MSG_TRUNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 }
2810
2811 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2812 if (err)
2813 goto out_free;
2814
Neil Horman3b885782009-10-12 13:26:31 -07002815 sock_recv_ts_and_drops(msg, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
Hannes Frederic Sowaa7ef3862013-11-21 03:14:22 +01002817 if (msg->msg_name) {
2818 /* If the address length field is there to be filled
2819 * in, we fill it in now.
2820 */
2821 if (sock->type == SOCK_PACKET) {
2822 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2823 } else {
2824 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
2825 msg->msg_namelen = sll->sll_halen +
2826 offsetof(struct sockaddr_ll, sll_addr);
2827 }
Herbert Xuffbc6112007-02-04 23:33:10 -08002828 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2829 msg->msg_namelen);
Hannes Frederic Sowaa7ef3862013-11-21 03:14:22 +01002830 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831
Herbert Xu8dc41942007-02-04 23:31:32 -08002832 if (pkt_sk(sk)->auxdata) {
Herbert Xuffbc6112007-02-04 23:33:10 -08002833 struct tpacket_auxdata aux;
2834
2835 aux.tp_status = TP_STATUS_USER;
2836 if (skb->ip_summed == CHECKSUM_PARTIAL)
2837 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2838 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2839 aux.tp_snaplen = skb->len;
2840 aux.tp_mac = 0;
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002841 aux.tp_net = skb_network_offset(skb);
Ben Greeara3bcc232011-06-01 06:49:10 +00002842 if (vlan_tx_tag_present(skb)) {
2843 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2844 aux.tp_status |= TP_STATUS_VLAN_VALID;
2845 } else {
2846 aux.tp_vlan_tci = 0;
2847 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07002848 aux.tp_padding = 0;
Herbert Xuffbc6112007-02-04 23:33:10 -08002849 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
Herbert Xu8dc41942007-02-04 23:31:32 -08002850 }
2851
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 /*
2853 * Free or return the buffer as appropriate. Again this
2854 * hides all the races and re-entrancy issues from us.
2855 */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002856 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
2858out_free:
2859 skb_free_datagram(sk, skb);
2860out:
2861 return err;
2862}
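/*
 * User-space sketch (illustrative, not kernel code) of consuming the
 * recvmsg() path above; assumes <sys/socket.h>, <linux/if_packet.h>,
 * a bound AF_PACKET socket fd, and PACKET_AUXDATA already enabled:
 *
 *	char frame[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct sockaddr_ll from;
 *	struct tpacket_auxdata aux;
 *	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
 *	struct msghdr msg = {
 *		.msg_name = &from,   .msg_namelen = sizeof(from),
 *		.msg_iov = &iov,     .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA)
 *			memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
 *
 * msg_name is filled with the sockaddr_ll built above, and MSG_TRUNC in
 * msg_flags signals that the frame exceeded the supplied buffer.
 */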
2863
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2865 int *uaddr_len, int peer)
2866{
2867 struct net_device *dev;
2868 struct sock *sk = sock->sk;
2869
2870 if (peer)
2871 return -EOPNOTSUPP;
2872
2873 uaddr->sa_family = AF_PACKET;
Daniel Borkmannefb75822013-06-12 16:02:27 +02002874 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
Eric Dumazet654d1f82009-11-02 10:43:32 +01002875 rcu_read_lock();
2876 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2877 if (dev)
Daniel Borkmannefb75822013-06-12 16:02:27 +02002878 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
Eric Dumazet654d1f82009-11-02 10:43:32 +01002879 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 *uaddr_len = sizeof(*uaddr);
2881
2882 return 0;
2883}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884
2885static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2886 int *uaddr_len, int peer)
2887{
2888 struct net_device *dev;
2889 struct sock *sk = sock->sk;
2890 struct packet_sock *po = pkt_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00002891 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892
2893 if (peer)
2894 return -EOPNOTSUPP;
2895
2896 sll->sll_family = AF_PACKET;
2897 sll->sll_ifindex = po->ifindex;
2898 sll->sll_protocol = po->num;
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002899 sll->sll_pkttype = 0;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002900 rcu_read_lock();
2901 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 if (dev) {
2903 sll->sll_hatype = dev->type;
2904 sll->sll_halen = dev->addr_len;
2905 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 } else {
2907 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2908 sll->sll_halen = 0;
2909 }
Eric Dumazet654d1f82009-11-02 10:43:32 +01002910 rcu_read_unlock();
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002911 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
2913 return 0;
2914}
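/*
 * User-space sketch of the getname path above (illustrative): after a
 * bind(), getsockname() returns the sockaddr_ll assembled here.
 *
 *	struct sockaddr_ll sll;
 *	socklen_t len = sizeof(sll);
 *
 *	if (getsockname(fd, (struct sockaddr *)&sll, &len) == 0)
 *		read sll.sll_ifindex, sll.sll_hatype, sll.sll_halen and
 *		the first sll_halen bytes of sll.sll_addr
 */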
2915
Wang Chen2aeb0b82008-07-14 20:49:46 -07002916static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2917 int what)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918{
2919 switch (i->type) {
2920 case PACKET_MR_MULTICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002921 if (i->alen != dev->addr_len)
2922 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923 if (what > 0)
Jiri Pirko22bedad2010-04-01 21:22:57 +00002924 return dev_mc_add(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 else
Jiri Pirko22bedad2010-04-01 21:22:57 +00002926 return dev_mc_del(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 break;
2928 case PACKET_MR_PROMISC:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002929 return dev_set_promiscuity(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 break;
2931 case PACKET_MR_ALLMULTI:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002932 return dev_set_allmulti(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 break;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002934 case PACKET_MR_UNICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002935 if (i->alen != dev->addr_len)
2936 return -EINVAL;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002937 if (what > 0)
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002938 return dev_uc_add(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002939 else
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002940 return dev_uc_del(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002941 break;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002942 default:
2943 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 }
Wang Chen2aeb0b82008-07-14 20:49:46 -07002945 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946}
2947
2948static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2949{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002950 for ( ; i; i = i->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 if (i->ifindex == dev->ifindex)
2952 packet_dev_mc(dev, i, what);
2953 }
2954}
2955
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002956static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957{
2958 struct packet_sock *po = pkt_sk(sk);
2959 struct packet_mclist *ml, *i;
2960 struct net_device *dev;
2961 int err;
2962
2963 rtnl_lock();
2964
2965 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002966 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 if (!dev)
2968 goto done;
2969
2970 err = -EINVAL;
Jiri Pirko11625632010-03-02 20:40:01 +00002971 if (mreq->mr_alen > dev->addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 goto done;
2973
2974 err = -ENOBUFS;
Kris Katterjohn8b3a7002006-01-11 15:56:43 -08002975 i = kmalloc(sizeof(*i), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976 if (i == NULL)
2977 goto done;
2978
2979 err = 0;
2980 for (ml = po->mclist; ml; ml = ml->next) {
2981 if (ml->ifindex == mreq->mr_ifindex &&
2982 ml->type == mreq->mr_type &&
2983 ml->alen == mreq->mr_alen &&
2984 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2985 ml->count++;
2986 /* Free the new element ... */
2987 kfree(i);
2988 goto done;
2989 }
2990 }
2991
2992 i->type = mreq->mr_type;
2993 i->ifindex = mreq->mr_ifindex;
2994 i->alen = mreq->mr_alen;
2995 memcpy(i->addr, mreq->mr_address, i->alen);
2996 i->count = 1;
2997 i->next = po->mclist;
2998 po->mclist = i;
Wang Chen2aeb0b82008-07-14 20:49:46 -07002999 err = packet_dev_mc(dev, i, 1);
3000 if (err) {
3001 po->mclist = i->next;
3002 kfree(i);
3003 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004
3005done:
3006 rtnl_unlock();
3007 return err;
3008}
3009
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003010static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011{
3012 struct packet_mclist *ml, **mlp;
3013
3014 rtnl_lock();
3015
3016 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3017 if (ml->ifindex == mreq->mr_ifindex &&
3018 ml->type == mreq->mr_type &&
3019 ml->alen == mreq->mr_alen &&
3020 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3021 if (--ml->count == 0) {
3022 struct net_device *dev;
3023 *mlp = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00003024 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3025 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 kfree(ml);
3028 }
3029 rtnl_unlock();
3030 return 0;
3031 }
3032 }
3033 rtnl_unlock();
3034 return -EADDRNOTAVAIL;
3035}
3036
3037static void packet_flush_mclist(struct sock *sk)
3038{
3039 struct packet_sock *po = pkt_sk(sk);
3040 struct packet_mclist *ml;
3041
3042 if (!po->mclist)
3043 return;
3044
3045 rtnl_lock();
3046 while ((ml = po->mclist) != NULL) {
3047 struct net_device *dev;
3048
3049 po->mclist = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00003050 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3051 if (dev != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 kfree(ml);
3054 }
3055 rtnl_unlock();
3056}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057
3058static int
David S. Millerb7058842009-09-30 16:12:20 -07003059packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060{
3061 struct sock *sk = sock->sk;
Herbert Xu8dc41942007-02-04 23:31:32 -08003062 struct packet_sock *po = pkt_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 int ret;
3064
3065 if (level != SOL_PACKET)
3066 return -ENOPROTOOPT;
3067
Johann Baudy69e3c752009-05-18 22:11:22 -07003068 switch (optname) {
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003069 case PACKET_ADD_MEMBERSHIP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003070 case PACKET_DROP_MEMBERSHIP:
3071 {
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003072 struct packet_mreq_max mreq;
3073 int len = optlen;
3074 memset(&mreq, 0, sizeof(mreq));
3075 if (len < sizeof(struct packet_mreq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 return -EINVAL;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003077 if (len > sizeof(mreq))
3078 len = sizeof(mreq);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003079 if (copy_from_user(&mreq, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 return -EFAULT;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003081 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3082 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 if (optname == PACKET_ADD_MEMBERSHIP)
3084 ret = packet_mc_add(sk, &mreq);
3085 else
3086 ret = packet_mc_drop(sk, &mreq);
3087 return ret;
3088 }
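	/*
	 * User-space sketch of the membership interface handled above
	 * (illustrative; needs <linux/if_packet.h>). Putting a device
	 * into promiscuous mode, for a caller-supplied ifindex:
	 *
	 *	struct packet_mreq mr = {
	 *		.mr_ifindex = ifindex,
	 *		.mr_type    = PACKET_MR_PROMISC,
	 *	};
	 *
	 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
	 *		   &mr, sizeof(mr));
	 *
	 * mr_alen/mr_address stay zero here; only the MULTICAST and
	 * UNICAST types use them, and then mr_alen must match the
	 * device address length, as checked in packet_dev_mc().
	 */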
David S. Millera2efcfa2007-05-29 13:12:50 -07003089
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 case PACKET_RX_RING:
Johann Baudy69e3c752009-05-18 22:11:22 -07003091 case PACKET_TX_RING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092 {
chetan lokef6fb8f12011-08-19 10:18:16 +00003093 union tpacket_req_u req_u;
3094 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095
chetan lokef6fb8f12011-08-19 10:18:16 +00003096 switch (po->tp_version) {
3097 case TPACKET_V1:
3098 case TPACKET_V2:
3099 len = sizeof(req_u.req);
3100 break;
3101 case TPACKET_V3:
3102 default:
3103 len = sizeof(req_u.req3);
3104 break;
3105 }
3106 if (optlen < len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003107 return -EINVAL;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003108 if (pkt_sk(sk)->has_vnet_hdr)
3109 return -EINVAL;
chetan lokef6fb8f12011-08-19 10:18:16 +00003110 if (copy_from_user(&req_u.req, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 return -EFAULT;
chetan lokef6fb8f12011-08-19 10:18:16 +00003112 return packet_set_ring(sk, &req_u, 0,
3113 optname == PACKET_TX_RING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114 }
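	/*
	 * User-space sketch of a ring request that satisfies the
	 * geometry checks in packet_set_ring() below (sizes are
	 * illustrative, page_size stands for getpagesize()):
	 *
	 *	struct tpacket_req req = {
	 *		.tp_block_size = 4 * page_size,
	 *		.tp_frame_size = 2048,
	 *		.tp_block_nr   = 64,
	 *		.tp_frame_nr   = (4 * page_size / 2048) * 64,
	 *	};
	 *
	 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	 *
	 * tp_block_size must be a multiple of PAGE_SIZE, tp_frame_size
	 * a multiple of TPACKET_ALIGNMENT and large enough for the
	 * header plus tp_reserve, and tp_frame_nr must equal
	 * frames-per-block times tp_block_nr.
	 */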
3115 case PACKET_COPY_THRESH:
3116 {
3117 int val;
3118
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003119 if (optlen != sizeof(val))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003121 if (copy_from_user(&val, optval, sizeof(val)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003122 return -EFAULT;
3123
3124 pkt_sk(sk)->copy_thresh = val;
3125 return 0;
3126 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003127 case PACKET_VERSION:
3128 {
3129 int val;
3130
3131 if (optlen != sizeof(val))
3132 return -EINVAL;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003133 if (copy_from_user(&val, optval, sizeof(val)))
3134 return -EFAULT;
3135 switch (val) {
3136 case TPACKET_V1:
3137 case TPACKET_V2:
chetan lokef6fb8f12011-08-19 10:18:16 +00003138 case TPACKET_V3:
Philip Pettersson24a567e2017-02-20 15:04:20 +08003139 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003140 default:
3141 return -EINVAL;
3142 }
Philip Pettersson24a567e2017-02-20 15:04:20 +08003143 lock_sock(sk);
3144 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3145 ret = -EBUSY;
3146 } else {
3147 po->tp_version = val;
3148 ret = 0;
3149 }
3150 release_sock(sk);
3151 return ret;
Eric Dumazet09b9f192017-08-17 15:24:51 +08003152
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003153 }
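	/*
	 * Ordering note from the user-space side (illustrative): the
	 * -EBUSY check above means the version must be chosen while no
	 * ring exists, i.e. before any PACKET_RX_RING/PACKET_TX_RING
	 * setsockopt:
	 *
	 *	int ver = TPACKET_V2;
	 *
	 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	 */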
Patrick McHardy89133362008-07-18 18:05:19 -07003154 case PACKET_RESERVE:
3155 {
3156 unsigned int val;
3157
3158 if (optlen != sizeof(val))
3159 return -EINVAL;
Patrick McHardy89133362008-07-18 18:05:19 -07003160 if (copy_from_user(&val, optval, sizeof(val)))
3161 return -EFAULT;
Andrey Konovalova117cf02017-03-29 16:11:22 +02003162 if (val > INT_MAX)
3163 return -EINVAL;
Willem de Bruijn8b981792017-08-10 12:41:58 -04003164 lock_sock(sk);
3165 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3166 ret = -EBUSY;
3167 } else {
3168 po->tp_reserve = val;
3169 ret = 0;
3170 }
3171 release_sock(sk);
3172 return ret;
Patrick McHardy89133362008-07-18 18:05:19 -07003173 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003174 case PACKET_LOSS:
3175 {
3176 unsigned int val;
3177
3178 if (optlen != sizeof(val))
3179 return -EINVAL;
3180 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3181 return -EBUSY;
3182 if (copy_from_user(&val, optval, sizeof(val)))
3183 return -EFAULT;
3184 po->tp_loss = !!val;
3185 return 0;
3186 }
Herbert Xu8dc41942007-02-04 23:31:32 -08003187 case PACKET_AUXDATA:
3188 {
3189 int val;
3190
3191 if (optlen < sizeof(val))
3192 return -EINVAL;
3193 if (copy_from_user(&val, optval, sizeof(val)))
3194 return -EFAULT;
3195
3196 po->auxdata = !!val;
3197 return 0;
3198 }
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003199 case PACKET_ORIGDEV:
3200 {
3201 int val;
3202
3203 if (optlen < sizeof(val))
3204 return -EINVAL;
3205 if (copy_from_user(&val, optval, sizeof(val)))
3206 return -EFAULT;
3207
3208 po->origdev = !!val;
3209 return 0;
3210 }
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003211 case PACKET_VNET_HDR:
3212 {
3213 int val;
3214
3215 if (sock->type != SOCK_RAW)
3216 return -EINVAL;
3217 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3218 return -EBUSY;
3219 if (optlen < sizeof(val))
3220 return -EINVAL;
3221 if (copy_from_user(&val, optval, sizeof(val)))
3222 return -EFAULT;
3223
3224 po->has_vnet_hdr = !!val;
3225 return 0;
3226 }
Scott McMillan614f60f2010-06-02 05:53:56 -07003227 case PACKET_TIMESTAMP:
3228 {
3229 int val;
3230
3231 if (optlen != sizeof(val))
3232 return -EINVAL;
3233 if (copy_from_user(&val, optval, sizeof(val)))
3234 return -EFAULT;
3235
3236 po->tp_tstamp = val;
3237 return 0;
3238 }
David S. Millerdc99f602011-07-05 01:45:05 -07003239 case PACKET_FANOUT:
3240 {
3241 int val;
3242
3243 if (optlen != sizeof(val))
3244 return -EINVAL;
3245 if (copy_from_user(&val, optval, sizeof(val)))
3246 return -EFAULT;
3247
3248 return fanout_add(sk, val & 0xffff, val >> 16);
3249 }
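	/*
	 * User-space sketch of the PACKET_FANOUT encoding consumed
	 * above: the low 16 bits carry the group id, the high bits the
	 * fanout mode. The group id 42 is an arbitrary illustrative
	 * value:
	 *
	 *	int val = 42 | (PACKET_FANOUT_HASH << 16);
	 *
	 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
	 */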
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250 default:
3251 return -ENOPROTOOPT;
3252 }
3253}
3254
3255static int packet_getsockopt(struct socket *sock, int level, int optname,
3256 char __user *optval, int __user *optlen)
3257{
3258 int len;
Herbert Xu8dc41942007-02-04 23:31:32 -08003259 int val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 struct sock *sk = sock->sk;
3261 struct packet_sock *po = pkt_sk(sk);
Herbert Xu8dc41942007-02-04 23:31:32 -08003262 void *data;
3263 struct tpacket_stats st;
chetan lokef6fb8f12011-08-19 10:18:16 +00003264 union tpacket_stats_u st_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265
3266 if (level != SOL_PACKET)
3267 return -ENOPROTOOPT;
3268
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003269 if (get_user(len, optlen))
3270 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271
3272 if (len < 0)
3273 return -EINVAL;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003274
Johann Baudy69e3c752009-05-18 22:11:22 -07003275 switch (optname) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276 case PACKET_STATISTICS:
chetan lokef6fb8f12011-08-19 10:18:16 +00003277 if (po->tp_version == TPACKET_V3) {
3278 len = sizeof(struct tpacket_stats_v3);
3279 } else {
3280 if (len > sizeof(struct tpacket_stats))
3281 len = sizeof(struct tpacket_stats);
3282 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283 spin_lock_bh(&sk->sk_receive_queue.lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00003284 if (po->tp_version == TPACKET_V3) {
3285 memcpy(&st_u.stats3, &po->stats,
3286 sizeof(struct tpacket_stats));
3287 st_u.stats3.tp_freeze_q_cnt =
3288 po->stats_u.stats3.tp_freeze_q_cnt;
3289 st_u.stats3.tp_packets += po->stats.tp_drops;
3290 data = &st_u.stats3;
3291 } else {
3292 st = po->stats;
3293 st.tp_packets += st.tp_drops;
3294 data = &st;
3295 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 memset(&po->stats, 0, sizeof(st));
3297 spin_unlock_bh(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298 break;
Herbert Xu8dc41942007-02-04 23:31:32 -08003299 case PACKET_AUXDATA:
3300 if (len > sizeof(int))
3301 len = sizeof(int);
3302 val = po->auxdata;
3303
3304 data = &val;
3305 break;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003306 case PACKET_ORIGDEV:
3307 if (len > sizeof(int))
3308 len = sizeof(int);
3309 val = po->origdev;
3310
3311 data = &val;
3312 break;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003313 case PACKET_VNET_HDR:
3314 if (len > sizeof(int))
3315 len = sizeof(int);
3316 val = po->has_vnet_hdr;
3317
3318 data = &val;
3319 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003320 case PACKET_VERSION:
3321 if (len > sizeof(int))
3322 len = sizeof(int);
3323 val = po->tp_version;
3324 data = &val;
3325 break;
3326 case PACKET_HDRLEN:
3327 if (len > sizeof(int))
3328 len = sizeof(int);
3329 if (copy_from_user(&val, optval, len))
3330 return -EFAULT;
3331 switch (val) {
3332 case TPACKET_V1:
3333 val = sizeof(struct tpacket_hdr);
3334 break;
3335 case TPACKET_V2:
3336 val = sizeof(struct tpacket2_hdr);
3337 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00003338 case TPACKET_V3:
3339 val = sizeof(struct tpacket3_hdr);
3340 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003341 default:
3342 return -EINVAL;
3343 }
3344 data = &val;
3345 break;
Patrick McHardy89133362008-07-18 18:05:19 -07003346 case PACKET_RESERVE:
3347 if (len > sizeof(unsigned int))
3348 len = sizeof(unsigned int);
3349 val = po->tp_reserve;
3350 data = &val;
3351 break;
Johann Baudy69e3c752009-05-18 22:11:22 -07003352 case PACKET_LOSS:
3353 if (len > sizeof(unsigned int))
3354 len = sizeof(unsigned int);
3355 val = po->tp_loss;
3356 data = &val;
3357 break;
Scott McMillan614f60f2010-06-02 05:53:56 -07003358 case PACKET_TIMESTAMP:
3359 if (len > sizeof(int))
3360 len = sizeof(int);
3361 val = po->tp_tstamp;
3362 data = &val;
3363 break;
David S. Millerdc99f602011-07-05 01:45:05 -07003364 case PACKET_FANOUT:
3365 if (len > sizeof(int))
3366 len = sizeof(int);
3367 val = (po->fanout ?
3368 ((u32)po->fanout->id |
3369 ((u32)po->fanout->type << 16)) :
3370 0);
3371 data = &val;
3372 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 default:
3374 return -ENOPROTOOPT;
3375 }
3376
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003377 if (put_user(len, optlen))
3378 return -EFAULT;
Herbert Xu8dc41942007-02-04 23:31:32 -08003379 if (copy_to_user(optval, data, len))
3380 return -EFAULT;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003381 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382}
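/*
 * User-space sketch of reading PACKET_STATISTICS (TPACKET_V1/V2 layout,
 * illustrative). Note that the branch above folds tp_drops into
 * tp_packets and zeroes the counters on every read:
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 */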
3383
3384
3385static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3386{
3387 struct sock *sk;
3388 struct hlist_node *node;
Jason Lunzad930652007-02-20 23:19:54 -08003389 struct net_device *dev = data;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003390 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003391
stephen hemminger808f5112010-02-22 07:57:18 +00003392 rcu_read_lock();
3393 sk_for_each_rcu(sk, node, &net->packet.sklist) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 struct packet_sock *po = pkt_sk(sk);
3395
3396 switch (msg) {
3397 case NETDEV_UNREGISTER:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 if (po->mclist)
3399 packet_dev_mclist(dev, po->mclist, -1);
David S. Millera2efcfa2007-05-29 13:12:50 -07003400 /* fallthrough */
3401
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 case NETDEV_DOWN:
3403 if (dev->ifindex == po->ifindex) {
3404 spin_lock(&po->bind_lock);
3405 if (po->running) {
David S. Millerce06b032011-07-04 01:44:29 -07003406 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407 sk->sk_err = ENETDOWN;
3408 if (!sock_flag(sk, SOCK_DEAD))
3409 sk->sk_error_report(sk);
3410 }
3411 if (msg == NETDEV_UNREGISTER) {
3412 po->ifindex = -1;
Ben Greear160ff182011-06-01 07:18:52 +00003413 if (po->prot_hook.dev)
3414 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 po->prot_hook.dev = NULL;
3416 }
3417 spin_unlock(&po->bind_lock);
3418 }
3419 break;
3420 case NETDEV_UP:
stephen hemminger808f5112010-02-22 07:57:18 +00003421 if (dev->ifindex == po->ifindex) {
3422 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003423 if (po->num)
3424 register_prot_hook(sk);
stephen hemminger808f5112010-02-22 07:57:18 +00003425 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 break;
3428 }
3429 }
stephen hemminger808f5112010-02-22 07:57:18 +00003430 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 return NOTIFY_DONE;
3432}
3433
3434
3435static int packet_ioctl(struct socket *sock, unsigned int cmd,
3436 unsigned long arg)
3437{
3438 struct sock *sk = sock->sk;
3439
Johann Baudy69e3c752009-05-18 22:11:22 -07003440 switch (cmd) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003441 case SIOCOUTQ:
3442 {
3443 int amount = sk_wmem_alloc_get(sk);
Eric Dumazet31e6d362009-06-17 19:05:41 -07003444
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003445 return put_user(amount, (int __user *)arg);
3446 }
3447 case SIOCINQ:
3448 {
3449 struct sk_buff *skb;
3450 int amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003452 spin_lock_bh(&sk->sk_receive_queue.lock);
3453 skb = skb_peek(&sk->sk_receive_queue);
3454 if (skb)
3455 amount = skb->len;
3456 spin_unlock_bh(&sk->sk_receive_queue.lock);
3457 return put_user(amount, (int __user *)arg);
3458 }
3459 case SIOCGSTAMP:
3460 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3461 case SIOCGSTAMPNS:
3462 return sock_get_timestampns(sk, (struct timespec __user *)arg);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003463
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464#ifdef CONFIG_INET
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003465 case SIOCADDRT:
3466 case SIOCDELRT:
3467 case SIOCDARP:
3468 case SIOCGARP:
3469 case SIOCSARP:
3470 case SIOCGIFADDR:
3471 case SIOCSIFADDR:
3472 case SIOCGIFBRDADDR:
3473 case SIOCSIFBRDADDR:
3474 case SIOCGIFNETMASK:
3475 case SIOCSIFNETMASK:
3476 case SIOCGIFDSTADDR:
3477 case SIOCSIFDSTADDR:
3478 case SIOCSIFFLAGS:
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003479 return inet_dgram_ops.ioctl(sock, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480#endif
3481
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003482 default:
3483 return -ENOIOCTLCMD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 }
3485 return 0;
3486}
3487
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003488static unsigned int packet_poll(struct file *file, struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489 poll_table *wait)
3490{
3491 struct sock *sk = sock->sk;
3492 struct packet_sock *po = pkt_sk(sk);
3493 unsigned int mask = datagram_poll(file, sock, wait);
3494
3495 spin_lock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003496 if (po->rx_ring.pg_vec) {
chetan lokef6fb8f12011-08-19 10:18:16 +00003497 if (!packet_previous_rx_frame(po, &po->rx_ring,
3498 TP_STATUS_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 mask |= POLLIN | POLLRDNORM;
3500 }
3501 spin_unlock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003502 spin_lock_bh(&sk->sk_write_queue.lock);
3503 if (po->tx_ring.pg_vec) {
3504 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3505 mask |= POLLOUT | POLLWRNORM;
3506 }
3507 spin_unlock_bh(&sk->sk_write_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508 return mask;
3509}
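/*
 * User-space sketch of a poll()-driven rx-ring consumer matching the
 * readiness computed above (TPACKET_V2 layout; the geometry and the
 * lack of memory barriers are illustrative simplifications):
 *
 *	for (;;) {
 *		struct tpacket2_hdr *hdr =
 *			(void *)(ring + head * frame_size);
 *
 *		if (!(hdr->tp_status & TP_STATUS_USER)) {
 *			struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *			poll(&pfd, 1, -1);
 *			continue;
 *		}
 *		consume hdr->tp_len bytes at offset hdr->tp_mac, then:
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		head = (head + 1) % frame_nr;
 *	}
 */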
3510
3511
3512/* Dirty? Well, I still have not found a better way to account
3513 * for user mmaps.
3514 */
3515
3516static void packet_mm_open(struct vm_area_struct *vma)
3517{
3518 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003519 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003521
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522 if (sk)
3523 atomic_inc(&pkt_sk(sk)->mapped);
3524}
3525
3526static void packet_mm_close(struct vm_area_struct *vma)
3527{
3528 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003529 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003531
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 if (sk)
3533 atomic_dec(&pkt_sk(sk)->mapped);
3534}
3535
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003536static const struct vm_operations_struct packet_mmap_ops = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003537 .open = packet_mm_open,
3538 .close = packet_mm_close,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539};
3540
Neil Horman0e3125c2010-11-16 10:26:47 -08003541static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3542 unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543{
3544 int i;
3545
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003546 for (i = 0; i < len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003547 if (likely(pg_vec[i].buffer)) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003548 if (is_vmalloc_addr(pg_vec[i].buffer))
Neil Horman0e3125c2010-11-16 10:26:47 -08003549 vfree(pg_vec[i].buffer);
3550 else
3551 free_pages((unsigned long)pg_vec[i].buffer,
3552 order);
3553 pg_vec[i].buffer = NULL;
3554 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 }
3556 kfree(pg_vec);
3557}
3558
Olof Johanssoneea49cc92011-11-02 11:00:49 +00003559static char *alloc_one_pg_vec_page(unsigned long order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003560{
Neil Horman0e3125c2010-11-16 10:26:47 -08003561 char *buffer = NULL;
3562 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3563 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
Eric Dumazet719bfea2009-04-15 03:39:52 -07003564
Neil Horman0e3125c2010-11-16 10:26:47 -08003565 buffer = (char *) __get_free_pages(gfp_flags, order);
3566
3567 if (buffer)
3568 return buffer;
3569
3570 /*
3571 * __get_free_pages failed, fall back to vmalloc
3572 */
Eric Dumazetbbce5a52010-11-20 07:31:54 +00003573 buffer = vzalloc((1 << order) * PAGE_SIZE);
Neil Horman0e3125c2010-11-16 10:26:47 -08003574
3575 if (buffer)
3576 return buffer;
3577
3578 /*
3579 * vmalloc failed; let's dig into swap here
3580 */
Neil Horman0e3125c2010-11-16 10:26:47 -08003581 gfp_flags &= ~__GFP_NORETRY;
3582 buffer = (char *)__get_free_pages(gfp_flags, order);
3583 if (buffer)
3584 return buffer;
3585
3586 /*
3587 * complete and utter failure
3588 */
3589 return NULL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003590}
3591
Neil Horman0e3125c2010-11-16 10:26:47 -08003592static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003593{
3594 unsigned int block_nr = req->tp_block_nr;
Neil Horman0e3125c2010-11-16 10:26:47 -08003595 struct pgv *pg_vec;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003596 int i;
3597
Neil Horman0e3125c2010-11-16 10:26:47 -08003598 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003599 if (unlikely(!pg_vec))
3600 goto out;
3601
3602 for (i = 0; i < block_nr; i++) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003603 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
Neil Horman0e3125c2010-11-16 10:26:47 -08003604 if (unlikely(!pg_vec[i].buffer))
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003605 goto out_free_pgvec;
3606 }
3607
3608out:
3609 return pg_vec;
3610
3611out_free_pgvec:
3612 free_pg_vec(pg_vec, order, block_nr);
3613 pg_vec = NULL;
3614 goto out;
3615}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616
chetan lokef6fb8f12011-08-19 10:18:16 +00003617static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
Johann Baudy69e3c752009-05-18 22:11:22 -07003618 int closing, int tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619{
Neil Horman0e3125c2010-11-16 10:26:47 -08003620 struct pgv *pg_vec = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621 struct packet_sock *po = pkt_sk(sk);
Al Viro0e11c912006-11-08 00:26:29 -08003622 int was_running, order = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003623 struct packet_ring_buffer *rb;
3624 struct sk_buff_head *rb_queue;
Al Viro0e11c912006-11-08 00:26:29 -08003625 __be16 num;
chetan lokef6fb8f12011-08-19 10:18:16 +00003626 int err = -EINVAL;
3627 /* Local alias added to keep code churn minimal */
3628 struct tpacket_req *req = &req_u->req;
3629
Philip Pettersson24a567e2017-02-20 15:04:20 +08003630 lock_sock(sk);
chetan lokef6fb8f12011-08-19 10:18:16 +00003631 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3632 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3633 WARN(1, "Tx-ring is not supported.\n");
3634 goto out;
3635 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003636
3637 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3638 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3639
3640 err = -EBUSY;
3641 if (!closing) {
3642 if (atomic_read(&po->mapped))
3643 goto out;
3644 if (atomic_read(&rb->pending))
3645 goto out;
3646 }
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003647
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 if (req->tp_block_nr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003649 /* Sanity tests and some calculations */
Johann Baudy69e3c752009-05-18 22:11:22 -07003650 err = -EBUSY;
3651 if (unlikely(rb->pg_vec))
3652 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003654 switch (po->tp_version) {
3655 case TPACKET_V1:
3656 po->tp_hdrlen = TPACKET_HDRLEN;
3657 break;
3658 case TPACKET_V2:
3659 po->tp_hdrlen = TPACKET2_HDRLEN;
3660 break;
chetan lokef6fb8f12011-08-19 10:18:16 +00003661 case TPACKET_V3:
3662 po->tp_hdrlen = TPACKET3_HDRLEN;
3663 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003664 }
3665
Johann Baudy69e3c752009-05-18 22:11:22 -07003666 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003667 if (unlikely((int)req->tp_block_size <= 0))
Johann Baudy69e3c752009-05-18 22:11:22 -07003668 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003669 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003670 goto out;
Eric Dumazet6ac4e552014-08-15 09:16:04 -07003671 if (po->tp_version >= TPACKET_V3 &&
Andrey Konovalovba671b12017-03-29 16:11:20 +02003672 req->tp_block_size <=
3673 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
Eric Dumazet6ac4e552014-08-15 09:16:04 -07003674 goto out;
Patrick McHardy89133362008-07-18 18:05:19 -07003675 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
Johann Baudy69e3c752009-05-18 22:11:22 -07003676 po->tp_reserve))
3677 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003678 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003679 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680
Johann Baudy69e3c752009-05-18 22:11:22 -07003681 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3682 if (unlikely(rb->frames_per_block <= 0))
3683 goto out;
Andrey Konovalov4d86d7d2017-03-29 16:11:21 +02003684 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
3685 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07003686 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3687 req->tp_frame_nr))
3688 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689
3690 err = -ENOMEM;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003691 order = get_order(req->tp_block_size);
3692 pg_vec = alloc_pg_vec(req, order);
3693 if (unlikely(!pg_vec))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694 goto out;
chetan lokef6fb8f12011-08-19 10:18:16 +00003695 switch (po->tp_version) {
3696 case TPACKET_V3:
3697 /* The transmit path is not supported. We already
3698 * checked this above, but stay paranoid.
3699 */
3700 if (!tx_ring)
3701 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3702 break;
3703 default:
3704 break;
3705 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003706 }
3707 /* Done */
3708 else {
3709 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003710 if (unlikely(req->tp_frame_nr))
Johann Baudy69e3c752009-05-18 22:11:22 -07003711 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712 }
3713
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714 /* Detach socket from network */
3715 spin_lock(&po->bind_lock);
3716 was_running = po->running;
3717 num = po->num;
3718 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 po->num = 0;
David S. Millerce06b032011-07-04 01:44:29 -07003720 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721 }
3722 spin_unlock(&po->bind_lock);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003723
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724 synchronize_net();
3725
3726 err = -EBUSY;
Herbert Xu905db442009-01-30 14:12:06 -08003727 mutex_lock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728 if (closing || atomic_read(&po->mapped) == 0) {
3729 err = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003730 spin_lock_bh(&rb_queue->lock);
Changli Gaoc053fd92010-12-10 16:02:20 -08003731 swap(rb->pg_vec, pg_vec);
Johann Baudy69e3c752009-05-18 22:11:22 -07003732 rb->frame_max = (req->tp_frame_nr - 1);
3733 rb->head = 0;
3734 rb->frame_size = req->tp_frame_size;
3735 spin_unlock_bh(&rb_queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736
Changli Gaoc053fd92010-12-10 16:02:20 -08003737 swap(rb->pg_vec_order, order);
3738 swap(rb->pg_vec_len, req->tp_block_nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739
Johann Baudy69e3c752009-05-18 22:11:22 -07003740 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3741 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3742 tpacket_rcv : packet_rcv;
3743 skb_queue_purge(rb_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 if (atomic_read(&po->mapped))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003745 pr_err("packet_mmap: vma is busy: %d\n",
3746 atomic_read(&po->mapped));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 }
Herbert Xu905db442009-01-30 14:12:06 -08003748 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749
3750 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003751 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752 po->num = num;
David S. Millerce06b032011-07-04 01:44:29 -07003753 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003754 }
3755 spin_unlock(&po->bind_lock);
chetan lokef6fb8f12011-08-19 10:18:16 +00003756 if (closing && (po->tp_version > TPACKET_V2)) {
3757 /* Because we don't support block-based V3 on tx-ring */
3758 if (!tx_ring)
3759 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3760 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 if (pg_vec)
3763 free_pg_vec(pg_vec, order, req->tp_block_nr);
3764out:
Philip Pettersson24a567e2017-02-20 15:04:20 +08003765 release_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766 return err;
3767}
3768
Johann Baudy69e3c752009-05-18 22:11:22 -07003769static int packet_mmap(struct file *file, struct socket *sock,
3770 struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771{
3772 struct sock *sk = sock->sk;
3773 struct packet_sock *po = pkt_sk(sk);
Johann Baudy69e3c752009-05-18 22:11:22 -07003774 unsigned long size, expected_size;
3775 struct packet_ring_buffer *rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776 unsigned long start;
3777 int err = -EINVAL;
3778 int i;
3779
3780 if (vma->vm_pgoff)
3781 return -EINVAL;
3782
Herbert Xu905db442009-01-30 14:12:06 -08003783 mutex_lock(&po->pg_vec_lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003784
3785 expected_size = 0;
3786 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3787 if (rb->pg_vec) {
3788 expected_size += rb->pg_vec_len
3789 * rb->pg_vec_pages
3790 * PAGE_SIZE;
3791 }
3792 }
3793
3794 if (expected_size == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07003796
3797 size = vma->vm_end - vma->vm_start;
3798 if (size != expected_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 goto out;
3800
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801 start = vma->vm_start;
Johann Baudy69e3c752009-05-18 22:11:22 -07003802 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3803 if (rb->pg_vec == NULL)
3804 continue;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003805
Johann Baudy69e3c752009-05-18 22:11:22 -07003806 for (i = 0; i < rb->pg_vec_len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003807 struct page *page;
3808 void *kaddr = rb->pg_vec[i].buffer;
Johann Baudy69e3c752009-05-18 22:11:22 -07003809 int pg_num;
3810
Changli Gaoc56b4d92010-12-01 02:52:57 +00003811 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3812 page = pgv_to_page(kaddr);
Johann Baudy69e3c752009-05-18 22:11:22 -07003813 err = vm_insert_page(vma, start, page);
3814 if (unlikely(err))
3815 goto out;
3816 start += PAGE_SIZE;
Neil Horman0e3125c2010-11-16 10:26:47 -08003817 kaddr += PAGE_SIZE;
Johann Baudy69e3c752009-05-18 22:11:22 -07003818 }
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003819 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003821
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003822 atomic_inc(&po->mapped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823 vma->vm_ops = &packet_mmap_ops;
3824 err = 0;
3825
3826out:
Herbert Xu905db442009-01-30 14:12:06 -08003827 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 return err;
3829}
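/*
 * User-space sketch of the mapping contract enforced above: a single
 * mmap() at offset 0 must cover the rx ring followed by the tx ring
 * exactly (illustrative; rx/tx are the tpacket_req structs used when
 * the rings were configured):
 *
 *	size_t sz = (size_t)rx.tp_block_size * rx.tp_block_nr +
 *		    (size_t)tx.tp_block_size * tx.tp_block_nr;
 *	char *ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */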
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003831static const struct proto_ops packet_ops_spkt = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 .family = PF_PACKET,
3833 .owner = THIS_MODULE,
3834 .release = packet_release,
3835 .bind = packet_bind_spkt,
3836 .connect = sock_no_connect,
3837 .socketpair = sock_no_socketpair,
3838 .accept = sock_no_accept,
3839 .getname = packet_getname_spkt,
3840 .poll = datagram_poll,
3841 .ioctl = packet_ioctl,
3842 .listen = sock_no_listen,
3843 .shutdown = sock_no_shutdown,
3844 .setsockopt = sock_no_setsockopt,
3845 .getsockopt = sock_no_getsockopt,
3846 .sendmsg = packet_sendmsg_spkt,
3847 .recvmsg = packet_recvmsg,
3848 .mmap = sock_no_mmap,
3849 .sendpage = sock_no_sendpage,
3850};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003852static const struct proto_ops packet_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 .family = PF_PACKET,
3854 .owner = THIS_MODULE,
3855 .release = packet_release,
3856 .bind = packet_bind,
3857 .connect = sock_no_connect,
3858 .socketpair = sock_no_socketpair,
3859 .accept = sock_no_accept,
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003860 .getname = packet_getname,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861 .poll = packet_poll,
3862 .ioctl = packet_ioctl,
3863 .listen = sock_no_listen,
3864 .shutdown = sock_no_shutdown,
3865 .setsockopt = packet_setsockopt,
3866 .getsockopt = packet_getsockopt,
3867 .sendmsg = packet_sendmsg,
3868 .recvmsg = packet_recvmsg,
3869 .mmap = packet_mmap,
3870 .sendpage = sock_no_sendpage,
3871};
3872
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003873static const struct net_proto_family packet_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874 .family = PF_PACKET,
3875 .create = packet_create,
3876 .owner = THIS_MODULE,
3877};
3878
3879static struct notifier_block packet_netdev_notifier = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003880 .notifier_call = packet_notifier,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881};
3882
3883#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07003884
3885static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
stephen hemminger808f5112010-02-22 07:57:18 +00003886 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887{
Denis V. Luneve372c412007-11-19 22:31:54 -08003888 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003889
3890 rcu_read_lock();
3891 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892}
3893
3894static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3895{
Herbert Xu1bf40952007-12-16 14:04:02 -08003896 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003897 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898}
3899
3900static void packet_seq_stop(struct seq_file *seq, void *v)
stephen hemminger808f5112010-02-22 07:57:18 +00003901 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902{
stephen hemminger808f5112010-02-22 07:57:18 +00003903 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904}
3905
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003906static int packet_seq_show(struct seq_file *seq, void *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907{
3908 if (v == SEQ_START_TOKEN)
3909 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3910 else {
Li Zefanb7ceabd2010-02-08 23:19:29 +00003911 struct sock *s = sk_entry(v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912 const struct packet_sock *po = pkt_sk(s);
3913
3914 seq_printf(seq,
Dan Rosenberg71338aa2011-05-23 12:17:35 +00003915 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 s,
3917 atomic_read(&s->sk_refcnt),
3918 s->sk_type,
3919 ntohs(po->num),
3920 po->ifindex,
3921 po->running,
3922 atomic_read(&s->sk_rmem_alloc),
3923 sock_i_uid(s),
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003924 sock_i_ino(s));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925 }
3926
3927 return 0;
3928}
3929
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003930static const struct seq_operations packet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 .start = packet_seq_start,
3932 .next = packet_seq_next,
3933 .stop = packet_seq_stop,
3934 .show = packet_seq_show,
3935};
3936
3937static int packet_seq_open(struct inode *inode, struct file *file)
3938{
Denis V. Luneve372c412007-11-19 22:31:54 -08003939 return seq_open_net(inode, file, &packet_seq_ops,
3940 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941}
3942
Arjan van de Venda7071d2007-02-12 00:55:36 -08003943static const struct file_operations packet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 .owner = THIS_MODULE,
3945 .open = packet_seq_open,
3946 .read = seq_read,
3947 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003948 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949};
3950
3951#endif
3952
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003953static int __net_init packet_net_init(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003954{
stephen hemminger808f5112010-02-22 07:57:18 +00003955 spin_lock_init(&net->packet.sklist_lock);
Denis V. Lunev2aaef4e2007-12-11 04:19:54 -08003956 INIT_HLIST_HEAD(&net->packet.sklist);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003957
3958 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3959 return -ENOMEM;
3960
3961 return 0;
3962}
3963
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003964static void __net_exit packet_net_exit(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003965{
3966 proc_net_remove(net, "packet");
3967}
3968
3969static struct pernet_operations packet_net_ops = {
3970 .init = packet_net_init,
3971 .exit = packet_net_exit,
3972};
3973
3974
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975static void __exit packet_exit(void)
3976{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977 unregister_netdevice_notifier(&packet_netdev_notifier);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003978 unregister_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 sock_unregister(PF_PACKET);
3980 proto_unregister(&packet_proto);
3981}
3982
3983static int __init packet_init(void)
3984{
3985 int rc = proto_register(&packet_proto, 0);
3986
3987 if (rc != 0)
3988 goto out;
3989
3990 sock_register(&packet_family_ops);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003991 register_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 register_netdevice_notifier(&packet_netdev_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993out:
3994 return rc;
3995}
3996
3997module_init(packet_init);
3998module_exit(packet_exit);
3999MODULE_LICENSE("GPL");
4000MODULE_ALIAS_NETPROTO(PF_PACKET);