/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *	Eric Biederman		:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *	Johann Baudy		:	Added TX RING.
 *	Chetan Loke		:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes the
     ll header inside itself. In this case the ll header is invisible outside
     of the device, but higher levels still should reserve
     dev->hard_header_len.  Some devices are clever enough to reallocate the
     skb when the header will not fit into the reserved space (tunnels);
     others are silly (PPP).
   - a packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It very likely points to the ll header.
		 PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Resume
  If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

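/* Illustrative sketch (editor's addition, not part of the upstream file):
 * packet_mreq_max mirrors the user-visible struct packet_mreq from
 * <linux/if_packet.h>, just with room for longer hardware addresses.
 * A typical user-space call that ends up using these structures might
 * look roughly like this (error handling omitted, "ifindex" assumed to
 * be known to the application):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */
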
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

#define PGV_FROM_VMALLOC 1
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	struct tpacket_kbdq_core	prb_bdqc;
	atomic_t		pending;
};

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	union  tpacket_stats_u	stats_u;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device		*/
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

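/* Illustrative sketch (editor's addition, not part of the upstream file):
 * for TPACKET_V1/V2 the tp_status word written above is the hand-off point
 * between the kernel and a user-space RX ring reader.  A minimal reader
 * loop over an mmap()ed ring might look roughly like this ("ring",
 * "frame_nr" and "frame_size" are assumed to come from the application's
 * own PACKET_RX_RING setup):
 *
 *	for (i = 0; ; i = (i + 1) % frame_nr) {
 *		volatile struct tpacket_hdr *hdr = ring + i * frame_size;
 *
 *		while (!(hdr->tp_status & TP_STATUS_USER))
 *			poll(&pfd, 1, -1);		// wait for the kernel
 *		// ... consume the frame at (char *)hdr + hdr->tp_mac ...
 *		hdr->tp_status = TP_STATUS_KERNEL;	// hand it back
 *	}
 */
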
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

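/* Worked example (editor's addition): with tp_block_size = 4096 and
 * tp_frame_size = 2048, frames_per_block is 2, so frame position 5 maps to
 * pg_vec_pos = 5 / 2 = 2 (the third block) and frame_offset = 5 % 2 = 1,
 * i.e. the frame starts 2048 bytes into pg_vec[2].buffer.
 */
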
static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	if (tx_ring)
		BUG();

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	rtnl_unlock();
	if (!err) {
		switch (ecmd.speed) {
		case SPEED_10000:
			msec = 1;
			div = 10000/1000;
			break;
		case SPEED_1000:
			msec = 1;
			div = 1000/1000;
			break;
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		case SPEED_100:
		case SPEED_10:
		default:
			return DEFAULT_PRB_RETIRE_TOV;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
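
/* Worked example (editor's addition): for a 1 MiB block on a 1 Gbit/s link,
 * mbits = (1048576 * 8) / (1024 * 1024) = 8, div = 1 and msec = 1, so
 * tmo = 8 * 1 and the function returns 9 ms -- roughly the time it takes
 * to fill one block, which is what the retire timer wants to track.
 */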

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {

		/* We could have just memset this but we will lose the
		 * flexibility of making the priv area sticky
		 */
		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
		BLOCK_NUM_PKTS(pbd1) = 0;
		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		getnstimeofday(&ts);
		h1->ts_first_pkt.ts_sec = ts.tv_sec;
		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
		pkc1->pkblk_start = (char *)pbd1;
		pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
		pbd1->version = pkc1->version;
		pkc1->prev = pkc1->nxt_offset;
		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
		prb_thaw_queue(pkc1);
		_prb_refresh_rx_retire_blk_timer(pkc1);

		smp_wmb();

		return;
	}

	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
	dump_stack();
	BUG();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}

	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
	dump_stack();
	BUG();
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return 0;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int previous,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	u32 idx, hash = skb->rxhash;

	idx = ((u64)hash * num) >> 32;

	return f->arr[idx];
}

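/* Editor's note on fanout_demux_hash() above: ((u64)hash * num) >> 32 maps
 * the 32-bit flow hash onto [0, num) without a modulo, so each fanout
 * member receives an approximately equal share of flows while packets of
 * the same flow keep hitting the same socket.
 */
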
static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return f->arr[cur];
}

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}

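/* Illustrative sketch (editor's addition): user space joins a fanout group
 * with the PACKET_FANOUT socket option.  The group id travels in the low
 * 16 bits of the integer argument and the mode/flags in the high 16 bits,
 * which is how fanout_add() above receives "id" and "type_flags".
 * Roughly:
 *
 *	int val = fanout_group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * ("fanout_group_id" is assumed to be an arbitrary 16-bit id chosen by the
 * application; every socket that joins the same id shares the group.)
 */
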
static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	po->fanout = NULL;

	mutex_lock(&fanout_mutex);
	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}


1446/*
1447 * Output a raw packet to a device layer. This bypasses all the other
1448 * protocol layers and you must therefore supply it with a complete frame
1449 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1452 struct msghdr *msg, size_t len)
1453{
1454 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001455 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001456 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 struct net_device *dev;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001458 __be16 proto = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 int err;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001460 int extra_len = 0;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001461
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001463 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 */
1465
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001466 if (saddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 if (msg->msg_namelen < sizeof(struct sockaddr))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001468 return -EINVAL;
1469 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1470 proto = saddr->spkt_protocol;
1471 } else
1472 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473
1474 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001475 * Find the device first to size check it
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 */
1477
1478 saddr->spkt_device[13] = 0;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001479retry:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001480 rcu_read_lock();
1481 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 err = -ENODEV;
1483 if (dev == NULL)
1484 goto out_unlock;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001485
David S. Millerd5e76b02007-01-25 19:30:36 -08001486 err = -ENETDOWN;
1487 if (!(dev->flags & IFF_UP))
1488 goto out_unlock;
1489
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 /*
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001491 * You may not queue a frame bigger than the mtu. This is the lowest level
1492 * raw protocol and you must do your own fragmentation at this level.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001494
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001495 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1496 if (!netif_supports_nofcs(dev)) {
1497 err = -EPROTONOSUPPORT;
1498 goto out_unlock;
1499 }
1500 extra_len = 4; /* We're doing our own CRC */
1501 }
1502
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001504 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 goto out_unlock;
1506
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001507 if (!skb) {
1508 size_t reserved = LL_RESERVED_SPACE(dev);
Herbert Xu4ce40912011-11-18 02:20:05 +00001509 int tlen = dev->needed_tailroom;
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001510 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001512 rcu_read_unlock();
Herbert Xu4ce40912011-11-18 02:20:05 +00001513 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001514 if (skb == NULL)
1515 return -ENOBUFS;
1516 /* FIXME: Save some space for broken drivers that write a hard
1517 * header at transmission time by themselves. PPP is the notable
1518 * one here. This should really be fixed at the driver level.
1519 */
1520 skb_reserve(skb, reserved);
1521 skb_reset_network_header(skb);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001522
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001523 /* Try to align data part correctly */
1524 if (hhlen) {
1525 skb->data -= hhlen;
1526 skb->tail -= hhlen;
1527 if (len < hhlen)
1528 skb_reset_network_header(skb);
1529 }
1530 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1531 if (err)
1532 goto out_free;
1533 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 }
1535
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001536 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00001537 /* Earlier code assumed this would be a VLAN pkt,
1538 * double-check this now that we have the actual
1539 * packet in hand.
1540 */
1541 struct ethhdr *ehdr;
1542 skb_reset_mac_header(skb);
1543 ehdr = eth_hdr(skb);
1544 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1545 err = -EMSGSIZE;
1546 goto out_unlock;
1547 }
1548 }
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001549
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 skb->protocol = proto;
1551 skb->dev = dev;
1552 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001553 skb->mark = sk->sk_mark;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00001554 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00001555 if (err < 0)
1556 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557
Ben Greear3bdc0eb2012-02-11 15:39:30 +00001558 if (unlikely(extra_len == 4))
1559 skb->no_fcs = 1;
1560
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 dev_queue_xmit(skb);
Eric Dumazet654d1f82009-11-02 10:43:32 +01001562 rcu_read_unlock();
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001563 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565out_unlock:
Eric Dumazet654d1f82009-11-02 10:43:32 +01001566 rcu_read_unlock();
Eric Dumazet1a35ca82009-12-15 05:47:03 +00001567out_free:
1568 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 return err;
1570}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
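/*
 * Example (user space, illustrative sketch): the legacy SOCK_PACKET send
 * path served by packet_sendmsg_spkt() above.  The destination device is
 * named in a struct sockaddr_pkt and the caller supplies a complete frame.
 * The interface name and frame contents are placeholders; error handling
 * is omitted.
 */
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static void spkt_send_sketch(void)
{
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
	unsigned char frame[ETH_ZLEN] = { 0 };	/* dst MAC, src MAC, ethertype, payload */
	struct sockaddr_pkt spkt = { 0 };

	spkt.spkt_family = AF_PACKET;
	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device) - 1);
	spkt.spkt_protocol = htons(ETH_P_IP);	/* recorded as skb->protocol */

	sendto(fd, frame, sizeof(frame), 0,
	       (struct sockaddr *)&spkt, sizeof(spkt));
}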
Olof Johanssoneea49cc92011-11-02 11:00:49 +00001572static unsigned int run_filter(const struct sk_buff *skb,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001573 const struct sock *sk,
David S. Millerdbcb5852007-01-24 15:21:02 -08001574 unsigned int res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575{
1576 struct sk_filter *filter;
1577
Eric Dumazet80f8f102011-01-18 07:46:52 +00001578 rcu_read_lock();
1579 filter = rcu_dereference(sk->sk_filter);
David S. Millerdbcb5852007-01-24 15:21:02 -08001580 if (filter != NULL)
Eric Dumazet0a148422011-04-20 09:27:32 +00001581 res = SK_RUN_FILTER(filter, skb);
Eric Dumazet80f8f102011-01-18 07:46:52 +00001582 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
David S. Millerdbcb5852007-01-24 15:21:02 -08001584 return res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585}
1586
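/*
 * Example (user space, illustrative sketch): attach the kind of classic BPF
 * program that run_filter() above executes for every packet.  This one keeps
 * only ARP frames; fd is assumed to be an existing AF_PACKET socket and
 * error handling is omitted.
 */
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

static int attach_arp_only_filter(int fd)
{
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),		/* A = ethertype */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),	/* ARP? */
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),			/* accept, full snaplen */
		BPF_STMT(BPF_RET | BPF_K, 0),				/* drop */
	};
	struct sock_fprog prog = {
		.len    = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
}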
1587/*
Eric Dumazet62ab0812010-12-06 20:50:09 +00001588 * This function does lazy skb cloning in the hope that most packets
 1589	 * are discarded by BPF.
 1590	 *
 1591	 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
 1592	 * and skb->cb are mangled. It works because (and until) packets
 1593	 * falling here are owned by the current CPU. Output packets are cloned
 1594	 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 1595	 * sequentially, so that if we return the skb to its original state on exit,
 1596	 * we will not harm anyone.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 */
1598
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001599static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1600 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601{
1602 struct sock *sk;
1603 struct sockaddr_ll *sll;
1604 struct packet_sock *po;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001605 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001607 unsigned int snaplen, res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608
1609 if (skb->pkt_type == PACKET_LOOPBACK)
1610 goto drop;
1611
1612 sk = pt->af_packet_priv;
1613 po = pkt_sk(sk);
1614
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001615 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001616 goto drop;
1617
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 skb->dev = dev;
1619
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001620 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 /* The device has an explicit notion of ll header,
Eric Dumazet62ab0812010-12-06 20:50:09 +00001622 * exported to higher levels.
1623 *
1624 * Otherwise, the device hides details of its frame
 1625	 * structure, so that the corresponding packet head is
 1626	 * never delivered to the user.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 */
1628 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001629 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 else if (skb->pkt_type == PACKET_OUTGOING) {
1631 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001632 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 }
1634 }
1635
1636 snaplen = skb->len;
1637
David S. Millerdbcb5852007-01-24 15:21:02 -08001638 res = run_filter(skb, sk, snaplen);
1639 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001640 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001641 if (snaplen > res)
1642 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001644 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 goto drop_n_acct;
1646
1647 if (skb_shared(skb)) {
1648 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1649 if (nskb == NULL)
1650 goto drop_n_acct;
1651
1652 if (skb_head != skb->data) {
1653 skb->data = skb_head;
1654 skb->len = skb_len;
1655 }
Eric Dumazetabc4e4f2012-04-19 02:24:42 +00001656 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 skb = nskb;
1658 }
1659
Herbert Xuffbc6112007-02-04 23:33:10 -08001660 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1661 sizeof(skb->cb));
1662
1663 sll = &PACKET_SKB_CB(skb)->sa.ll;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 sll->sll_family = AF_PACKET;
1665 sll->sll_hatype = dev->type;
1666 sll->sll_protocol = skb->protocol;
1667 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001668 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001669 sll->sll_ifindex = orig_dev->ifindex;
1670 else
1671 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001673 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Herbert Xuffbc6112007-02-04 23:33:10 -08001675 PACKET_SKB_CB(skb)->origlen = skb->len;
Herbert Xu8dc41942007-02-04 23:31:32 -08001676
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 if (pskb_trim(skb, snaplen))
1678 goto drop_n_acct;
1679
1680 skb_set_owner_r(skb, sk);
1681 skb->dev = NULL;
Eric Dumazetadf30902009-06-02 05:19:30 +00001682 skb_dst_drop(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683
Phil Oester84531c22005-07-12 11:57:52 -07001684 /* drop conntrack reference */
1685 nf_reset(skb);
1686
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 spin_lock(&sk->sk_receive_queue.lock);
1688 po->stats.tp_packets++;
Neil Horman3b885782009-10-12 13:26:31 -07001689 skb->dropcount = atomic_read(&sk->sk_drops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 __skb_queue_tail(&sk->sk_receive_queue, skb);
1691 spin_unlock(&sk->sk_receive_queue.lock);
1692 sk->sk_data_ready(sk, skb->len);
1693 return 0;
1694
1695drop_n_acct:
Willem de Bruijn7091fbd2011-09-30 10:38:28 +00001696 spin_lock(&sk->sk_receive_queue.lock);
1697 po->stats.tp_drops++;
1698 atomic_inc(&sk->sk_drops);
1699 spin_unlock(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700
1701drop_n_restore:
1702 if (skb_head != skb->data && skb_shared(skb)) {
1703 skb->data = skb_head;
1704 skb->len = skb_len;
1705 }
1706drop:
Neil Hormanead2ceb2009-03-11 09:49:55 +00001707 consume_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 return 0;
1709}
1710
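/*
 * Example (user space, illustrative sketch): the plain (non-mmap) receive
 * path that packet_rcv() above feeds.  Requires CAP_NET_RAW; error handling
 * is omitted.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static void plain_recv_sketch(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	unsigned char buf[2048];
	struct sockaddr_ll from;
	socklen_t fromlen = sizeof(from);
	ssize_t n;

	n = recvfrom(fd, buf, sizeof(buf), 0,
		     (struct sockaddr *)&from, &fromlen);
	if (n >= 0)
		printf("%zd bytes from ifindex %d, hatype %hu\n",
		       n, from.sll_ifindex, from.sll_hatype);
}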
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001711static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1712 struct packet_type *pt, struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713{
1714 struct sock *sk;
1715 struct packet_sock *po;
1716 struct sockaddr_ll *sll;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001717 union {
1718 struct tpacket_hdr *h1;
1719 struct tpacket2_hdr *h2;
chetan lokef6fb8f102011-08-19 10:18:16 +00001720 struct tpacket3_hdr *h3;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001721 void *raw;
1722 } h;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001723 u8 *skb_head = skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 int skb_len = skb->len;
David S. Millerdbcb5852007-01-24 15:21:02 -08001725 unsigned int snaplen, res;
chetan lokef6fb8f102011-08-19 10:18:16 +00001726 unsigned long status = TP_STATUS_USER;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001727 unsigned short macoff, netoff, hdrlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 struct sk_buff *copy_skb = NULL;
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001729 struct timeval tv;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001730 struct timespec ts;
Scott McMillan614f60f2010-06-02 05:53:56 -07001731 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732
1733 if (skb->pkt_type == PACKET_LOOPBACK)
1734 goto drop;
1735
1736 sk = pt->af_packet_priv;
1737 po = pkt_sk(sk);
1738
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001739 if (!net_eq(dev_net(dev), sock_net(sk)))
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08001740 goto drop;
1741
Stephen Hemminger3b04ddd2007-10-09 01:40:57 -07001742 if (dev->header_ops) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 if (sk->sk_type != SOCK_DGRAM)
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001744 skb_push(skb, skb->data - skb_mac_header(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 else if (skb->pkt_type == PACKET_OUTGOING) {
1746 /* Special case: outgoing packets have ll header at head */
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03001747 skb_pull(skb, skb_network_offset(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 }
1749 }
1750
Herbert Xu8dc41942007-02-04 23:31:32 -08001751 if (skb->ip_summed == CHECKSUM_PARTIAL)
1752 status |= TP_STATUS_CSUMNOTREADY;
1753
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 snaplen = skb->len;
1755
David S. Millerdbcb5852007-01-24 15:21:02 -08001756 res = run_filter(skb, sk, snaplen);
1757 if (!res)
Dmitry Mishinfda9ef52006-08-31 15:28:39 -07001758 goto drop_n_restore;
David S. Millerdbcb5852007-01-24 15:21:02 -08001759 if (snaplen > res)
1760 snaplen = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761
1762 if (sk->sk_type == SOCK_DGRAM) {
Patrick McHardy8913336a2008-07-18 18:05:19 -07001763 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1764 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 } else {
Eric Dumazet95c96172012-04-15 05:58:06 +00001766 unsigned int maclen = skb_network_offset(skb);
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001767 netoff = TPACKET_ALIGN(po->tp_hdrlen +
Patrick McHardy8913336a2008-07-18 18:05:19 -07001768 (maclen < 16 ? 16 : maclen)) +
1769 po->tp_reserve;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 macoff = netoff - maclen;
1771 }
chetan lokef6fb8f102011-08-19 10:18:16 +00001772 if (po->tp_version <= TPACKET_V2) {
1773 if (macoff + snaplen > po->rx_ring.frame_size) {
1774 if (po->copy_thresh &&
Eric Dumazet0fd7bac2011-12-21 07:11:44 +00001775 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
chetan lokef6fb8f102011-08-19 10:18:16 +00001776 if (skb_shared(skb)) {
1777 copy_skb = skb_clone(skb, GFP_ATOMIC);
1778 } else {
1779 copy_skb = skb_get(skb);
1780 skb_head = skb->data;
1781 }
1782 if (copy_skb)
1783 skb_set_owner_r(copy_skb, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 }
chetan lokef6fb8f102011-08-19 10:18:16 +00001785 snaplen = po->rx_ring.frame_size - macoff;
1786 if ((int)snaplen < 0)
1787 snaplen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 spin_lock(&sk->sk_receive_queue.lock);
chetan lokef6fb8f102011-08-19 10:18:16 +00001791 h.raw = packet_current_rx_frame(po, skb,
1792 TP_STATUS_KERNEL, (macoff+snaplen));
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001793 if (!h.raw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 goto ring_is_full;
chetan lokef6fb8f102011-08-19 10:18:16 +00001795 if (po->tp_version <= TPACKET_V2) {
1796 packet_increment_rx_head(po, &po->rx_ring);
1797 /*
 1798	 * LOSING will be reported until you read the stats,
 1799	 * because it's COR - Clear On Read.
 1800	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
 1801	 * at the packet level.
1802 */
1803 if (po->stats.tp_drops)
1804 status |= TP_STATUS_LOSING;
1805 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 po->stats.tp_packets++;
1807 if (copy_skb) {
1808 status |= TP_STATUS_COPY;
1809 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1810 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 spin_unlock(&sk->sk_receive_queue.lock);
1812
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001813 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001815 switch (po->tp_version) {
1816 case TPACKET_V1:
1817 h.h1->tp_len = skb->len;
1818 h.h1->tp_snaplen = snaplen;
1819 h.h1->tp_mac = macoff;
1820 h.h1->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001821 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1822 && shhwtstamps->syststamp.tv64)
1823 tv = ktime_to_timeval(shhwtstamps->syststamp);
1824 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1825 && shhwtstamps->hwtstamp.tv64)
1826 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1827 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001828 tv = ktime_to_timeval(skb->tstamp);
1829 else
1830 do_gettimeofday(&tv);
1831 h.h1->tp_sec = tv.tv_sec;
1832 h.h1->tp_usec = tv.tv_usec;
1833 hdrlen = sizeof(*h.h1);
1834 break;
1835 case TPACKET_V2:
1836 h.h2->tp_len = skb->len;
1837 h.h2->tp_snaplen = snaplen;
1838 h.h2->tp_mac = macoff;
1839 h.h2->tp_net = netoff;
Scott McMillan614f60f2010-06-02 05:53:56 -07001840 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1841 && shhwtstamps->syststamp.tv64)
1842 ts = ktime_to_timespec(shhwtstamps->syststamp);
1843 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1844 && shhwtstamps->hwtstamp.tv64)
1845 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1846 else if (skb->tstamp.tv64)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001847 ts = ktime_to_timespec(skb->tstamp);
1848 else
1849 getnstimeofday(&ts);
1850 h.h2->tp_sec = ts.tv_sec;
1851 h.h2->tp_nsec = ts.tv_nsec;
Ben Greeara3bcc232011-06-01 06:49:10 +00001852 if (vlan_tx_tag_present(skb)) {
1853 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1854 status |= TP_STATUS_VLAN_VALID;
1855 } else {
1856 h.h2->tp_vlan_tci = 0;
1857 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07001858 h.h2->tp_padding = 0;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001859 hdrlen = sizeof(*h.h2);
1860 break;
chetan lokef6fb8f102011-08-19 10:18:16 +00001861 case TPACKET_V3:
 1862	 /* tp_nxt_offset and vlan are already populated above,
 1863	 * so DON'T clear those fields here.
1864 */
1865 h.h3->tp_status |= status;
1866 h.h3->tp_len = skb->len;
1867 h.h3->tp_snaplen = snaplen;
1868 h.h3->tp_mac = macoff;
1869 h.h3->tp_net = netoff;
1870 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1871 && shhwtstamps->syststamp.tv64)
1872 ts = ktime_to_timespec(shhwtstamps->syststamp);
1873 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1874 && shhwtstamps->hwtstamp.tv64)
1875 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1876 else if (skb->tstamp.tv64)
1877 ts = ktime_to_timespec(skb->tstamp);
1878 else
1879 getnstimeofday(&ts);
1880 h.h3->tp_sec = ts.tv_sec;
1881 h.h3->tp_nsec = ts.tv_nsec;
1882 hdrlen = sizeof(*h.h3);
1883 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001884 default:
1885 BUG();
1886 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
Patrick McHardybbd6ef82008-07-14 22:50:15 -07001888 sll = h.raw + TPACKET_ALIGN(hdrlen);
Stephen Hemmingerb95cce32007-09-26 22:13:38 -07001889 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 sll->sll_family = AF_PACKET;
1891 sll->sll_hatype = dev->type;
1892 sll->sll_protocol = skb->protocol;
1893 sll->sll_pkttype = skb->pkt_type;
Peter P Waskiewicz Jr8032b462007-11-10 22:03:25 -08001894 if (unlikely(po->origdev))
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07001895 sll->sll_ifindex = orig_dev->ifindex;
1896 else
1897 sll->sll_ifindex = dev->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
Ralf Baechlee16aa202006-12-07 00:11:33 -08001899 smp_mb();
Changli Gaof6dafa92010-12-07 04:26:16 +00001900#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 {
Changli Gao0af55bb2010-12-01 02:52:20 +00001902 u8 *start, *end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903
chetan lokef6fb8f102011-08-19 10:18:16 +00001904 if (po->tp_version <= TPACKET_V2) {
1905 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1906 + macoff + snaplen);
1907 for (start = h.raw; start < end; start += PAGE_SIZE)
1908 flush_dcache_page(pgv_to_page(start));
1909 }
Chetan Lokecc9f01b2011-07-14 08:36:33 -07001910 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 }
Changli Gaof6dafa92010-12-07 04:26:16 +00001912#endif
chetan lokef6fb8f102011-08-19 10:18:16 +00001913 if (po->tp_version <= TPACKET_V2)
1914 __packet_set_status(po, h.raw, status);
1915 else
1916 prb_clear_blk_fill_status(&po->rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917
1918 sk->sk_data_ready(sk, 0);
1919
1920drop_n_restore:
1921 if (skb_head != skb->data && skb_shared(skb)) {
1922 skb->data = skb_head;
1923 skb->len = skb_len;
1924 }
1925drop:
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09001926 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 return 0;
1928
1929ring_is_full:
1930 po->stats.tp_drops++;
1931 spin_unlock(&sk->sk_receive_queue.lock);
1932
1933 sk->sk_data_ready(sk, 0);
Wei Yongjunacb5d752009-02-25 00:36:42 +00001934 kfree_skb(copy_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 goto drop_n_restore;
1936}
1937
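/*
 * Example (user space, illustrative sketch): the TPACKET_V2 RX ring consumer
 * that pairs with tpacket_rcv() above.  The kernel fills a slot and flips
 * tp_status to TP_STATUS_USER; user space reads it and hands the slot back.
 * Ring geometry is arbitrary, fd is an AF_PACKET socket, and error handling
 * is omitted.
 */
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void rx_ring_sketch(int fd)
{
	int ver = TPACKET_V2;
	struct tpacket_req req = {
		.tp_block_size = 4096,
		.tp_block_nr   = 64,
		.tp_frame_size = 2048,
		.tp_frame_nr   = 64 * (4096 / 2048),
	};
	unsigned int i;
	char *ring;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (i = 0; ; i = (i + 1) % req.tp_frame_nr) {
		struct tpacket2_hdr *hdr = (void *)(ring + i * req.tp_frame_size);
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		unsigned char *data;

		while (!(hdr->tp_status & TP_STATUS_USER))
			poll(&pfd, 1, -1);

		data = (unsigned char *)hdr + hdr->tp_mac;	/* frame starts here */
		(void)data;					/* consume tp_snaplen bytes */

		hdr->tp_status = TP_STATUS_KERNEL;		/* return the slot */
	}
}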
Johann Baudy69e3c752009-05-18 22:11:22 -07001938static void tpacket_destruct_skb(struct sk_buff *skb)
1939{
1940 struct packet_sock *po = pkt_sk(skb->sk);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001941 void *ph;
Johann Baudy69e3c752009-05-18 22:11:22 -07001942
Johann Baudy69e3c752009-05-18 22:11:22 -07001943 if (likely(po->tx_ring.pg_vec)) {
1944 ph = skb_shinfo(skb)->destructor_arg;
1945 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
1946 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1947 atomic_dec(&po->tx_ring.pending);
1948 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1949 }
1950
1951 sock_wfree(skb);
1952}
1953
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001954static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1955 void *frame, struct net_device *dev, int size_max,
Herbert Xuae641942011-11-18 02:20:04 +00001956 __be16 proto, unsigned char *addr, int hlen)
Johann Baudy69e3c752009-05-18 22:11:22 -07001957{
1958 union {
1959 struct tpacket_hdr *h1;
1960 struct tpacket2_hdr *h2;
1961 void *raw;
1962 } ph;
1963 int to_write, offset, len, tp_len, nr_frags, len_max;
1964 struct socket *sock = po->sk.sk_socket;
1965 struct page *page;
1966 void *data;
1967 int err;
1968
1969 ph.raw = frame;
1970
1971 skb->protocol = proto;
1972 skb->dev = dev;
1973 skb->priority = po->sk.sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00001974 skb->mark = po->sk.sk_mark;
Johann Baudy69e3c752009-05-18 22:11:22 -07001975 skb_shinfo(skb)->destructor_arg = ph.raw;
1976
1977 switch (po->tp_version) {
1978 case TPACKET_V2:
1979 tp_len = ph.h2->tp_len;
1980 break;
1981 default:
1982 tp_len = ph.h1->tp_len;
1983 break;
1984 }
1985 if (unlikely(tp_len > size_max)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00001986 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
Johann Baudy69e3c752009-05-18 22:11:22 -07001987 return -EMSGSIZE;
1988 }
1989
Herbert Xuae641942011-11-18 02:20:04 +00001990 skb_reserve(skb, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07001991 skb_reset_network_header(skb);
1992
1993 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1994 to_write = tp_len;
1995
1996 if (sock->type == SOCK_DGRAM) {
1997 err = dev_hard_header(skb, dev, ntohs(proto), addr,
1998 NULL, tp_len);
1999 if (unlikely(err < 0))
2000 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002001 } else if (dev->hard_header_len) {
Johann Baudy69e3c752009-05-18 22:11:22 -07002002 /* net device doesn't like empty head */
2003 if (unlikely(tp_len <= dev->hard_header_len)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002004 pr_err("packet size is too short (%d < %d)\n",
2005 tp_len, dev->hard_header_len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002006 return -EINVAL;
2007 }
2008
2009 skb_push(skb, dev->hard_header_len);
2010 err = skb_store_bits(skb, 0, data,
2011 dev->hard_header_len);
2012 if (unlikely(err))
2013 return err;
2014
2015 data += dev->hard_header_len;
2016 to_write -= dev->hard_header_len;
2017 }
2018
2019 err = -EFAULT;
Johann Baudy69e3c752009-05-18 22:11:22 -07002020 offset = offset_in_page(data);
2021 len_max = PAGE_SIZE - offset;
2022 len = ((to_write > len_max) ? len_max : to_write);
2023
2024 skb->data_len = to_write;
2025 skb->len += to_write;
2026 skb->truesize += to_write;
2027 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2028
2029 while (likely(to_write)) {
2030 nr_frags = skb_shinfo(skb)->nr_frags;
2031
2032 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002033			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2034 MAX_SKB_FRAGS);
Johann Baudy69e3c752009-05-18 22:11:22 -07002035 return -EFAULT;
2036 }
2037
Changli Gao0af55bb2010-12-01 02:52:20 +00002038 page = pgv_to_page(data);
2039 data += len;
Johann Baudy69e3c752009-05-18 22:11:22 -07002040 flush_dcache_page(page);
2041 get_page(page);
Changli Gao0af55bb2010-12-01 02:52:20 +00002042 skb_fill_page_desc(skb, nr_frags, page, offset, len);
Johann Baudy69e3c752009-05-18 22:11:22 -07002043 to_write -= len;
2044 offset = 0;
2045 len_max = PAGE_SIZE;
2046 len = ((to_write > len_max) ? len_max : to_write);
2047 }
2048
2049 return tp_len;
2050}
2051
2052static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2053{
Johann Baudy69e3c752009-05-18 22:11:22 -07002054 struct sk_buff *skb;
2055 struct net_device *dev;
2056 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002057 bool need_rls_dev = false;
2058 int err, reserve = 0;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002059 void *ph;
2060 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Johann Baudy69e3c752009-05-18 22:11:22 -07002061 int tp_len, size_max;
2062 unsigned char *addr;
2063 int len_sum = 0;
2064 int status = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002065 int hlen, tlen;
Johann Baudy69e3c752009-05-18 22:11:22 -07002066
Johann Baudy69e3c752009-05-18 22:11:22 -07002067 mutex_lock(&po->pg_vec_lock);
2068
2069 err = -EBUSY;
2070 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002071 dev = po->prot_hook.dev;
Johann Baudy69e3c752009-05-18 22:11:22 -07002072 proto = po->num;
2073 addr = NULL;
2074 } else {
2075 err = -EINVAL;
2076 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2077 goto out;
2078 if (msg->msg_namelen < (saddr->sll_halen
2079 + offsetof(struct sockaddr_ll,
2080 sll_addr)))
2081 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07002082 proto = saddr->sll_protocol;
2083 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002084 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2085 need_rls_dev = true;
Johann Baudy69e3c752009-05-18 22:11:22 -07002086 }
2087
Johann Baudy69e3c752009-05-18 22:11:22 -07002088 err = -ENXIO;
2089 if (unlikely(dev == NULL))
2090 goto out;
2091
2092 reserve = dev->hard_header_len;
2093
2094 err = -ENETDOWN;
2095 if (unlikely(!(dev->flags & IFF_UP)))
2096 goto out_put;
2097
2098 size_max = po->tx_ring.frame_size
Gabor Gombasb5dd8842009-10-29 03:19:11 -07002099 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
Johann Baudy69e3c752009-05-18 22:11:22 -07002100
2101 if (size_max > dev->mtu + reserve)
2102 size_max = dev->mtu + reserve;
2103
2104 do {
2105 ph = packet_current_frame(po, &po->tx_ring,
2106 TP_STATUS_SEND_REQUEST);
2107
2108 if (unlikely(ph == NULL)) {
2109 schedule();
2110 continue;
2111 }
2112
2113 status = TP_STATUS_SEND_REQUEST;
Herbert Xuae641942011-11-18 02:20:04 +00002114 hlen = LL_RESERVED_SPACE(dev);
2115 tlen = dev->needed_tailroom;
Johann Baudy69e3c752009-05-18 22:11:22 -07002116 skb = sock_alloc_send_skb(&po->sk,
Herbert Xuae641942011-11-18 02:20:04 +00002117 hlen + tlen + sizeof(struct sockaddr_ll),
Johann Baudy69e3c752009-05-18 22:11:22 -07002118 0, &err);
2119
2120 if (unlikely(skb == NULL))
2121 goto out_status;
2122
2123 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
Herbert Xuae641942011-11-18 02:20:04 +00002124 addr, hlen);
Johann Baudy69e3c752009-05-18 22:11:22 -07002125
2126 if (unlikely(tp_len < 0)) {
2127 if (po->tp_loss) {
2128 __packet_set_status(po, ph,
2129 TP_STATUS_AVAILABLE);
2130 packet_increment_head(&po->tx_ring);
2131 kfree_skb(skb);
2132 continue;
2133 } else {
2134 status = TP_STATUS_WRONG_FORMAT;
2135 err = tp_len;
2136 goto out_status;
2137 }
2138 }
2139
2140 skb->destructor = tpacket_destruct_skb;
2141 __packet_set_status(po, ph, TP_STATUS_SENDING);
2142 atomic_inc(&po->tx_ring.pending);
2143
2144 status = TP_STATUS_SEND_REQUEST;
2145 err = dev_queue_xmit(skb);
Jarek Poplawskieb70df12010-01-10 22:04:19 +00002146 if (unlikely(err > 0)) {
2147 err = net_xmit_errno(err);
2148 if (err && __packet_get_status(po, ph) ==
2149 TP_STATUS_AVAILABLE) {
2150 /* skb was destructed already */
2151 skb = NULL;
2152 goto out_status;
2153 }
2154 /*
2155 * skb was dropped but not destructed yet;
2156 * let's treat it like congestion or err < 0
2157 */
2158 err = 0;
2159 }
Johann Baudy69e3c752009-05-18 22:11:22 -07002160 packet_increment_head(&po->tx_ring);
2161 len_sum += tp_len;
Joe Perchesf64f9e72009-11-29 16:55:45 -08002162 } while (likely((ph != NULL) ||
2163 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2164 (atomic_read(&po->tx_ring.pending))))
2165 );
Johann Baudy69e3c752009-05-18 22:11:22 -07002166
2167 err = len_sum;
2168 goto out_put;
2169
Johann Baudy69e3c752009-05-18 22:11:22 -07002170out_status:
2171 __packet_set_status(po, ph, status);
2172 kfree_skb(skb);
2173out_put:
Ben Greear827d9782011-06-01 07:18:53 +00002174 if (need_rls_dev)
2175 dev_put(dev);
Johann Baudy69e3c752009-05-18 22:11:22 -07002176out:
2177 mutex_unlock(&po->pg_vec_lock);
2178 return err;
2179}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
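/*
 * Example (user space, illustrative sketch): queueing one frame on a
 * TPACKET_V2 TX ring for tpacket_snd() above to pick up.  Assumes the ring
 * was set up with PACKET_TX_RING, mmap()ed, and the socket bound to an
 * interface; frame contents are placeholders and error handling is omitted.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void tx_ring_send_one(int fd, void *slot,
			     const unsigned char *frame, unsigned int len)
{
	struct tpacket2_hdr *hdr = slot;
	unsigned char *data = (unsigned char *)hdr +
			      TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);

	if (hdr->tp_status != TP_STATUS_AVAILABLE)
		return;					/* slot still owned by the kernel */

	memcpy(data, frame, len);			/* complete link-layer frame */
	hdr->tp_len = len;
	hdr->tp_status = TP_STATUS_SEND_REQUEST;

	send(fd, NULL, 0, 0);				/* kick transmission of pending slots */
}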
Olof Johanssoneea49cc92011-11-02 11:00:49 +00002181static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2182 size_t reserve, size_t len,
2183 size_t linear, int noblock,
2184 int *err)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002185{
2186 struct sk_buff *skb;
2187
2188 /* Under a page? Don't bother with paged skb. */
2189 if (prepad + len < PAGE_SIZE || !linear)
2190 linear = len;
2191
2192 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2193 err);
2194 if (!skb)
2195 return NULL;
2196
2197 skb_reserve(skb, reserve);
2198 skb_put(skb, linear);
2199 skb->data_len = len - linear;
2200 skb->len += len - linear;
2201
2202 return skb;
2203}
2204
Johann Baudy69e3c752009-05-18 22:11:22 -07002205static int packet_snd(struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 struct msghdr *msg, size_t len)
2207{
2208 struct sock *sk = sock->sk;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002209 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 struct sk_buff *skb;
2211 struct net_device *dev;
Al Viro0e11c912006-11-08 00:26:29 -08002212 __be16 proto;
Ben Greear827d9782011-06-01 07:18:53 +00002213 bool need_rls_dev = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 unsigned char *addr;
Ben Greear827d9782011-06-01 07:18:53 +00002215 int err, reserve = 0;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002216 struct virtio_net_hdr vnet_hdr = { 0 };
2217 int offset = 0;
2218 int vnet_hdr_len;
2219 struct packet_sock *po = pkt_sk(sk);
2220 unsigned short gso_type = 0;
Herbert Xuae641942011-11-18 02:20:04 +00002221 int hlen, tlen;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002222 int extra_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
2224 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002225 * Get and verify the address.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002227
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 if (saddr == NULL) {
Ben Greear827d9782011-06-01 07:18:53 +00002229 dev = po->prot_hook.dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 proto = po->num;
2231 addr = NULL;
2232 } else {
2233 err = -EINVAL;
2234 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2235 goto out;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002236 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2237 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 proto = saddr->sll_protocol;
2239 addr = saddr->sll_addr;
Ben Greear827d9782011-06-01 07:18:53 +00002240 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2241 need_rls_dev = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 }
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 err = -ENXIO;
2245 if (dev == NULL)
2246 goto out_unlock;
2247 if (sock->type == SOCK_RAW)
2248 reserve = dev->hard_header_len;
2249
David S. Millerd5e76b02007-01-25 19:30:36 -08002250 err = -ENETDOWN;
2251 if (!(dev->flags & IFF_UP))
2252 goto out_unlock;
2253
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002254 if (po->has_vnet_hdr) {
2255 vnet_hdr_len = sizeof(vnet_hdr);
2256
2257 err = -EINVAL;
2258 if (len < vnet_hdr_len)
2259 goto out_unlock;
2260
2261 len -= vnet_hdr_len;
2262
2263 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2264 vnet_hdr_len);
2265 if (err < 0)
2266 goto out_unlock;
2267
2268 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2269 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2270 vnet_hdr.hdr_len))
2271 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2272 vnet_hdr.csum_offset + 2;
2273
2274 err = -EINVAL;
2275 if (vnet_hdr.hdr_len > len)
2276 goto out_unlock;
2277
2278 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2279 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2280 case VIRTIO_NET_HDR_GSO_TCPV4:
2281 gso_type = SKB_GSO_TCPV4;
2282 break;
2283 case VIRTIO_NET_HDR_GSO_TCPV6:
2284 gso_type = SKB_GSO_TCPV6;
2285 break;
2286 case VIRTIO_NET_HDR_GSO_UDP:
2287 gso_type = SKB_GSO_UDP;
2288 break;
2289 default:
2290 goto out_unlock;
2291 }
2292
2293 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2294 gso_type |= SKB_GSO_TCP_ECN;
2295
2296 if (vnet_hdr.gso_size == 0)
2297 goto out_unlock;
2298
2299 }
2300 }
2301
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002302 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2303 if (!netif_supports_nofcs(dev)) {
2304 err = -EPROTONOSUPPORT;
2305 goto out_unlock;
2306 }
2307 extra_len = 4; /* We're doing our own CRC */
2308 }
2309
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 err = -EMSGSIZE;
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002311 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 goto out_unlock;
2313
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002314 err = -ENOBUFS;
Herbert Xuae641942011-11-18 02:20:04 +00002315 hlen = LL_RESERVED_SPACE(dev);
2316 tlen = dev->needed_tailroom;
2317 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002318 msg->msg_flags & MSG_DONTWAIT, &err);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002319 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 goto out_unlock;
2321
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002322 skb_set_network_header(skb, reserve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002324 err = -EINVAL;
2325 if (sock->type == SOCK_DGRAM &&
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002326 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
Stephen Hemminger0c4e8582007-10-09 01:36:32 -07002327 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328
2329 /* Returns -EFAULT on error */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002330 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 if (err)
2332 goto out_free;
Oliver Hartkopp2244d072010-08-17 08:59:14 +00002333 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
Richard Cochraned85b562010-04-07 22:41:28 +00002334 if (err < 0)
2335 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002337 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
Ben Greear57f89bf2011-02-11 09:35:18 +00002338 /* Earlier code assumed this would be a VLAN pkt,
2339 * double-check this now that we have the actual
2340 * packet in hand.
2341 */
2342 struct ethhdr *ehdr;
2343 skb_reset_mac_header(skb);
2344 ehdr = eth_hdr(skb);
2345 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2346 err = -EMSGSIZE;
2347 goto out_free;
2348 }
2349 }
2350
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 skb->protocol = proto;
2352 skb->dev = dev;
2353 skb->priority = sk->sk_priority;
Eric Dumazet2d37a182009-10-01 19:14:46 +00002354 skb->mark = sk->sk_mark;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002356 if (po->has_vnet_hdr) {
2357 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2358 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2359 vnet_hdr.csum_offset)) {
2360 err = -EINVAL;
2361 goto out_free;
2362 }
2363 }
2364
2365 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2366 skb_shinfo(skb)->gso_type = gso_type;
2367
2368 /* Header must be checked, and gso_segs computed. */
2369 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2370 skb_shinfo(skb)->gso_segs = 0;
2371
2372 len += vnet_hdr_len;
2373 }
2374
Ben Greear3bdc0eb2012-02-11 15:39:30 +00002375 if (unlikely(extra_len == 4))
2376 skb->no_fcs = 1;
2377
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 /*
2379 * Now send it
2380 */
2381
2382 err = dev_queue_xmit(skb);
2383 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2384 goto out_unlock;
2385
Ben Greear827d9782011-06-01 07:18:53 +00002386 if (need_rls_dev)
2387 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002389 return len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390
2391out_free:
2392 kfree_skb(skb);
2393out_unlock:
Ben Greear827d9782011-06-01 07:18:53 +00002394 if (dev && need_rls_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 dev_put(dev);
2396out:
2397 return err;
2398}
2399
Johann Baudy69e3c752009-05-18 22:11:22 -07002400static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2401 struct msghdr *msg, size_t len)
2402{
Johann Baudy69e3c752009-05-18 22:11:22 -07002403 struct sock *sk = sock->sk;
2404 struct packet_sock *po = pkt_sk(sk);
2405 if (po->tx_ring.pg_vec)
2406 return tpacket_snd(po, msg);
2407 else
Johann Baudy69e3c752009-05-18 22:11:22 -07002408 return packet_snd(sock, msg, len);
2409}
2410
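/*
 * Example (user space, illustrative sketch): sending through packet_snd()
 * above with PACKET_VNET_HDR enabled, in which case every payload must be
 * prefixed by a struct virtio_net_hdr.  The simplest case is shown (no
 * checksum or GSO offload requested); frame contents are placeholders and
 * error handling is omitted.
 */
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>
#include <linux/virtio_net.h>

static void vnet_hdr_send_sketch(int fd, const unsigned char *frame, size_t len)
{
	int on = 1;
	struct virtio_net_hdr vnet = {
		.flags    = 0,				/* no checksum offload needed */
		.gso_type = VIRTIO_NET_HDR_GSO_NONE,	/* no segmentation needed */
	};
	struct iovec iov[2] = {
		{ .iov_base = &vnet,         .iov_len = sizeof(vnet) },
		{ .iov_base = (void *)frame, .iov_len = len },
	};
	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };

	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
	sendmsg(fd, &msg, 0);
}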
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411/*
2412 * Close a PACKET socket. This is fairly simple. We immediately go
2413 * to 'closed' state and remove our protocol entry in the device list.
2414 */
2415
2416static int packet_release(struct socket *sock)
2417{
2418 struct sock *sk = sock->sk;
2419 struct packet_sock *po;
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08002420 struct net *net;
chetan lokef6fb8f102011-08-19 10:18:16 +00002421 union tpacket_req_u req_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422
2423 if (!sk)
2424 return 0;
2425
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002426 net = sock_net(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 po = pkt_sk(sk);
2428
stephen hemminger808f5112010-02-22 07:57:18 +00002429 spin_lock_bh(&net->packet.sklist_lock);
2430 sk_del_node_init_rcu(sk);
Eric Dumazet920de802008-11-24 00:09:29 -08002431 sock_prot_inuse_add(net, sk->sk_prot, -1);
stephen hemminger808f5112010-02-22 07:57:18 +00002432 spin_unlock_bh(&net->packet.sklist_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
stephen hemminger808f5112010-02-22 07:57:18 +00002434 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002435 unregister_prot_hook(sk, false);
Ben Greear160ff182011-06-01 07:18:52 +00002436 if (po->prot_hook.dev) {
2437 dev_put(po->prot_hook.dev);
2438 po->prot_hook.dev = NULL;
2439 }
stephen hemminger808f5112010-02-22 07:57:18 +00002440 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 packet_flush_mclist(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
chetan lokef6fb8f102011-08-19 10:18:16 +00002444 memset(&req_u, 0, sizeof(req_u));
Johann Baudy69e3c752009-05-18 22:11:22 -07002445
2446 if (po->rx_ring.pg_vec)
chetan lokef6fb8f102011-08-19 10:18:16 +00002447 packet_set_ring(sk, &req_u, 1, 0);
Johann Baudy69e3c752009-05-18 22:11:22 -07002448
2449 if (po->tx_ring.pg_vec)
chetan lokef6fb8f102011-08-19 10:18:16 +00002450 packet_set_ring(sk, &req_u, 1, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451
David S. Millerdc99f602011-07-05 01:45:05 -07002452 fanout_release(sk);
2453
stephen hemminger808f5112010-02-22 07:57:18 +00002454 synchronize_net();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 /*
2456 * Now the socket is dead. No more input will appear.
2457 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 sock_orphan(sk);
2459 sock->sk = NULL;
2460
2461 /* Purge queues */
2462
2463 skb_queue_purge(&sk->sk_receive_queue);
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002464 sk_refcnt_debug_release(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465
2466 sock_put(sk);
2467 return 0;
2468}
2469
2470/*
2471 * Attach a packet hook.
2472 */
2473
Al Viro0e11c912006-11-08 00:26:29 -08002474static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475{
2476 struct packet_sock *po = pkt_sk(sk);
David S. Millerdc99f602011-07-05 01:45:05 -07002477
Wei Yongjunaef950b2011-12-27 22:32:41 -05002478 if (po->fanout) {
2479 if (dev)
2480 dev_put(dev);
2481
David S. Millerdc99f602011-07-05 01:45:05 -07002482 return -EINVAL;
Wei Yongjunaef950b2011-12-27 22:32:41 -05002483 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484
2485 lock_sock(sk);
2486
2487 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07002488 unregister_prot_hook(sk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 po->num = protocol;
2490 po->prot_hook.type = protocol;
Ben Greear160ff182011-06-01 07:18:52 +00002491 if (po->prot_hook.dev)
2492 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 po->prot_hook.dev = dev;
2494
2495 po->ifindex = dev ? dev->ifindex : 0;
2496
2497 if (protocol == 0)
2498 goto out_unlock;
2499
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002500 if (!dev || (dev->flags & IFF_UP)) {
David S. Millerce06b032011-07-04 01:44:29 -07002501 register_prot_hook(sk);
Urs Thuermannbe85d4a2007-11-12 21:05:20 -08002502 } else {
2503 sk->sk_err = ENETDOWN;
2504 if (!sock_flag(sk, SOCK_DEAD))
2505 sk->sk_error_report(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506 }
2507
2508out_unlock:
2509 spin_unlock(&po->bind_lock);
2510 release_sock(sk);
2511 return 0;
2512}
2513
2514/*
2515 * Bind a packet socket to a device
2516 */
2517
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002518static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2519 int addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002521 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 char name[15];
2523 struct net_device *dev;
2524 int err = -ENODEV;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002525
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 /*
2527 * Check legality
2528 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002529
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002530 if (addr_len != sizeof(struct sockaddr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002532 strlcpy(name, uaddr->sa_data, sizeof(name));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002534 dev = dev_get_by_name(sock_net(sk), name);
Ben Greear160ff182011-06-01 07:18:52 +00002535 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 return err;
2538}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539
2540static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2541{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002542 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2543 struct sock *sk = sock->sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 struct net_device *dev = NULL;
2545 int err;
2546
2547
2548 /*
2549 * Check legality
2550 */
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002551
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 if (addr_len < sizeof(struct sockaddr_ll))
2553 return -EINVAL;
2554 if (sll->sll_family != AF_PACKET)
2555 return -EINVAL;
2556
2557 if (sll->sll_ifindex) {
2558 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002559 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560 if (dev == NULL)
2561 goto out;
2562 }
2563 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564
2565out:
2566 return err;
2567}
2568
2569static struct proto packet_proto = {
2570 .name = "PACKET",
2571 .owner = THIS_MODULE,
2572 .obj_size = sizeof(struct packet_sock),
2573};
2574
2575/*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002576 * Create a packet of type SOCK_PACKET.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 */
2578
Eric Paris3f378b62009-11-05 22:18:14 -08002579static int packet_create(struct net *net, struct socket *sock, int protocol,
2580 int kern)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581{
2582 struct sock *sk;
2583 struct packet_sock *po;
Al Viro0e11c912006-11-08 00:26:29 -08002584 __be16 proto = (__force __be16)protocol; /* weird, but documented */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 int err;
2586
2587 if (!capable(CAP_NET_RAW))
2588 return -EPERM;
David S. Millerbe020972007-05-29 13:16:31 -07002589 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2590 sock->type != SOCK_PACKET)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 return -ESOCKTNOSUPPORT;
2592
2593 sock->state = SS_UNCONNECTED;
2594
2595 err = -ENOBUFS;
Pavel Emelyanov6257ff22007-11-01 00:39:31 -07002596 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 if (sk == NULL)
2598 goto out;
2599
2600 sock->ops = &packet_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 if (sock->type == SOCK_PACKET)
2602 sock->ops = &packet_ops_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002603
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 sock_init_data(sock, sk);
2605
2606 po = pkt_sk(sk);
2607 sk->sk_family = PF_PACKET;
Al Viro0e11c912006-11-08 00:26:29 -08002608 po->num = proto;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609
2610 sk->sk_destruct = packet_sock_destruct;
Pavel Emelyanov17ab56a2007-11-10 21:38:48 -08002611 sk_refcnt_debug_inc(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612
2613 /*
2614 * Attach a protocol block
2615 */
2616
2617 spin_lock_init(&po->bind_lock);
Herbert Xu905db442009-01-30 14:12:06 -08002618 mutex_init(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 po->prot_hook.func = packet_rcv;
David S. Millerbe020972007-05-29 13:16:31 -07002620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 if (sock->type == SOCK_PACKET)
2622 po->prot_hook.func = packet_rcv_spkt;
David S. Millerbe020972007-05-29 13:16:31 -07002623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 po->prot_hook.af_packet_priv = sk;
2625
Al Viro0e11c912006-11-08 00:26:29 -08002626 if (proto) {
2627 po->prot_hook.type = proto;
David S. Millerce06b032011-07-04 01:44:29 -07002628 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 }
2630
stephen hemminger808f5112010-02-22 07:57:18 +00002631 spin_lock_bh(&net->packet.sklist_lock);
2632 sk_add_node_rcu(sk, &net->packet.sklist);
Eric Dumazet36804532008-11-19 14:25:35 -08002633 sock_prot_inuse_add(net, &packet_proto, 1);
stephen hemminger808f5112010-02-22 07:57:18 +00002634 spin_unlock_bh(&net->packet.sklist_lock);
2635
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002636 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637out:
2638 return err;
2639}
2640
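/*
 * Example (user space, illustrative sketch): creating a socket as
 * packet_create() above requires (CAP_NET_RAW, protocol already in network
 * byte order) and binding it to one interface via packet_bind() earlier.
 * The interface name is a placeholder; error handling is omitted.
 */
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

static int open_bound_packet_socket(const char *ifname)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct sockaddr_ll ll;

	memset(&ll, 0, sizeof(ll));
	ll.sll_family   = AF_PACKET;
	ll.sll_protocol = htons(ETH_P_ALL);	/* note: already big-endian */
	ll.sll_ifindex  = if_nametoindex(ifname);

	bind(fd, (struct sockaddr *)&ll, sizeof(ll));
	return fd;
}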
Richard Cochraned85b562010-04-07 22:41:28 +00002641static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2642{
2643 struct sock_exterr_skb *serr;
2644 struct sk_buff *skb, *skb2;
2645 int copied, err;
2646
2647 err = -EAGAIN;
2648 skb = skb_dequeue(&sk->sk_error_queue);
2649 if (skb == NULL)
2650 goto out;
2651
2652 copied = skb->len;
2653 if (copied > len) {
2654 msg->msg_flags |= MSG_TRUNC;
2655 copied = len;
2656 }
2657 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2658 if (err)
2659 goto out_free_skb;
2660
2661 sock_recv_timestamp(msg, sk, skb);
2662
2663 serr = SKB_EXT_ERR(skb);
2664 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2665 sizeof(serr->ee), &serr->ee);
2666
2667 msg->msg_flags |= MSG_ERRQUEUE;
2668 err = copied;
2669
2670 /* Reset and regenerate socket error */
2671 spin_lock_bh(&sk->sk_error_queue.lock);
2672 sk->sk_err = 0;
2673 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2674 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2675 spin_unlock_bh(&sk->sk_error_queue.lock);
2676 sk->sk_error_report(sk);
2677 } else
2678 spin_unlock_bh(&sk->sk_error_queue.lock);
2679
2680out_free_skb:
2681 kfree_skb(skb);
2682out:
2683 return err;
2684}
2685
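/*
 * Example (user space, illustrative sketch): reading a TX timestamp from the
 * error queue, which is the path packet_recv_error() above services.  The
 * timestamp arrives in an SCM_TIMESTAMPING control message and the original
 * error cookie in a SOL_PACKET/PACKET_TX_TIMESTAMP one.  Software
 * timestamping is assumed; error handling is omitted.
 */
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/net_tstamp.h>

static void read_tx_timestamp_sketch(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
	char data[2048];
	union {
		struct cmsghdr align;
		char buf[512];
	} control;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = &control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
	/* ... send a packet, wait for POLLERR ... */

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			struct timespec *ts = (struct timespec *)CMSG_DATA(cmsg);
			(void)ts;	/* ts[0] software, ts[2] raw hardware */
		}
	}
}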
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686/*
2687 * Pull a packet from our receive queue and hand it to the user.
2688 * If necessary we block.
2689 */
2690
2691static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2692 struct msghdr *msg, size_t len, int flags)
2693{
2694 struct sock *sk = sock->sk;
2695 struct sk_buff *skb;
2696 int copied, err;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002697 struct sockaddr_ll *sll;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002698 int vnet_hdr_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
2700 err = -EINVAL;
Richard Cochraned85b562010-04-07 22:41:28 +00002701 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 goto out;
2703
2704#if 0
2705 /* What error should we return now? EUNATTACH? */
2706 if (pkt_sk(sk)->ifindex < 0)
2707 return -ENODEV;
2708#endif
2709
Richard Cochraned85b562010-04-07 22:41:28 +00002710 if (flags & MSG_ERRQUEUE) {
2711 err = packet_recv_error(sk, msg, len);
2712 goto out;
2713 }
2714
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 * Call the generic datagram receiver. This handles all sorts
2717 * of horrible races and re-entrancy so we can forget about it
2718 * in the protocol layers.
2719 *
 2720	 * Now it will return ENETDOWN if the device has just gone down,
2721 * but then it will block.
2722 */
2723
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002724 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
2726 /*
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09002727 * An error occurred so return it. Because skb_recv_datagram()
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 * handles the blocking we don't see and worry about blocking
2729 * retries.
2730 */
2731
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08002732 if (skb == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733 goto out;
2734
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002735 if (pkt_sk(sk)->has_vnet_hdr) {
2736 struct virtio_net_hdr vnet_hdr = { 0 };
2737
2738 err = -EINVAL;
2739 vnet_hdr_len = sizeof(vnet_hdr);
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002740 if (len < vnet_hdr_len)
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002741 goto out_free;
2742
Mariusz Kozlowski1f18b712010-11-08 11:58:45 +00002743 len -= vnet_hdr_len;
2744
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002745 if (skb_is_gso(skb)) {
2746 struct skb_shared_info *sinfo = skb_shinfo(skb);
2747
2748 /* This is a hint as to how much should be linear. */
2749 vnet_hdr.hdr_len = skb_headlen(skb);
2750 vnet_hdr.gso_size = sinfo->gso_size;
2751 if (sinfo->gso_type & SKB_GSO_TCPV4)
2752 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2753 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2754 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2755 else if (sinfo->gso_type & SKB_GSO_UDP)
2756 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2757 else if (sinfo->gso_type & SKB_GSO_FCOE)
2758 goto out_free;
2759 else
2760 BUG();
2761 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2762 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2763 } else
2764 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2765
2766 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2767 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
Michał Mirosław55508d62010-12-14 15:24:08 +00002768 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002769 vnet_hdr.csum_offset = skb->csum_offset;
Jason Wang10a8d942011-06-10 00:56:17 +00002770 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2771 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002772 } /* else everything is zero */
2773
2774 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2775 vnet_hdr_len);
2776 if (err < 0)
2777 goto out_free;
2778 }
2779
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 /*
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002781 * If the address length field is there to be filled in, we fill
2782 * it in now.
2783 */
2784
Herbert Xuffbc6112007-02-04 23:33:10 -08002785 sll = &PACKET_SKB_CB(skb)->sa.ll;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002786 if (sock->type == SOCK_PACKET)
2787 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2788 else
2789 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2790
2791 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 * You lose any data beyond the buffer you gave. If it worries a
 2793	 * user program, it can ask the device for its MTU anyway.
2794 */
2795
2796 copied = skb->len;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002797 if (copied > len) {
2798 copied = len;
2799 msg->msg_flags |= MSG_TRUNC;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 }
2801
2802 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2803 if (err)
2804 goto out_free;
2805
Neil Horman3b885782009-10-12 13:26:31 -07002806 sock_recv_ts_and_drops(msg, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807
2808 if (msg->msg_name)
Herbert Xuffbc6112007-02-04 23:33:10 -08002809 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2810 msg->msg_namelen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811
Herbert Xu8dc41942007-02-04 23:31:32 -08002812 if (pkt_sk(sk)->auxdata) {
Herbert Xuffbc6112007-02-04 23:33:10 -08002813 struct tpacket_auxdata aux;
2814
2815 aux.tp_status = TP_STATUS_USER;
2816 if (skb->ip_summed == CHECKSUM_PARTIAL)
2817 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2818 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2819 aux.tp_snaplen = skb->len;
2820 aux.tp_mac = 0;
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002821 aux.tp_net = skb_network_offset(skb);
Ben Greeara3bcc232011-06-01 06:49:10 +00002822 if (vlan_tx_tag_present(skb)) {
2823 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2824 aux.tp_status |= TP_STATUS_VLAN_VALID;
2825 } else {
2826 aux.tp_vlan_tci = 0;
2827 }
Eric Dumazet13fcb7b2011-06-06 22:42:06 -07002828 aux.tp_padding = 0;
Herbert Xuffbc6112007-02-04 23:33:10 -08002829 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
Herbert Xu8dc41942007-02-04 23:31:32 -08002830 }
2831
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 /*
2833 * Free or return the buffer as appropriate. Again this
2834 * hides all the races and re-entrancy issues from us.
2835 */
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08002836 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
2838out_free:
2839 skb_free_datagram(sk, skb);
2840out:
2841 return err;
2842}
2843
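/*
 * Example (user space, illustrative sketch): requesting and parsing the
 * PACKET_AUXDATA control message that packet_recvmsg() above emits, carrying
 * the original length, checksum status and VLAN tag.  Error handling is
 * omitted.
 */
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_packet.h>

static void recv_with_auxdata_sketch(int fd)
{
	int on = 1;
	char data[2048];
	union {
		struct cmsghdr align;
		char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	} control;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = &control, .msg_controllen = sizeof(control),
	};
	struct cmsghdr *cmsg;

	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &on, sizeof(on));
	if (recvmsg(fd, &msg, 0) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
			(void)aux;	/* aux->tp_len, aux->tp_snaplen, aux->tp_vlan_tci, ... */
		}
	}
}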
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2845 int *uaddr_len, int peer)
2846{
2847 struct net_device *dev;
2848 struct sock *sk = sock->sk;
2849
2850 if (peer)
2851 return -EOPNOTSUPP;
2852
2853 uaddr->sa_family = AF_PACKET;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002854 rcu_read_lock();
2855 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2856 if (dev)
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002857 strncpy(uaddr->sa_data, dev->name, 14);
Eric Dumazet654d1f82009-11-02 10:43:32 +01002858 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 memset(uaddr->sa_data, 0, 14);
Eric Dumazet654d1f82009-11-02 10:43:32 +01002860 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 *uaddr_len = sizeof(*uaddr);
2862
2863 return 0;
2864}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865
2866static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2867 int *uaddr_len, int peer)
2868{
2869 struct net_device *dev;
2870 struct sock *sk = sock->sk;
2871 struct packet_sock *po = pkt_sk(sk);
Cyrill Gorcunov13cfa972009-11-08 05:51:19 +00002872 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873
2874 if (peer)
2875 return -EOPNOTSUPP;
2876
2877 sll->sll_family = AF_PACKET;
2878 sll->sll_ifindex = po->ifindex;
2879 sll->sll_protocol = po->num;
Vasiliy Kulikov67286642010-11-10 12:09:10 -08002880 sll->sll_pkttype = 0;
Eric Dumazet654d1f82009-11-02 10:43:32 +01002881 rcu_read_lock();
2882 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 if (dev) {
2884 sll->sll_hatype = dev->type;
2885 sll->sll_halen = dev->addr_len;
2886 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 } else {
2888 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2889 sll->sll_halen = 0;
2890 }
Eric Dumazet654d1f82009-11-02 10:43:32 +01002891 rcu_read_unlock();
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002892 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893
2894 return 0;
2895}
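/*
 * Illustrative userspace sketch (example only): reading back the address
 * that packet_getname() above fills in as a struct sockaddr_ll. "fd" is
 * assumed to be an AF_PACKET socket that has been bound to a device.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void show_bound_addr(int fd)
{
	struct sockaddr_ll sll;
	socklen_t len = sizeof(sll);

	if (getsockname(fd, (struct sockaddr *)&sll, &len) == 0)
		printf("ifindex %d, hatype %d, halen %d\n",
		       sll.sll_ifindex, sll.sll_hatype, sll.sll_halen);
}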
2896
Wang Chen2aeb0b82008-07-14 20:49:46 -07002897static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2898 int what)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899{
2900 switch (i->type) {
2901 case PACKET_MR_MULTICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002902 if (i->alen != dev->addr_len)
2903 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 if (what > 0)
Jiri Pirko22bedad32010-04-01 21:22:57 +00002905 return dev_mc_add(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 else
Jiri Pirko22bedad32010-04-01 21:22:57 +00002907 return dev_mc_del(dev, i->addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908 break;
2909 case PACKET_MR_PROMISC:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002910 return dev_set_promiscuity(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 break;
2912 case PACKET_MR_ALLMULTI:
Wang Chen2aeb0b82008-07-14 20:49:46 -07002913 return dev_set_allmulti(dev, what);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 break;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002915 case PACKET_MR_UNICAST:
Jiri Pirko11625632010-03-02 20:40:01 +00002916 if (i->alen != dev->addr_len)
2917 return -EINVAL;
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002918 if (what > 0)
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002919 return dev_uc_add(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002920 else
Jiri Pirkoa748ee22010-04-01 21:22:09 +00002921 return dev_uc_del(dev, i->addr);
Eric W. Biedermand95ed922009-05-19 18:27:17 +00002922 break;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002923 default:
2924 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925 }
Wang Chen2aeb0b82008-07-14 20:49:46 -07002926 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927}
2928
2929static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2930{
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00002931 for ( ; i; i = i->next) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 if (i->ifindex == dev->ifindex)
2933 packet_dev_mc(dev, i, what);
2934 }
2935}
2936
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002937static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938{
2939 struct packet_sock *po = pkt_sk(sk);
2940 struct packet_mclist *ml, *i;
2941 struct net_device *dev;
2942 int err;
2943
2944 rtnl_lock();
2945
2946 err = -ENODEV;
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09002947 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 if (!dev)
2949 goto done;
2950
2951 err = -EINVAL;
Jiri Pirko11625632010-03-02 20:40:01 +00002952 if (mreq->mr_alen > dev->addr_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 goto done;
2954
2955 err = -ENOBUFS;
Kris Katterjohn8b3a7002006-01-11 15:56:43 -08002956 i = kmalloc(sizeof(*i), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957 if (i == NULL)
2958 goto done;
2959
2960 err = 0;
2961 for (ml = po->mclist; ml; ml = ml->next) {
2962 if (ml->ifindex == mreq->mr_ifindex &&
2963 ml->type == mreq->mr_type &&
2964 ml->alen == mreq->mr_alen &&
2965 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2966 ml->count++;
2967 /* Free the new element ... */
2968 kfree(i);
2969 goto done;
2970 }
2971 }
2972
2973 i->type = mreq->mr_type;
2974 i->ifindex = mreq->mr_ifindex;
2975 i->alen = mreq->mr_alen;
2976 memcpy(i->addr, mreq->mr_address, i->alen);
2977 i->count = 1;
2978 i->next = po->mclist;
2979 po->mclist = i;
Wang Chen2aeb0b82008-07-14 20:49:46 -07002980 err = packet_dev_mc(dev, i, 1);
2981 if (err) {
2982 po->mclist = i->next;
2983 kfree(i);
2984 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985
2986done:
2987 rtnl_unlock();
2988 return err;
2989}
2990
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07002991static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992{
2993 struct packet_mclist *ml, **mlp;
2994
2995 rtnl_lock();
2996
2997 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
2998 if (ml->ifindex == mreq->mr_ifindex &&
2999 ml->type == mreq->mr_type &&
3000 ml->alen == mreq->mr_alen &&
3001 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3002 if (--ml->count == 0) {
3003 struct net_device *dev;
3004 *mlp = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00003005 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3006 if (dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008 kfree(ml);
3009 }
3010 rtnl_unlock();
3011 return 0;
3012 }
3013 }
3014 rtnl_unlock();
3015 return -EADDRNOTAVAIL;
3016}
3017
3018static void packet_flush_mclist(struct sock *sk)
3019{
3020 struct packet_sock *po = pkt_sk(sk);
3021 struct packet_mclist *ml;
3022
3023 if (!po->mclist)
3024 return;
3025
3026 rtnl_lock();
3027 while ((ml = po->mclist) != NULL) {
3028 struct net_device *dev;
3029
3030 po->mclist = ml->next;
Eric Dumazetad959e72009-10-16 06:38:46 +00003031 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3032 if (dev != NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 packet_dev_mc(dev, ml, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034 kfree(ml);
3035 }
3036 rtnl_unlock();
3037}
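/*
 * Illustrative userspace sketch (example only): exercising the membership
 * interface implemented by packet_mc_add()/packet_dev_mc() above. The
 * interface index and multicast MAC address passed in are assumptions of
 * the caller.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static int join_groups(int fd, int ifindex, const unsigned char mac[6])
{
	struct packet_mreq mreq;

	/* join one link-layer multicast group (refcounted via dev_mc_add) */
	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;
	mreq.mr_type = PACKET_MR_MULTICAST;
	mreq.mr_alen = 6;
	memcpy(mreq.mr_address, mac, 6);
	if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0)
		return -1;

	/* promiscuous mode needs no address, only the type and ifindex */
	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;
	mreq.mr_type = PACKET_MR_PROMISC;
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}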
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038
3039static int
David S. Millerb7058842009-09-30 16:12:20 -07003040packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041{
3042 struct sock *sk = sock->sk;
Herbert Xu8dc41942007-02-04 23:31:32 -08003043 struct packet_sock *po = pkt_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 int ret;
3045
3046 if (level != SOL_PACKET)
3047 return -ENOPROTOOPT;
3048
Johann Baudy69e3c752009-05-18 22:11:22 -07003049 switch (optname) {
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003050 case PACKET_ADD_MEMBERSHIP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 case PACKET_DROP_MEMBERSHIP:
3052 {
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003053 struct packet_mreq_max mreq;
3054 int len = optlen;
3055 memset(&mreq, 0, sizeof(mreq));
3056 if (len < sizeof(struct packet_mreq))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003057 return -EINVAL;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003058 if (len > sizeof(mreq))
3059 len = sizeof(mreq);
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003060 if (copy_from_user(&mreq, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061 return -EFAULT;
Eric W. Biederman0fb375f2005-09-21 00:11:37 -07003062 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3063 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 if (optname == PACKET_ADD_MEMBERSHIP)
3065 ret = packet_mc_add(sk, &mreq);
3066 else
3067 ret = packet_mc_drop(sk, &mreq);
3068 return ret;
3069 }
David S. Millera2efcfa2007-05-29 13:12:50 -07003070
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071 case PACKET_RX_RING:
Johann Baudy69e3c752009-05-18 22:11:22 -07003072 case PACKET_TX_RING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073 {
chetan lokef6fb8f102011-08-19 10:18:16 +00003074 union tpacket_req_u req_u;
3075 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076
chetan lokef6fb8f102011-08-19 10:18:16 +00003077 switch (po->tp_version) {
3078 case TPACKET_V1:
3079 case TPACKET_V2:
3080 len = sizeof(req_u.req);
3081 break;
3082 case TPACKET_V3:
3083 default:
3084 len = sizeof(req_u.req3);
3085 break;
3086 }
3087 if (optlen < len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 return -EINVAL;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003089 if (pkt_sk(sk)->has_vnet_hdr)
3090 return -EINVAL;
chetan lokef6fb8f102011-08-19 10:18:16 +00003091 if (copy_from_user(&req_u.req, optval, len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003092 return -EFAULT;
chetan lokef6fb8f102011-08-19 10:18:16 +00003093 return packet_set_ring(sk, &req_u, 0,
3094 optname == PACKET_TX_RING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 }
3096 case PACKET_COPY_THRESH:
3097 {
3098 int val;
3099
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003100 if (optlen != sizeof(val))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 return -EINVAL;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003102 if (copy_from_user(&val, optval, sizeof(val)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103 return -EFAULT;
3104
3105 pkt_sk(sk)->copy_thresh = val;
3106 return 0;
3107 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003108 case PACKET_VERSION:
3109 {
3110 int val;
3111
3112 if (optlen != sizeof(val))
3113 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003114 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003115 return -EBUSY;
3116 if (copy_from_user(&val, optval, sizeof(val)))
3117 return -EFAULT;
3118 switch (val) {
3119 case TPACKET_V1:
3120 case TPACKET_V2:
chetan lokef6fb8f102011-08-19 10:18:16 +00003121 case TPACKET_V3:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003122 po->tp_version = val;
3123 return 0;
3124 default:
3125 return -EINVAL;
3126 }
3127 }
Patrick McHardy8913336a2008-07-18 18:05:19 -07003128 case PACKET_RESERVE:
3129 {
3130 unsigned int val;
3131
3132 if (optlen != sizeof(val))
3133 return -EINVAL;
Johann Baudy69e3c752009-05-18 22:11:22 -07003134 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
Patrick McHardy8913336a2008-07-18 18:05:19 -07003135 return -EBUSY;
3136 if (copy_from_user(&val, optval, sizeof(val)))
3137 return -EFAULT;
3138 po->tp_reserve = val;
3139 return 0;
3140 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003141 case PACKET_LOSS:
3142 {
3143 unsigned int val;
3144
3145 if (optlen != sizeof(val))
3146 return -EINVAL;
3147 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3148 return -EBUSY;
3149 if (copy_from_user(&val, optval, sizeof(val)))
3150 return -EFAULT;
3151 po->tp_loss = !!val;
3152 return 0;
3153 }
Herbert Xu8dc41942007-02-04 23:31:32 -08003154 case PACKET_AUXDATA:
3155 {
3156 int val;
3157
3158 if (optlen < sizeof(val))
3159 return -EINVAL;
3160 if (copy_from_user(&val, optval, sizeof(val)))
3161 return -EFAULT;
3162
3163 po->auxdata = !!val;
3164 return 0;
3165 }
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003166 case PACKET_ORIGDEV:
3167 {
3168 int val;
3169
3170 if (optlen < sizeof(val))
3171 return -EINVAL;
3172 if (copy_from_user(&val, optval, sizeof(val)))
3173 return -EFAULT;
3174
3175 po->origdev = !!val;
3176 return 0;
3177 }
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003178 case PACKET_VNET_HDR:
3179 {
3180 int val;
3181
3182 if (sock->type != SOCK_RAW)
3183 return -EINVAL;
3184 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3185 return -EBUSY;
3186 if (optlen < sizeof(val))
3187 return -EINVAL;
3188 if (copy_from_user(&val, optval, sizeof(val)))
3189 return -EFAULT;
3190
3191 po->has_vnet_hdr = !!val;
3192 return 0;
3193 }
Scott McMillan614f60f2010-06-02 05:53:56 -07003194 case PACKET_TIMESTAMP:
3195 {
3196 int val;
3197
3198 if (optlen != sizeof(val))
3199 return -EINVAL;
3200 if (copy_from_user(&val, optval, sizeof(val)))
3201 return -EFAULT;
3202
3203 po->tp_tstamp = val;
3204 return 0;
3205 }
David S. Millerdc99f602011-07-05 01:45:05 -07003206 case PACKET_FANOUT:
3207 {
3208 int val;
3209
3210 if (optlen != sizeof(val))
3211 return -EINVAL;
3212 if (copy_from_user(&val, optval, sizeof(val)))
3213 return -EFAULT;
3214
3215 return fanout_add(sk, val & 0xffff, val >> 16);
3216 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217 default:
3218 return -ENOPROTOOPT;
3219 }
3220}
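/*
 * Illustrative userspace sketch (example only): two of the simpler options
 * handled by packet_setsockopt() above. PACKET_VERSION returns -EBUSY once
 * a ring exists, so it has to be set first; PACKET_FANOUT carries the group
 * id in the low 16 bits and the mode in the high 16 bits, matching the
 * fanout_add() call above. The group id 42 is an arbitrary assumption.
 */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int setup_options(int fd)
{
	int version = TPACKET_V2;
	int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
		       &version, sizeof(version)) < 0)
		return -1;
	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
			  &fanout_arg, sizeof(fanout_arg));
}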
3221
3222static int packet_getsockopt(struct socket *sock, int level, int optname,
3223 char __user *optval, int __user *optlen)
3224{
3225 int len;
Eric Dumazetc06fff62012-04-19 21:56:11 +00003226 int val, lv = sizeof(val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227 struct sock *sk = sock->sk;
3228 struct packet_sock *po = pkt_sk(sk);
Eric Dumazetc06fff62012-04-19 21:56:11 +00003229 void *data = &val;
Herbert Xu8dc41942007-02-04 23:31:32 -08003230 struct tpacket_stats st;
chetan lokef6fb8f102011-08-19 10:18:16 +00003231 union tpacket_stats_u st_u;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232
3233 if (level != SOL_PACKET)
3234 return -ENOPROTOOPT;
3235
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003236 if (get_user(len, optlen))
3237 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238
3239 if (len < 0)
3240 return -EINVAL;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003241
Johann Baudy69e3c752009-05-18 22:11:22 -07003242 switch (optname) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 case PACKET_STATISTICS:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244 spin_lock_bh(&sk->sk_receive_queue.lock);
chetan lokef6fb8f102011-08-19 10:18:16 +00003245 if (po->tp_version == TPACKET_V3) {
Eric Dumazetc06fff62012-04-19 21:56:11 +00003246 lv = sizeof(struct tpacket_stats_v3);
chetan lokef6fb8f102011-08-19 10:18:16 +00003247 memcpy(&st_u.stats3, &po->stats,
Eric Dumazetc06fff62012-04-19 21:56:11 +00003248 sizeof(struct tpacket_stats));
chetan lokef6fb8f102011-08-19 10:18:16 +00003249 st_u.stats3.tp_freeze_q_cnt =
Eric Dumazetc06fff62012-04-19 21:56:11 +00003250 po->stats_u.stats3.tp_freeze_q_cnt;
chetan lokef6fb8f102011-08-19 10:18:16 +00003251 st_u.stats3.tp_packets += po->stats.tp_drops;
3252 data = &st_u.stats3;
3253 } else {
Eric Dumazetc06fff62012-04-19 21:56:11 +00003254 lv = sizeof(struct tpacket_stats);
chetan lokef6fb8f102011-08-19 10:18:16 +00003255 st = po->stats;
3256 st.tp_packets += st.tp_drops;
3257 data = &st;
3258 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259 memset(&po->stats, 0, sizeof(st));
3260 spin_unlock_bh(&sk->sk_receive_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261 break;
Herbert Xu8dc41942007-02-04 23:31:32 -08003262 case PACKET_AUXDATA:
Herbert Xu8dc41942007-02-04 23:31:32 -08003263 val = po->auxdata;
Herbert Xu8dc41942007-02-04 23:31:32 -08003264 break;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003265 case PACKET_ORIGDEV:
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003266 val = po->origdev;
Peter P. Waskiewicz Jr80feaac2007-04-20 16:05:39 -07003267 break;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003268 case PACKET_VNET_HDR:
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003269 val = po->has_vnet_hdr;
Sridhar Samudralabfd5f4a2010-02-04 20:24:10 -08003270 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003271 case PACKET_VERSION:
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003272 val = po->tp_version;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003273 break;
3274 case PACKET_HDRLEN:
3275 if (len > sizeof(int))
3276 len = sizeof(int);
3277 if (copy_from_user(&val, optval, len))
3278 return -EFAULT;
3279 switch (val) {
3280 case TPACKET_V1:
3281 val = sizeof(struct tpacket_hdr);
3282 break;
3283 case TPACKET_V2:
3284 val = sizeof(struct tpacket2_hdr);
3285 break;
chetan lokef6fb8f102011-08-19 10:18:16 +00003286 case TPACKET_V3:
3287 val = sizeof(struct tpacket3_hdr);
3288 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003289 default:
3290 return -EINVAL;
3291 }
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003292 break;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003293 case PACKET_RESERVE:
Patrick McHardy8913336a2008-07-18 18:05:19 -07003294 val = po->tp_reserve;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003295 break;
Johann Baudy69e3c752009-05-18 22:11:22 -07003296 case PACKET_LOSS:
Johann Baudy69e3c752009-05-18 22:11:22 -07003297 val = po->tp_loss;
Johann Baudy69e3c752009-05-18 22:11:22 -07003298 break;
Scott McMillan614f60f2010-06-02 05:53:56 -07003299 case PACKET_TIMESTAMP:
Scott McMillan614f60f2010-06-02 05:53:56 -07003300 val = po->tp_tstamp;
Scott McMillan614f60f2010-06-02 05:53:56 -07003301 break;
David S. Millerdc99f602011-07-05 01:45:05 -07003302 case PACKET_FANOUT:
David S. Millerdc99f602011-07-05 01:45:05 -07003303 val = (po->fanout ?
3304 ((u32)po->fanout->id |
3305 ((u32)po->fanout->type << 16)) :
3306 0);
David S. Millerdc99f602011-07-05 01:45:05 -07003307 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308 default:
3309 return -ENOPROTOOPT;
3310 }
3311
Eric Dumazetc06fff62012-04-19 21:56:11 +00003312 if (len > lv)
3313 len = lv;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003314 if (put_user(len, optlen))
3315 return -EFAULT;
Herbert Xu8dc41942007-02-04 23:31:32 -08003316 if (copy_to_user(optval, data, len))
3317 return -EFAULT;
Kris Katterjohn8ae55f02006-01-23 16:28:02 -08003318 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003319}
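/*
 * Illustrative userspace sketch (example only): reading the receive/drop
 * counters that packet_getsockopt(PACKET_STATISTICS) above copies out and
 * then resets. This assumes a TPACKET_V1/V2 socket (struct tpacket_stats);
 * a TPACKET_V3 socket is handed a struct tpacket_stats_v3 instead.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void dump_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("%u packets, %u drops since last query\n",
		       st.tp_packets, st.tp_drops);
}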
3320
3321
3322static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3323{
3324 struct sock *sk;
3325 struct hlist_node *node;
Jason Lunzad930652007-02-20 23:19:54 -08003326 struct net_device *dev = data;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003327 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328
stephen hemminger808f5112010-02-22 07:57:18 +00003329 rcu_read_lock();
3330 sk_for_each_rcu(sk, node, &net->packet.sklist) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331 struct packet_sock *po = pkt_sk(sk);
3332
3333 switch (msg) {
3334 case NETDEV_UNREGISTER:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 if (po->mclist)
3336 packet_dev_mclist(dev, po->mclist, -1);
David S. Millera2efcfa2007-05-29 13:12:50 -07003337 /* fallthrough */
3338
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 case NETDEV_DOWN:
3340 if (dev->ifindex == po->ifindex) {
3341 spin_lock(&po->bind_lock);
3342 if (po->running) {
David S. Millerce06b032011-07-04 01:44:29 -07003343 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344 sk->sk_err = ENETDOWN;
3345 if (!sock_flag(sk, SOCK_DEAD))
3346 sk->sk_error_report(sk);
3347 }
3348 if (msg == NETDEV_UNREGISTER) {
3349 po->ifindex = -1;
Ben Greear160ff182011-06-01 07:18:52 +00003350 if (po->prot_hook.dev)
3351 dev_put(po->prot_hook.dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 po->prot_hook.dev = NULL;
3353 }
3354 spin_unlock(&po->bind_lock);
3355 }
3356 break;
3357 case NETDEV_UP:
stephen hemminger808f5112010-02-22 07:57:18 +00003358 if (dev->ifindex == po->ifindex) {
3359 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003360 if (po->num)
3361 register_prot_hook(sk);
stephen hemminger808f5112010-02-22 07:57:18 +00003362 spin_unlock(&po->bind_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 break;
3365 }
3366 }
stephen hemminger808f5112010-02-22 07:57:18 +00003367 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 return NOTIFY_DONE;
3369}
3370
3371
3372static int packet_ioctl(struct socket *sock, unsigned int cmd,
3373 unsigned long arg)
3374{
3375 struct sock *sk = sock->sk;
3376
Johann Baudy69e3c752009-05-18 22:11:22 -07003377 switch (cmd) {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003378 case SIOCOUTQ:
3379 {
3380 int amount = sk_wmem_alloc_get(sk);
Eric Dumazet31e6d362009-06-17 19:05:41 -07003381
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003382 return put_user(amount, (int __user *)arg);
3383 }
3384 case SIOCINQ:
3385 {
3386 struct sk_buff *skb;
3387 int amount = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003389 spin_lock_bh(&sk->sk_receive_queue.lock);
3390 skb = skb_peek(&sk->sk_receive_queue);
3391 if (skb)
3392 amount = skb->len;
3393 spin_unlock_bh(&sk->sk_receive_queue.lock);
3394 return put_user(amount, (int __user *)arg);
3395 }
3396 case SIOCGSTAMP:
3397 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3398 case SIOCGSTAMPNS:
3399 return sock_get_timestampns(sk, (struct timespec __user *)arg);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003400
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401#ifdef CONFIG_INET
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003402 case SIOCADDRT:
3403 case SIOCDELRT:
3404 case SIOCDARP:
3405 case SIOCGARP:
3406 case SIOCSARP:
3407 case SIOCGIFADDR:
3408 case SIOCSIFADDR:
3409 case SIOCGIFBRDADDR:
3410 case SIOCSIFBRDADDR:
3411 case SIOCGIFNETMASK:
3412 case SIOCSIFNETMASK:
3413 case SIOCGIFDSTADDR:
3414 case SIOCSIFDSTADDR:
3415 case SIOCSIFFLAGS:
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003416 return inet_dgram_ops.ioctl(sock, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417#endif
3418
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003419 default:
3420 return -ENOIOCTLCMD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003421 }
3422 return 0;
3423}
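/*
 * Illustrative userspace sketch (example only): the SIOCINQ and SIOCGSTAMP
 * ioctls handled above. SIOCINQ reports the length of the next queued frame
 * (0 when the queue is empty); SIOCGSTAMP returns the timestamp of the last
 * frame read. The header names are the usual glibc/uapi ones and are assumed.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/sockios.h>

static void queue_info(int fd)
{
	struct timeval tv;
	int pending = 0;

	if (ioctl(fd, SIOCINQ, &pending) == 0)
		printf("next frame: %d bytes\n", pending);
	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
		printf("last frame at %ld.%06ld\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);
}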
3424
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003425static unsigned int packet_poll(struct file *file, struct socket *sock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 poll_table *wait)
3427{
3428 struct sock *sk = sock->sk;
3429 struct packet_sock *po = pkt_sk(sk);
3430 unsigned int mask = datagram_poll(file, sock, wait);
3431
3432 spin_lock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003433 if (po->rx_ring.pg_vec) {
chetan lokef6fb8f102011-08-19 10:18:16 +00003434 if (!packet_previous_rx_frame(po, &po->rx_ring,
3435 TP_STATUS_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 mask |= POLLIN | POLLRDNORM;
3437 }
3438 spin_unlock_bh(&sk->sk_receive_queue.lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003439 spin_lock_bh(&sk->sk_write_queue.lock);
3440 if (po->tx_ring.pg_vec) {
3441 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3442 mask |= POLLOUT | POLLWRNORM;
3443 }
3444 spin_unlock_bh(&sk->sk_write_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 return mask;
3446}
3447
3448
3449/* Dirty? Well, I still have not found a better way to account
3450 * for user mmaps.
3451 */
3452
3453static void packet_mm_open(struct vm_area_struct *vma)
3454{
3455 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003456 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003458
Linus Torvalds1da177e2005-04-16 15:20:36 -07003459 if (sk)
3460 atomic_inc(&pkt_sk(sk)->mapped);
3461}
3462
3463static void packet_mm_close(struct vm_area_struct *vma)
3464{
3465 struct file *file = vma->vm_file;
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003466 struct socket *sock = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467 struct sock *sk = sock->sk;
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003468
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469 if (sk)
3470 atomic_dec(&pkt_sk(sk)->mapped);
3471}
3472
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003473static const struct vm_operations_struct packet_mmap_ops = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003474 .open = packet_mm_open,
3475 .close = packet_mm_close,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476};
3477
Neil Horman0e3125c2010-11-16 10:26:47 -08003478static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3479 unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480{
3481 int i;
3482
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003483 for (i = 0; i < len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003484 if (likely(pg_vec[i].buffer)) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003485 if (is_vmalloc_addr(pg_vec[i].buffer))
Neil Horman0e3125c2010-11-16 10:26:47 -08003486 vfree(pg_vec[i].buffer);
3487 else
3488 free_pages((unsigned long)pg_vec[i].buffer,
3489 order);
3490 pg_vec[i].buffer = NULL;
3491 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492 }
3493 kfree(pg_vec);
3494}
3495
Olof Johanssoneea49cc92011-11-02 11:00:49 +00003496static char *alloc_one_pg_vec_page(unsigned long order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003497{
Neil Horman0e3125c2010-11-16 10:26:47 -08003498 char *buffer = NULL;
3499 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3500 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
Eric Dumazet719bfea2009-04-15 03:39:52 -07003501
Neil Horman0e3125c2010-11-16 10:26:47 -08003502 buffer = (char *) __get_free_pages(gfp_flags, order);
3503
3504 if (buffer)
3505 return buffer;
3506
3507 /*
3508 * __get_free_pages failed, fall back to vmalloc
3509 */
Eric Dumazetbbce5a52010-11-20 07:31:54 +00003510 buffer = vzalloc((1 << order) * PAGE_SIZE);
Neil Horman0e3125c2010-11-16 10:26:47 -08003511
3512 if (buffer)
3513 return buffer;
3514
3515 /*
3516	 * vmalloc failed, let's dig into swap here
3517 */
Neil Horman0e3125c2010-11-16 10:26:47 -08003518 gfp_flags &= ~__GFP_NORETRY;
3519 buffer = (char *)__get_free_pages(gfp_flags, order);
3520 if (buffer)
3521 return buffer;
3522
3523 /*
3524 * complete and utter failure
3525 */
3526 return NULL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003527}
3528
Neil Horman0e3125c2010-11-16 10:26:47 -08003529static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003530{
3531 unsigned int block_nr = req->tp_block_nr;
Neil Horman0e3125c2010-11-16 10:26:47 -08003532 struct pgv *pg_vec;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003533 int i;
3534
Neil Horman0e3125c2010-11-16 10:26:47 -08003535 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003536 if (unlikely(!pg_vec))
3537 goto out;
3538
3539 for (i = 0; i < block_nr; i++) {
Changli Gaoc56b4d92010-12-01 02:52:57 +00003540 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
Neil Horman0e3125c2010-11-16 10:26:47 -08003541 if (unlikely(!pg_vec[i].buffer))
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003542 goto out_free_pgvec;
3543 }
3544
3545out:
3546 return pg_vec;
3547
3548out_free_pgvec:
3549 free_pg_vec(pg_vec, order, block_nr);
3550 pg_vec = NULL;
3551 goto out;
3552}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553
chetan lokef6fb8f102011-08-19 10:18:16 +00003554static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
Johann Baudy69e3c752009-05-18 22:11:22 -07003555 int closing, int tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556{
Neil Horman0e3125c2010-11-16 10:26:47 -08003557 struct pgv *pg_vec = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558 struct packet_sock *po = pkt_sk(sk);
Al Viro0e11c912006-11-08 00:26:29 -08003559 int was_running, order = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003560 struct packet_ring_buffer *rb;
3561 struct sk_buff_head *rb_queue;
Al Viro0e11c912006-11-08 00:26:29 -08003562 __be16 num;
chetan lokef6fb8f102011-08-19 10:18:16 +00003563 int err = -EINVAL;
3564	/* Alias req_u->req to keep code churn minimal */
3565 struct tpacket_req *req = &req_u->req;
3566
3567 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3568 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3569 WARN(1, "Tx-ring is not supported.\n");
3570 goto out;
3571 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003572
3573 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3574 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3575
3576 err = -EBUSY;
3577 if (!closing) {
3578 if (atomic_read(&po->mapped))
3579 goto out;
3580 if (atomic_read(&rb->pending))
3581 goto out;
3582 }
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003583
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 if (req->tp_block_nr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 /* Sanity tests and some calculations */
Johann Baudy69e3c752009-05-18 22:11:22 -07003586 err = -EBUSY;
3587 if (unlikely(rb->pg_vec))
3588 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003590 switch (po->tp_version) {
3591 case TPACKET_V1:
3592 po->tp_hdrlen = TPACKET_HDRLEN;
3593 break;
3594 case TPACKET_V2:
3595 po->tp_hdrlen = TPACKET2_HDRLEN;
3596 break;
chetan lokef6fb8f102011-08-19 10:18:16 +00003597 case TPACKET_V3:
3598 po->tp_hdrlen = TPACKET3_HDRLEN;
3599 break;
Patrick McHardybbd6ef82008-07-14 22:50:15 -07003600 }
3601
Johann Baudy69e3c752009-05-18 22:11:22 -07003602 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003603 if (unlikely((int)req->tp_block_size <= 0))
Johann Baudy69e3c752009-05-18 22:11:22 -07003604 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003605 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003606 goto out;
Patrick McHardy8913336a2008-07-18 18:05:19 -07003607 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
Johann Baudy69e3c752009-05-18 22:11:22 -07003608 po->tp_reserve))
3609 goto out;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003610 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
Johann Baudy69e3c752009-05-18 22:11:22 -07003611 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612
Johann Baudy69e3c752009-05-18 22:11:22 -07003613 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3614 if (unlikely(rb->frames_per_block <= 0))
3615 goto out;
3616 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3617 req->tp_frame_nr))
3618 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619
3620 err = -ENOMEM;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003621 order = get_order(req->tp_block_size);
3622 pg_vec = alloc_pg_vec(req, order);
3623 if (unlikely(!pg_vec))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624 goto out;
chetan lokef6fb8f102011-08-19 10:18:16 +00003625 switch (po->tp_version) {
3626 case TPACKET_V3:
3627 /* Transmit path is not supported. We checked
3628	 * it above, but be paranoid here anyway
3629 */
3630 if (!tx_ring)
3631 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3632 break;
3633 default:
3634 break;
3635 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003636 }
3637 /* Done */
3638 else {
3639 err = -EINVAL;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003640 if (unlikely(req->tp_frame_nr))
Johann Baudy69e3c752009-05-18 22:11:22 -07003641 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642 }
3643
3644 lock_sock(sk);
3645
3646 /* Detach socket from network */
3647 spin_lock(&po->bind_lock);
3648 was_running = po->running;
3649 num = po->num;
3650 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 po->num = 0;
David S. Millerce06b032011-07-04 01:44:29 -07003652 __unregister_prot_hook(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653 }
3654 spin_unlock(&po->bind_lock);
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003655
Linus Torvalds1da177e2005-04-16 15:20:36 -07003656 synchronize_net();
3657
3658 err = -EBUSY;
Herbert Xu905db442009-01-30 14:12:06 -08003659 mutex_lock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660 if (closing || atomic_read(&po->mapped) == 0) {
3661 err = 0;
Johann Baudy69e3c752009-05-18 22:11:22 -07003662 spin_lock_bh(&rb_queue->lock);
Changli Gaoc053fd92010-12-10 16:02:20 -08003663 swap(rb->pg_vec, pg_vec);
Johann Baudy69e3c752009-05-18 22:11:22 -07003664 rb->frame_max = (req->tp_frame_nr - 1);
3665 rb->head = 0;
3666 rb->frame_size = req->tp_frame_size;
3667 spin_unlock_bh(&rb_queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668
Changli Gaoc053fd92010-12-10 16:02:20 -08003669 swap(rb->pg_vec_order, order);
3670 swap(rb->pg_vec_len, req->tp_block_nr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671
Johann Baudy69e3c752009-05-18 22:11:22 -07003672 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3673 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3674 tpacket_rcv : packet_rcv;
3675 skb_queue_purge(rb_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003676 if (atomic_read(&po->mapped))
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003677 pr_err("packet_mmap: vma is busy: %d\n",
3678 atomic_read(&po->mapped));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 }
Herbert Xu905db442009-01-30 14:12:06 -08003680 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681
3682 spin_lock(&po->bind_lock);
David S. Millerce06b032011-07-04 01:44:29 -07003683 if (was_running) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 po->num = num;
David S. Millerce06b032011-07-04 01:44:29 -07003685 register_prot_hook(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 }
3687 spin_unlock(&po->bind_lock);
chetan lokef6fb8f102011-08-19 10:18:16 +00003688 if (closing && (po->tp_version > TPACKET_V2)) {
3689 /* Because we don't support block-based V3 on tx-ring */
3690 if (!tx_ring)
3691 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3692 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 release_sock(sk);
3694
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695 if (pg_vec)
3696 free_pg_vec(pg_vec, order, req->tp_block_nr);
3697out:
3698 return err;
3699}
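/*
 * Illustrative userspace sketch (example only): a tpacket_req that passes
 * the sanity checks in packet_set_ring() above: the block size is a
 * multiple of PAGE_SIZE, the frame size is a multiple of TPACKET_ALIGNMENT
 * and large enough for the header plus tp_reserve, and
 * frames-per-block * tp_block_nr equals tp_frame_nr. The 4 KiB page size
 * and the chosen geometry are assumptions of this example.
 */
#include <linux/if_packet.h>

static struct tpacket_req example_req(void)
{
	struct tpacket_req req;
	unsigned int page_size = 4096;	/* assume getpagesize() == 4096 */

	req.tp_block_size = 8 * page_size;		/* 32 KiB per block  */
	req.tp_frame_size = 2048;			/* 16 frames/block   */
	req.tp_block_nr   = 64;
	req.tp_frame_nr   = (req.tp_block_size / req.tp_frame_size)
			    * req.tp_block_nr;		/* 16 * 64 = 1024    */
	return req;
}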
3700
Johann Baudy69e3c752009-05-18 22:11:22 -07003701static int packet_mmap(struct file *file, struct socket *sock,
3702 struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703{
3704 struct sock *sk = sock->sk;
3705 struct packet_sock *po = pkt_sk(sk);
Johann Baudy69e3c752009-05-18 22:11:22 -07003706 unsigned long size, expected_size;
3707 struct packet_ring_buffer *rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708 unsigned long start;
3709 int err = -EINVAL;
3710 int i;
3711
3712 if (vma->vm_pgoff)
3713 return -EINVAL;
3714
Herbert Xu905db442009-01-30 14:12:06 -08003715 mutex_lock(&po->pg_vec_lock);
Johann Baudy69e3c752009-05-18 22:11:22 -07003716
3717 expected_size = 0;
3718 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3719 if (rb->pg_vec) {
3720 expected_size += rb->pg_vec_len
3721 * rb->pg_vec_pages
3722 * PAGE_SIZE;
3723 }
3724 }
3725
3726 if (expected_size == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727 goto out;
Johann Baudy69e3c752009-05-18 22:11:22 -07003728
3729 size = vma->vm_end - vma->vm_start;
3730 if (size != expected_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731 goto out;
3732
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 start = vma->vm_start;
Johann Baudy69e3c752009-05-18 22:11:22 -07003734 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3735 if (rb->pg_vec == NULL)
3736 continue;
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003737
Johann Baudy69e3c752009-05-18 22:11:22 -07003738 for (i = 0; i < rb->pg_vec_len; i++) {
Neil Horman0e3125c2010-11-16 10:26:47 -08003739 struct page *page;
3740 void *kaddr = rb->pg_vec[i].buffer;
Johann Baudy69e3c752009-05-18 22:11:22 -07003741 int pg_num;
3742
Changli Gaoc56b4d92010-12-01 02:52:57 +00003743 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3744 page = pgv_to_page(kaddr);
Johann Baudy69e3c752009-05-18 22:11:22 -07003745 err = vm_insert_page(vma, start, page);
3746 if (unlikely(err))
3747 goto out;
3748 start += PAGE_SIZE;
Neil Horman0e3125c2010-11-16 10:26:47 -08003749 kaddr += PAGE_SIZE;
Johann Baudy69e3c752009-05-18 22:11:22 -07003750 }
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003751 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752 }
Johann Baudy69e3c752009-05-18 22:11:22 -07003753
David S. Miller4ebf0ae2005-12-06 16:38:35 -08003754 atomic_inc(&po->mapped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 vma->vm_ops = &packet_mmap_ops;
3756 err = 0;
3757
3758out:
Herbert Xu905db442009-01-30 14:12:06 -08003759 mutex_unlock(&po->pg_vec_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760 return err;
3761}
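/*
 * Illustrative userspace sketch (example only): the classic TPACKET_V2
 * receive loop built on packet_set_ring() and packet_mmap() above. Error
 * handling is trimmed, the ring geometry is taken from a tpacket_req such
 * as the one sketched earlier, and production code would also place a read
 * barrier between checking tp_status and touching the payload.
 */
#include <poll.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void rx_ring_loop(int fd, struct tpacket_req *req)
{
	int version = TPACKET_V2;
	unsigned int fpb = req->tp_block_size / req->tp_frame_size;
	size_t ring_len = (size_t)req->tp_block_size * req->tp_block_nr;
	unsigned char *ring;
	unsigned int frame = 0;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));

	/* one contiguous mapping covering every block; the offset must be 0 */
	ring = mmap(NULL, ring_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		return;

	for (;;) {
		struct tpacket2_hdr *hdr = (void *)(ring
			+ (size_t)(frame / fpb) * req->tp_block_size
			+ (size_t)(frame % fpb) * req->tp_frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			/* nothing ready: sleep until tpacket_rcv() fills a slot */
			struct pollfd pfd = { .fd = fd, .events = POLLIN };

			poll(&pfd, 1, -1);
			continue;
		}

		/* payload starts tp_mac bytes into the slot, tp_snaplen long */
		/* ... process (char *)hdr + hdr->tp_mac here ... */

		hdr->tp_status = TP_STATUS_KERNEL;	/* hand the slot back */
		frame = (frame + 1) % req->tp_frame_nr;
	}
}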
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003763static const struct proto_ops packet_ops_spkt = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 .family = PF_PACKET,
3765 .owner = THIS_MODULE,
3766 .release = packet_release,
3767 .bind = packet_bind_spkt,
3768 .connect = sock_no_connect,
3769 .socketpair = sock_no_socketpair,
3770 .accept = sock_no_accept,
3771 .getname = packet_getname_spkt,
3772 .poll = datagram_poll,
3773 .ioctl = packet_ioctl,
3774 .listen = sock_no_listen,
3775 .shutdown = sock_no_shutdown,
3776 .setsockopt = sock_no_setsockopt,
3777 .getsockopt = sock_no_getsockopt,
3778 .sendmsg = packet_sendmsg_spkt,
3779 .recvmsg = packet_recvmsg,
3780 .mmap = sock_no_mmap,
3781 .sendpage = sock_no_sendpage,
3782};
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783
Eric Dumazet90ddc4f2005-12-22 12:49:22 -08003784static const struct proto_ops packet_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785 .family = PF_PACKET,
3786 .owner = THIS_MODULE,
3787 .release = packet_release,
3788 .bind = packet_bind,
3789 .connect = sock_no_connect,
3790 .socketpair = sock_no_socketpair,
3791 .accept = sock_no_accept,
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003792 .getname = packet_getname,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793 .poll = packet_poll,
3794 .ioctl = packet_ioctl,
3795 .listen = sock_no_listen,
3796 .shutdown = sock_no_shutdown,
3797 .setsockopt = packet_setsockopt,
3798 .getsockopt = packet_getsockopt,
3799 .sendmsg = packet_sendmsg,
3800 .recvmsg = packet_recvmsg,
3801 .mmap = packet_mmap,
3802 .sendpage = sock_no_sendpage,
3803};
3804
Stephen Hemmingerec1b4cf2009-10-05 05:58:39 +00003805static const struct net_proto_family packet_family_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806 .family = PF_PACKET,
3807 .create = packet_create,
3808 .owner = THIS_MODULE,
3809};
3810
3811static struct notifier_block packet_netdev_notifier = {
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003812 .notifier_call = packet_notifier,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813};
3814
3815#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816
3817static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
stephen hemminger808f5112010-02-22 07:57:18 +00003818 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819{
Denis V. Luneve372c412007-11-19 22:31:54 -08003820 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003821
3822 rcu_read_lock();
3823 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824}
3825
3826static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3827{
Herbert Xu1bf40952007-12-16 14:04:02 -08003828 struct net *net = seq_file_net(seq);
stephen hemminger808f5112010-02-22 07:57:18 +00003829 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830}
3831
3832static void packet_seq_stop(struct seq_file *seq, void *v)
stephen hemminger808f5112010-02-22 07:57:18 +00003833 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834{
stephen hemminger808f5112010-02-22 07:57:18 +00003835 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836}
3837
YOSHIFUJI Hideaki1ce4f282007-02-09 23:25:10 +09003838static int packet_seq_show(struct seq_file *seq, void *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839{
3840 if (v == SEQ_START_TOKEN)
3841 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3842 else {
Li Zefanb7ceabd2010-02-08 23:19:29 +00003843 struct sock *s = sk_entry(v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 const struct packet_sock *po = pkt_sk(s);
3845
3846 seq_printf(seq,
Dan Rosenberg71338aa2011-05-23 12:17:35 +00003847 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848 s,
3849 atomic_read(&s->sk_refcnt),
3850 s->sk_type,
3851 ntohs(po->num),
3852 po->ifindex,
3853 po->running,
3854 atomic_read(&s->sk_rmem_alloc),
3855 sock_i_uid(s),
Eric Dumazet40d4e3d2009-07-21 21:57:59 +00003856 sock_i_ino(s));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857 }
3858
3859 return 0;
3860}
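/*
 * Illustrative sketch (example only): the line emitted per socket by
 * packet_seq_show() above is readable from /proc/net/packet; the columns
 * are sk, RefCnt, Type, Proto (hex), Iface, R(unning), Rmem, User and
 * Inode. A minimal dump of that file from userspace:
 */
#include <stdio.h>

static void dump_packet_sockets(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/packet", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}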
3861
Philippe De Muyter56b3d972007-07-10 23:07:31 -07003862static const struct seq_operations packet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863 .start = packet_seq_start,
3864 .next = packet_seq_next,
3865 .stop = packet_seq_stop,
3866 .show = packet_seq_show,
3867};
3868
3869static int packet_seq_open(struct inode *inode, struct file *file)
3870{
Denis V. Luneve372c412007-11-19 22:31:54 -08003871 return seq_open_net(inode, file, &packet_seq_ops,
3872 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873}
3874
Arjan van de Venda7071d2007-02-12 00:55:36 -08003875static const struct file_operations packet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876 .owner = THIS_MODULE,
3877 .open = packet_seq_open,
3878 .read = seq_read,
3879 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003880 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881};
3882
3883#endif
3884
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003885static int __net_init packet_net_init(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003886{
stephen hemminger808f5112010-02-22 07:57:18 +00003887 spin_lock_init(&net->packet.sklist_lock);
Denis V. Lunev2aaef4e2007-12-11 04:19:54 -08003888 INIT_HLIST_HEAD(&net->packet.sklist);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003889
3890 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3891 return -ENOMEM;
3892
3893 return 0;
3894}
3895
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00003896static void __net_exit packet_net_exit(struct net *net)
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003897{
3898 proc_net_remove(net, "packet");
3899}
3900
3901static struct pernet_operations packet_net_ops = {
3902 .init = packet_net_init,
3903 .exit = packet_net_exit,
3904};
3905
3906
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907static void __exit packet_exit(void)
3908{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909 unregister_netdevice_notifier(&packet_netdev_notifier);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003910 unregister_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911 sock_unregister(PF_PACKET);
3912 proto_unregister(&packet_proto);
3913}
3914
3915static int __init packet_init(void)
3916{
3917 int rc = proto_register(&packet_proto, 0);
3918
3919 if (rc != 0)
3920 goto out;
3921
3922 sock_register(&packet_family_ops);
Denis V. Lunevd12d01d2007-11-19 22:28:35 -08003923 register_pernet_subsys(&packet_net_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 register_netdevice_notifier(&packet_netdev_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925out:
3926 return rc;
3927}
3928
3929module_init(packet_init);
3930module_exit(packet_exit);
3931MODULE_LICENSE("GPL");
3932MODULE_ALIAS_NETPROTO(PF_PACKET);