/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain:       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;       /* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if protocol handler, mangling packet, will
 *      be the first on list, it is not able to sense, that packet
 *      is cloned and should be copied-on-write, so that it will
 *      change it and subsequent readers will get broken packet.
 *                                                      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return &ptype_all;
        else
                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep, and therefore cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
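
/*
 * A minimal usage sketch (not part of this file; handler and variable
 * names are hypothetical): a module that taps every received frame by
 * registering an ETH_P_ALL handler, the same mechanism af_packet uses.
 *
 *      static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *                            struct packet_type *pt,
 *                            struct net_device *orig_dev)
 *      {
 *              // The skb may be shared with other taps; consume it here.
 *              kfree_skb(skb);
 *              return 0;
 *      }
 *
 *      static struct packet_type my_tap __read_mostly = {
 *              .type = htons(ETH_P_ALL),
 *              .func = my_tap_rcv,
 *      };
 *
 *      dev_add_pack(&my_tap);          // e.g. from module init
 *      dev_remove_pack(&my_tap);       // from module exit; may sleep
 */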

/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPU's have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, and therefore cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;

        spin_lock(&offload_lock);
        list_add_rcu(&po->list, head);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPU's have gone
 * through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
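
/*
 * A minimal registration sketch (callback names are hypothetical):
 * protocol modules such as the IPv4 stack hand their GSO/GRO entry
 * points to the core this way.
 *
 *      static struct packet_offload my_offload __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_IP),
 *              .callbacks = {
 *                      .gso_segment = my_gso_segment,
 *                      .gro_receive = my_gro_receive,
 *                      .gro_complete = my_gro_complete,
 *              },
 *      };
 *
 *      dev_add_offload(&my_offload);
 */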

/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 * netdev_boot_setup_add - add new setup entry
 * @name: name of the device
 * @map: configured settings for the device
 *
 * Adds new setup entry to the dev_boot_setup list.  The function
 * returns 0 on error and 1 on success.  This is a generic routine
 * for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
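
/*
 * Given the parsing above, the "netdev=" parameter consumes up to four
 * integers (irq, base_addr, mem_start, mem_end) followed by the device
 * name, e.g. on the kernel command line (values illustrative):
 *
 *      netdev=5,0x300,0,0,eth0
 *
 * which records irq=5 and base_addr=0x300 for eth0 before any driver
 * probes run.
 */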

/*******************************************************************************

                        Device Interface Subroutines

*******************************************************************************/

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under RTNL semaphore
 * or @dev_base_lock. If the name is found a pointer to the device
 * is returned. If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 * dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use dev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
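
/*
 * A minimal caller sketch: the returned device is reference counted,
 * so every successful lookup must be paired with dev_put().
 *
 *      struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *      if (dev) {
 *              // ... use dev; it cannot be freed under us ...
 *              dev_put(dev);
 *      }
 */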

/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold either the RTNL semaphore
 * or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 * dev_get_by_index_rcu - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 * dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or NULL if it is not found. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * netdev_get_name - get a netdevice name, knowing its ifindex.
 * @net: network namespace
 * @name: a pointer to the buffer where the name will be stored.
 * @ifindex: the ifindex of the interface to get the name from.
 *
 * The use of raw_seqcount_begin() and cond_resched() before
 * retrying is required as we want to give the writers a chance
 * to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
        struct net_device *dev;
        unsigned int seq;

retry:
        seq = raw_seqcount_begin(&devnet_rename_seq);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        strcpy(name, dev->name);
        rcu_read_unlock();
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
                cond_resched();
                goto retry;
        }

        return 0;
}

/**
 * dev_getbyhwaddr_rcu - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns a pointer to the
 * device, or NULL if it is not found.
 * The caller must hold RCU or RTNL.
 * The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
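
/*
 * A minimal lookup sketch: callers that do not hold RTNL must bracket
 * the call with rcu_read_lock()/rcu_read_unlock() and stop using the
 * device before unlocking (the address below is illustrative).
 *
 *      static const unsigned char mac[ETH_ALEN] =
 *              { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *      struct net_device *dev;
 *
 *      rcu_read_lock();
 *      dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *      if (dev)
 *              netdev_info(dev, "found by hardware address\n");
 *      rcu_read_unlock();
 */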

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
892
893/**
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000894 * dev_get_by_flags_rcu - find any device with given flags
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700895 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896 * @if_flags: IFF_* values
897 * @mask: bitmask of bits in if_flags to check
898 *
899 * Search for any interface with the given flags. Returns NULL if a device
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000900 * is not found or a pointer to the device. Must be called inside
901 * rcu_read_lock(), and result refcount is unchanged.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 */
903
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000904struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700905 unsigned short mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700906{
Pavel Emelianov7562f872007-05-03 15:13:45 -0700907 struct net_device *dev, *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908
Pavel Emelianov7562f872007-05-03 15:13:45 -0700909 ret = NULL;
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800910 for_each_netdev_rcu(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 if (((dev->flags ^ if_flags) & mask) == 0) {
Pavel Emelianov7562f872007-05-03 15:13:45 -0700912 ret = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913 break;
914 }
915 }
Pavel Emelianov7562f872007-05-03 15:13:45 -0700916 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917}
Eric Dumazetbb69ae02010-06-07 11:42:13 +0000918EXPORT_SYMBOL(dev_get_by_flags_rcu);
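
/*
 * A minimal caller sketch: find any interface that is administratively
 * up, without taking a reference.
 *
 *      struct net_device *dev;
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP);
 *      if (dev)
 *              pr_debug("%s is up\n", dev->name);
 *      rcu_read_unlock();
 */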

/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names to
 * allow sysfs to work.  We also disallow any kind of
 * whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strlen(name) >= IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @buf:  scratch buffer and result name string
 *
 * Passed a format string - eg "lt%d" - it will try and find a suitable
 * id. It scans list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - eg "lt%d" - it will try and find a suitable
 * id. It scans list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
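
/*
 * A minimal caller sketch: drivers typically pass a format string and
 * let the core pick the first free unit number (the "dummy%d" format
 * here is illustrative).
 *
 *      err = dev_alloc_name(dev, "dummy%d");
 *      if (err < 0)
 *              return err;     // no free slot or invalid format
 *      // err holds the assigned unit; dev->name is now e.g. "dummy0"
 */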

static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

static int dev_get_valid_name(struct net *net,
                              struct net_device *dev,
                              const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change the name of a device; format strings such as "eth%d" can be
 * passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        write_seqcount_begin(&devnet_rename_seq);

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
                write_seqcount_end(&devnet_rename_seq);
                return 0;
        }

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, dev, newname);
        if (err < 0) {
                write_seqcount_end(&devnet_rename_seq);
                return err;
        }

rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
                write_seqcount_end(&devnet_rename_seq);
                return ret;
        }

        write_seqcount_end(&devnet_rename_seq);

        write_lock_bh(&dev_base_lock);
        hlist_del_rcu(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from @alias
 *
 * Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        char *new_ifalias;

        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                kfree(dev->ifalias);
                dev->ifalias = NULL;
                return 0;
        }

        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!new_ifalias)
                return -ENOMEM;
        dev->ifalias = new_ifalias;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}

/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001228
Patrick McHardybd380812010-02-26 06:34:53 +00001229static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001231 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001232 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001234 ASSERT_RTNL();
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 if (!netif_device_present(dev))
1237 return -ENODEV;
1238
Neil Hormanca99ca12013-02-05 08:05:43 +00001239 /* Block netpoll from trying to do any rx path servicing.
1240 * If we don't do this there is a chance ndo_poll_controller
1241 * or ndo_poll may be running while we open the device
1242 */
dingtianhongda6e3782013-05-27 19:53:31 +00001243 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001244
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001245 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1246 ret = notifier_to_errno(ret);
1247 if (ret)
1248 return ret;
1249
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001251
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001252 if (ops->ndo_validate_addr)
1253 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001254
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001255 if (!ret && ops->ndo_open)
1256 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257
Neil Hormanca99ca12013-02-05 08:05:43 +00001258 netpoll_rx_enable(dev);
1259
Jeff Garzikbada3392007-10-23 20:19:37 -07001260 if (ret)
1261 clear_bit(__LINK_STATE_START, &dev->state);
1262 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001264 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001265 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001267 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001269
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 return ret;
1271}
Patrick McHardybd380812010-02-26 06:34:53 +00001272
1273/**
1274 * dev_open - prepare an interface for use.
1275 * @dev: device to open
1276 *
1277 * Takes a device from down to up state. The device's private open
1278 * function is invoked and then the multicast lists are loaded. Finally
1279 * the device is moved into the up state and a %NETDEV_UP message is
1280 * sent to the netdev notifier chain.
1281 *
1282 * Calling this function on an active interface is a nop. On a failure
1283 * a negative errno code is returned.
1284 */
1285int dev_open(struct net_device *dev)
1286{
1287 int ret;
1288
Patrick McHardybd380812010-02-26 06:34:53 +00001289 if (dev->flags & IFF_UP)
1290 return 0;
1291
Patrick McHardybd380812010-02-26 06:34:53 +00001292 ret = __dev_open(dev);
1293 if (ret < 0)
1294 return ret;
1295
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001296 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001297 call_netdevice_notifiers(NETDEV_UP, dev);
1298
1299 return ret;
1300}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001301EXPORT_SYMBOL(dev_open);
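
/*
 * Illustrative sketch (not part of the original source): bringing a
 * device administratively up from kernel code. dev_open() must run
 * under the RTNL; my_bring_up() is hypothetical.
 */
#if 0
static int my_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* returns 0 if the device is already up */
	rtnl_unlock();
	return err;
}
#endif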
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
Octavian Purdila44345722010-12-13 12:44:07 +00001303static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304{
Octavian Purdila44345722010-12-13 12:44:07 +00001305 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001306
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001307 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001308 might_sleep();
1309
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001310 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001311 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
Octavian Purdila44345722010-12-13 12:44:07 +00001313 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314
Octavian Purdila44345722010-12-13 12:44:07 +00001315 /* Synchronize to scheduled poll. We cannot touch poll list, it
1316 * can be even on different cpu. So just clear netif_running().
1317 *
 1318	 * dev->stop() will invoke napi_disable() on all of its
1319 * napi_struct instances on this device.
1320 */
1321 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1322 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
Octavian Purdila44345722010-12-13 12:44:07 +00001324 dev_deactivate_many(head);
1325
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001326 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001327 const struct net_device_ops *ops = dev->netdev_ops;
1328
1329 /*
1330 * Call the device specific close. This cannot fail.
1331 * Only if device is UP
1332 *
1333 * We allow it to be called even after a DETACH hot-plug
1334 * event.
1335 */
1336 if (ops->ndo_stop)
1337 ops->ndo_stop(dev);
1338
Octavian Purdila44345722010-12-13 12:44:07 +00001339 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001340 net_dmaengine_put();
1341 }
1342
1343 return 0;
1344}
1345
1346static int __dev_close(struct net_device *dev)
1347{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001348 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001349 LIST_HEAD(single);
1350
Neil Hormanca99ca12013-02-05 08:05:43 +00001351 /* Temporarily disable netpoll until the interface is down */
dingtianhongda6e3782013-05-27 19:53:31 +00001352 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001353
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001354 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001355 retval = __dev_close_many(&single);
1356 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001357
1358 netpoll_rx_enable(dev);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001359 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001360}
1361
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001362static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001363{
1364 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001365
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001366 /* Remove the devices that don't need to be closed */
1367 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001368 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001369 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001370
1371 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001372
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001373 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001374 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001375 call_netdevice_notifiers(NETDEV_DOWN, dev);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001376 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001377 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 return 0;
1380}
Patrick McHardybd380812010-02-26 06:34:53 +00001381
1382/**
1383 * dev_close - shutdown an interface.
1384 * @dev: device to shutdown
1385 *
1386 * This function moves an active device into down state. A
1387 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1388 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1389 * chain.
1390 */
1391int dev_close(struct net_device *dev)
1392{
Eric Dumazete14a5992011-05-10 12:26:06 -07001393 if (dev->flags & IFF_UP) {
1394 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001395
Neil Hormanca99ca12013-02-05 08:05:43 +00001396 /* Block netpoll rx while the interface is going down */
dingtianhongda6e3782013-05-27 19:53:31 +00001397 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001398
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001399 list_add(&dev->close_list, &single);
Eric Dumazete14a5992011-05-10 12:26:06 -07001400 dev_close_many(&single);
1401 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001402
1403 netpoll_rx_enable(dev);
Eric Dumazete14a5992011-05-10 12:26:06 -07001404 }
dingtianhongda6e3782013-05-27 19:53:31 +00001405 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001406}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001407EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
1409
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001410/**
1411 * dev_disable_lro - disable Large Receive Offload on a device
1412 * @dev: device
1413 *
1414 * Disable Large Receive Offload (LRO) on a net device. Must be
1415 * called under RTNL. This is needed if received packets may be
1416 * forwarded to another interface.
1417 */
1418void dev_disable_lro(struct net_device *dev)
1419{
Neil Hormanf11970e2011-05-24 08:31:09 +00001420 /*
1421 * If we're trying to disable lro on a vlan device
1422 * use the underlying physical device instead
1423 */
1424 if (is_vlan_dev(dev))
1425 dev = vlan_dev_real_dev(dev);
1426
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001427 dev->wanted_features &= ~NETIF_F_LRO;
1428 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001429
Michał Mirosław22d59692011-04-21 12:42:15 +00001430 if (unlikely(dev->features & NETIF_F_LRO))
1431 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001432}
1433EXPORT_SYMBOL(dev_disable_lro);
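
/*
 * Illustrative sketch (not part of the original source): a forwarding
 * setup path disables LRO before packets received on @dev may be
 * forwarded, as the comment above requires. The caller must hold the
 * RTNL; my_enable_forwarding() is hypothetical.
 */
#if 0
static void my_enable_forwarding(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_disable_lro(dev);	/* forwarded skbs must not be LRO-merged */
}
#endif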
1434
Jiri Pirko351638e2013-05-28 01:30:21 +00001435static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1436 struct net_device *dev)
1437{
1438 struct netdev_notifier_info info;
1439
1440 netdev_notifier_info_init(&info, dev);
1441 return nb->notifier_call(nb, val, &info);
1442}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001443
Eric W. Biederman881d9662007-09-17 11:56:21 -07001444static int dev_boot_phase = 1;
1445
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446/**
1447 * register_netdevice_notifier - register a network notifier block
1448 * @nb: notifier
1449 *
1450 * Register a notifier to be called when network device events occur.
1451 * The notifier passed is linked into the kernel structures and must
1452 * not be reused until it has been unregistered. A negative errno code
1453 * is returned on a failure.
1454 *
 1455	 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001456 * to the new notifier to allow it to have a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 * view of the network device list.
1458 */
1459
1460int register_netdevice_notifier(struct notifier_block *nb)
1461{
1462 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001463 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001464 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 int err;
1466
1467 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001468 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001469 if (err)
1470 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001471 if (dev_boot_phase)
1472 goto unlock;
1473 for_each_net(net) {
1474 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001475 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001476 err = notifier_to_errno(err);
1477 if (err)
1478 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479
Eric W. Biederman881d9662007-09-17 11:56:21 -07001480 if (!(dev->flags & IFF_UP))
1481 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001482
Jiri Pirko351638e2013-05-28 01:30:21 +00001483 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001484 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001486
1487unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 rtnl_unlock();
1489 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001490
1491rollback:
1492 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001493 for_each_net(net) {
1494 for_each_netdev(net, dev) {
1495 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001496 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001497
Eric W. Biederman881d9662007-09-17 11:56:21 -07001498 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001499 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1500 dev);
1501 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001502 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001503 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001504 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001505 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001506
RongQing.Li8f891482011-11-30 23:43:07 -05001507outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001508 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001509 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001511EXPORT_SYMBOL(register_netdevice_notifier);
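
/*
 * Illustrative sketch (not part of the original source): a minimal
 * notifier block. Because existing devices are replayed as
 * NETDEV_REGISTER/NETDEV_UP at registration time (see above), the
 * callback also sees every device already present. Names prefixed
 * my_ are hypothetical.
 */
#if 0
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_REGISTER)
		pr_info("my: %s registered\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
	.notifier_call = my_netdev_event,
};

/* module init:  register_netdevice_notifier(&my_netdev_nb);
 * module exit:  unregister_netdevice_notifier(&my_netdev_nb);
 */
#endif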
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
1513/**
1514 * unregister_netdevice_notifier - unregister a network notifier block
1515 * @nb: notifier
1516 *
1517 * Unregister a notifier previously registered by
 1518	 * register_netdevice_notifier(). The notifier is unlinked from the
1519 * kernel structures and may then be reused. A negative errno code
1520 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001521 *
 1522	 * After unregistering, unregister and down device events are synthesized
1523 * for all devices on the device list to the removed notifier to remove
1524 * the need for special case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 */
1526
1527int unregister_netdevice_notifier(struct notifier_block *nb)
1528{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001529 struct net_device *dev;
1530 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001531 int err;
1532
1533 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001534 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001535 if (err)
1536 goto unlock;
1537
1538 for_each_net(net) {
1539 for_each_netdev(net, dev) {
1540 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001541 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1542 dev);
1543 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001544 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001545 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001546 }
1547 }
1548unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001549 rtnl_unlock();
1550 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001552EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
1554/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001555 * call_netdevice_notifiers_info - call all network notifier blocks
1556 * @val: value passed unmodified to notifier function
1557 * @dev: net_device pointer passed unmodified to notifier function
1558 * @info: notifier information data
1559 *
1560 * Call all network notifier blocks. Parameters and return value
1561 * are as for raw_notifier_call_chain().
1562 */
1563
1564int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1565 struct netdev_notifier_info *info)
1566{
1567 ASSERT_RTNL();
1568 netdev_notifier_info_init(info, dev);
1569 return raw_notifier_call_chain(&netdev_chain, val, info);
1570}
1571EXPORT_SYMBOL(call_netdevice_notifiers_info);
1572
1573/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 * call_netdevice_notifiers - call all network notifier blocks
1575 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001576 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 *
1578 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001579 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 */
1581
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001582int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583{
Jiri Pirko351638e2013-05-28 01:30:21 +00001584 struct netdev_notifier_info info;
1585
1586 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001588EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
Ingo Molnarc5905af2012-02-24 08:31:31 +01001590static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001591#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001592/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001593 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001594 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001595 */
1596static atomic_t netstamp_needed_deferred;
1597#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599void net_enable_timestamp(void)
1600{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001601#ifdef HAVE_JUMP_LABEL
1602 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1603
1604 if (deferred) {
1605 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001606 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001607 return;
1608 }
1609#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001610 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001612EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
1614void net_disable_timestamp(void)
1615{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001616#ifdef HAVE_JUMP_LABEL
1617 if (in_interrupt()) {
1618 atomic_inc(&netstamp_needed_deferred);
1619 return;
1620 }
1621#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001622 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001624EXPORT_SYMBOL(net_disable_timestamp);
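
/*
 * Illustrative sketch (not part of the original source): a capture-style
 * user holds the timestamp static key only while it needs timestamps,
 * keeping the hot path branch-free for everyone else. The my_capture_*
 * helpers are hypothetical.
 */
#if 0
static void my_capture_start(void)
{
	net_enable_timestamp();		/* reference-counted via static key */
}

static void my_capture_stop(void)
{
	net_disable_timestamp();
}
#endif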
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
Eric Dumazet3b098e22010-05-15 23:57:10 -07001626static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627{
Eric Dumazet588f0332011-11-15 04:12:55 +00001628 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001629 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001630 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631}
1632
Eric Dumazet588f0332011-11-15 04:12:55 +00001633#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001634 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001635 if ((COND) && !(SKB)->tstamp.tv64) \
1636 __net_timestamp(SKB); \
1637 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001638
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001639static inline bool is_skb_forwardable(struct net_device *dev,
1640 struct sk_buff *skb)
1641{
1642 unsigned int len;
1643
1644 if (!(dev->flags & IFF_UP))
1645 return false;
1646
1647 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1648 if (skb->len <= len)
1649 return true;
1650
1651 /* if TSO is enabled, we don't care about the length as the packet
 1652	 * could be forwarded without being segmented beforehand
1653 */
1654 if (skb_is_gso(skb))
1655 return true;
1656
1657 return false;
1658}
1659
Arnd Bergmann44540962009-11-26 06:07:08 +00001660/**
1661 * dev_forward_skb - loopback an skb to another netif
1662 *
1663 * @dev: destination network device
1664 * @skb: buffer to forward
1665 *
1666 * return values:
1667 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001668 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001669 *
1670 * dev_forward_skb can be used for injecting an skb from the
1671 * start_xmit function of one device into the receive queue
1672 * of another device.
1673 *
1674 * The receiving device may be in another namespace, so
1675 * we have to clear all information in the skb that could
1676 * impact namespace isolation.
1677 */
1678int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1679{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001680 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1681 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1682 atomic_long_inc(&dev->rx_dropped);
1683 kfree_skb(skb);
1684 return NET_RX_DROP;
1685 }
1686 }
1687
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001688 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001689 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001690 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001691 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001692 }
Arnd Bergmann44540962009-11-26 06:07:08 +00001693 skb->protocol = eth_type_trans(skb, dev);
Isaku Yamahata06a23fe2013-07-02 20:30:10 +09001694
1695 /* eth_type_trans() can set pkt_type.
Nicolas Dichtel64261f22013-08-13 17:51:09 +02001696 * call skb_scrub_packet() after it to clear pkt_type _after_ calling
1697 * eth_type_trans().
Isaku Yamahata06a23fe2013-07-02 20:30:10 +09001698 */
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02001699 skb_scrub_packet(skb, true);
Isaku Yamahata06a23fe2013-07-02 20:30:10 +09001700
Arnd Bergmann44540962009-11-26 06:07:08 +00001701 return netif_rx(skb);
1702}
1703EXPORT_SYMBOL_GPL(dev_forward_skb);
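
/*
 * Illustrative sketch (not part of the original source): the classic
 * user is a paired virtual device (veth-like) whose transmit routine
 * injects each skb into its peer's receive path. struct my_pair_priv
 * and its fields are hypothetical.
 */
#if 0
static netdev_tx_t my_pair_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_pair_priv *priv = netdev_priv(dev);

	/* On NET_RX_DROP the skb has already been freed for us */
	if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
#endif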
1704
Changli Gao71d9dec2010-12-15 19:57:25 +00001705static inline int deliver_skb(struct sk_buff *skb,
1706 struct packet_type *pt_prev,
1707 struct net_device *orig_dev)
1708{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001709 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1710 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001711 atomic_inc(&skb->users);
1712 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1713}
1714
Eric Leblondc0de08d2012-08-16 22:02:58 +00001715static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1716{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001717 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001718 return false;
1719
1720 if (ptype->id_match)
1721 return ptype->id_match(ptype, skb->sk);
1722 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1723 return true;
1724
1725 return false;
1726}
1727
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728/*
1729 * Support routine. Sends outgoing frames to any network
1730 * taps currently in use.
1731 */
1732
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001733static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734{
1735 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001736 struct sk_buff *skb2 = NULL;
1737 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001738
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 rcu_read_lock();
1740 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1741 /* Never send packets back to the socket
1742 * they originated from - MvS (miquels@drinkel.ow.org)
1743 */
1744 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001745 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001746 if (pt_prev) {
1747 deliver_skb(skb2, pt_prev, skb->dev);
1748 pt_prev = ptype;
1749 continue;
1750 }
1751
1752 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 if (!skb2)
1754 break;
1755
Eric Dumazet70978182010-12-20 21:22:51 +00001756 net_timestamp_set(skb2);
1757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 /* skb->nh should be correctly
 1759			   set by the sender, so that the second statement is
1760 just protection against buggy protocols.
1761 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001762 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001764 if (skb_network_header(skb2) < skb2->data ||
Simon Hormanced14f62013-05-28 20:34:25 +00001765 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
Joe Perchese87cc472012-05-13 21:56:26 +00001766 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1767 ntohs(skb2->protocol),
1768 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001769 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 }
1771
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001772 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001774 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 }
1776 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001777 if (pt_prev)
1778 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 rcu_read_unlock();
1780}
1781
Ben Hutchings2c530402012-07-10 10:55:09 +00001782/**
1783 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001784 * @dev: Network device
1785 * @txq: number of queues available
1786 *
 1787	 * If real_num_tx_queues is changed the tc mappings may no longer be
 1788	 * valid. To resolve this verify the tc mapping remains valid and if
 1789	 * not, zero the mapping. With no priorities mapping to this
 1790	 * offset/count pair it will no longer be used. In the worst case, if
 1791	 * TC0 is invalid, nothing can be done, so disable priority mappings.
 1792	 * It is expected that drivers will fix this mapping if they can
 1793	 * before calling netif_set_real_num_tx_queues.
1794 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001795static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001796{
1797 int i;
1798 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1799
1800 /* If TC0 is invalidated disable TC mapping */
1801 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001802 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001803 dev->num_tc = 0;
1804 return;
1805 }
1806
1807 /* Invalidated prio to tc mappings set to TC0 */
1808 for (i = 1; i < TC_BITMASK + 1; i++) {
1809 int q = netdev_get_prio_tc_map(dev, i);
1810
1811 tc = &dev->tc_to_txq[q];
1812 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001813 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1814 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001815 netdev_set_prio_tc_map(dev, i, 0);
1816 }
1817 }
1818}
1819
Alexander Duyck537c00d2013-01-10 08:57:02 +00001820#ifdef CONFIG_XPS
1821static DEFINE_MUTEX(xps_map_mutex);
1822#define xmap_dereference(P) \
1823 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1824
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001825static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1826 int cpu, u16 index)
1827{
1828 struct xps_map *map = NULL;
1829 int pos;
1830
1831 if (dev_maps)
1832 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1833
1834 for (pos = 0; map && pos < map->len; pos++) {
1835 if (map->queues[pos] == index) {
1836 if (map->len > 1) {
1837 map->queues[pos] = map->queues[--map->len];
1838 } else {
1839 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1840 kfree_rcu(map, rcu);
1841 map = NULL;
1842 }
1843 break;
1844 }
1845 }
1846
1847 return map;
1848}
1849
Alexander Duyck024e9672013-01-10 08:57:46 +00001850static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001851{
1852 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001853 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001854 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001855
1856 mutex_lock(&xps_map_mutex);
1857 dev_maps = xmap_dereference(dev->xps_maps);
1858
1859 if (!dev_maps)
1860 goto out_no_maps;
1861
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001862 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001863 for (i = index; i < dev->num_tx_queues; i++) {
1864 if (!remove_xps_queue(dev_maps, cpu, i))
1865 break;
1866 }
1867 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001868 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001869 }
1870
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001871 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001872 RCU_INIT_POINTER(dev->xps_maps, NULL);
1873 kfree_rcu(dev_maps, rcu);
1874 }
1875
Alexander Duyck024e9672013-01-10 08:57:46 +00001876 for (i = index; i < dev->num_tx_queues; i++)
1877 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1878 NUMA_NO_NODE);
1879
Alexander Duyck537c00d2013-01-10 08:57:02 +00001880out_no_maps:
1881 mutex_unlock(&xps_map_mutex);
1882}
1883
Alexander Duyck01c5f862013-01-10 08:57:35 +00001884static struct xps_map *expand_xps_map(struct xps_map *map,
1885 int cpu, u16 index)
1886{
1887 struct xps_map *new_map;
1888 int alloc_len = XPS_MIN_MAP_ALLOC;
1889 int i, pos;
1890
1891 for (pos = 0; map && pos < map->len; pos++) {
1892 if (map->queues[pos] != index)
1893 continue;
1894 return map;
1895 }
1896
1897 /* Need to add queue to this CPU's existing map */
1898 if (map) {
1899 if (pos < map->alloc_len)
1900 return map;
1901
1902 alloc_len = map->alloc_len * 2;
1903 }
1904
1905 /* Need to allocate new map to store queue on this CPU's map */
1906 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1907 cpu_to_node(cpu));
1908 if (!new_map)
1909 return NULL;
1910
1911 for (i = 0; i < pos; i++)
1912 new_map->queues[i] = map->queues[i];
1913 new_map->alloc_len = alloc_len;
1914 new_map->len = pos;
1915
1916 return new_map;
1917}
1918
Michael S. Tsirkin35735402013-10-02 09:14:06 +03001919int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1920 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001921{
Alexander Duyck01c5f862013-01-10 08:57:35 +00001922 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001923 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001924 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001925 int cpu, numa_node_id = -2;
1926 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001927
1928 mutex_lock(&xps_map_mutex);
1929
1930 dev_maps = xmap_dereference(dev->xps_maps);
1931
Alexander Duyck01c5f862013-01-10 08:57:35 +00001932 /* allocate memory for queue storage */
1933 for_each_online_cpu(cpu) {
1934 if (!cpumask_test_cpu(cpu, mask))
1935 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001936
Alexander Duyck01c5f862013-01-10 08:57:35 +00001937 if (!new_dev_maps)
1938 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001939 if (!new_dev_maps) {
1940 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001941 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001942 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001943
1944 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1945 NULL;
1946
1947 map = expand_xps_map(map, cpu, index);
1948 if (!map)
1949 goto error;
1950
1951 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1952 }
1953
1954 if (!new_dev_maps)
1955 goto out_no_new_maps;
1956
1957 for_each_possible_cpu(cpu) {
1958 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1959 /* add queue to CPU maps */
1960 int pos = 0;
1961
1962 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1963 while ((pos < map->len) && (map->queues[pos] != index))
1964 pos++;
1965
1966 if (pos == map->len)
1967 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001968#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00001969 if (numa_node_id == -2)
1970 numa_node_id = cpu_to_node(cpu);
1971 else if (numa_node_id != cpu_to_node(cpu))
1972 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001973#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00001974 } else if (dev_maps) {
1975 /* fill in the new device map from the old device map */
1976 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1977 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001978 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001979
Alexander Duyck537c00d2013-01-10 08:57:02 +00001980 }
1981
Alexander Duyck01c5f862013-01-10 08:57:35 +00001982 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1983
Alexander Duyck537c00d2013-01-10 08:57:02 +00001984 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00001985 if (dev_maps) {
1986 for_each_possible_cpu(cpu) {
1987 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1988 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1989 if (map && map != new_map)
1990 kfree_rcu(map, rcu);
1991 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001992
Alexander Duyck537c00d2013-01-10 08:57:02 +00001993 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001994 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001995
Alexander Duyck01c5f862013-01-10 08:57:35 +00001996 dev_maps = new_dev_maps;
1997 active = true;
1998
1999out_no_new_maps:
2000 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002001 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2002 (numa_node_id >= 0) ? numa_node_id :
2003 NUMA_NO_NODE);
2004
Alexander Duyck01c5f862013-01-10 08:57:35 +00002005 if (!dev_maps)
2006 goto out_no_maps;
2007
2008 /* removes queue from unused CPUs */
2009 for_each_possible_cpu(cpu) {
2010 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2011 continue;
2012
2013 if (remove_xps_queue(dev_maps, cpu, index))
2014 active = true;
2015 }
2016
2017 /* free map if not active */
2018 if (!active) {
2019 RCU_INIT_POINTER(dev->xps_maps, NULL);
2020 kfree_rcu(dev_maps, rcu);
2021 }
2022
2023out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002024 mutex_unlock(&xps_map_mutex);
2025
2026 return 0;
2027error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002028 /* remove any maps that we added */
2029 for_each_possible_cpu(cpu) {
2030 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2031 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2032 NULL;
2033 if (new_map && new_map != map)
2034 kfree(new_map);
2035 }
2036
Alexander Duyck537c00d2013-01-10 08:57:02 +00002037 mutex_unlock(&xps_map_mutex);
2038
Alexander Duyck537c00d2013-01-10 08:57:02 +00002039 kfree(new_dev_maps);
2040 return -ENOMEM;
2041}
2042EXPORT_SYMBOL(netif_set_xps_queue);
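
/*
 * Illustrative sketch (not part of the original source): a driver with
 * one TX queue per CPU could pin queue i to CPU i. Error handling is
 * elided and my_setup_xps() is hypothetical.
 */
#if 0
static void my_setup_xps(struct net_device *dev)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu >= dev->real_num_tx_queues)
			break;
		netif_set_xps_queue(dev, cpumask_of(cpu), cpu);
	}
}
#endif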
2043
2044#endif
John Fastabendf0796d52010-07-01 13:21:57 +00002045/*
2046 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 2047	 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2048 */
Tom Herberte6484932010-10-18 18:04:39 +00002049int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002050{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002051 int rc;
2052
Tom Herberte6484932010-10-18 18:04:39 +00002053 if (txq < 1 || txq > dev->num_tx_queues)
2054 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002055
Ben Hutchings5c565802011-02-15 19:39:21 +00002056 if (dev->reg_state == NETREG_REGISTERED ||
2057 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002058 ASSERT_RTNL();
2059
Tom Herbert1d24eb42010-11-21 13:17:27 +00002060 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2061 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002062 if (rc)
2063 return rc;
2064
John Fastabend4f57c082011-01-17 08:06:04 +00002065 if (dev->num_tc)
2066 netif_setup_tc(dev, txq);
2067
Alexander Duyck024e9672013-01-10 08:57:46 +00002068 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002069 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002070#ifdef CONFIG_XPS
2071 netif_reset_xps_queues_gt(dev, txq);
2072#endif
2073 }
John Fastabendf0796d52010-07-01 13:21:57 +00002074 }
Tom Herberte6484932010-10-18 18:04:39 +00002075
2076 dev->real_num_tx_queues = txq;
2077 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002078}
2079EXPORT_SYMBOL(netif_set_real_num_tx_queues);
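
/*
 * Illustrative sketch (not part of the original source): a driver that
 * allocated the maximum queue count at probe time shrinks the set
 * actually used once it knows how many MSI-X vectors it received.
 * Assumes nvec >= 1; my_adjust_queues() is hypothetical.
 */
#if 0
static int my_adjust_queues(struct net_device *dev, unsigned int nvec)
{
	int err;

	rtnl_lock();	/* required once the device is registered */
	err = netif_set_real_num_tx_queues(dev,
			min_t(unsigned int, nvec, dev->num_tx_queues));
	rtnl_unlock();
	return err;
}
#endif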
Denis Vlasenko56079432006-03-29 15:57:29 -08002080
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002081#ifdef CONFIG_RPS
2082/**
2083 * netif_set_real_num_rx_queues - set actual number of RX queues used
2084 * @dev: Network device
2085 * @rxq: Actual number of RX queues
2086 *
2087 * This must be called either with the rtnl_lock held or before
2088 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002089 * negative error code. If called before registration, it always
2090 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002091 */
2092int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2093{
2094 int rc;
2095
Tom Herbertbd25fa72010-10-18 18:00:16 +00002096 if (rxq < 1 || rxq > dev->num_rx_queues)
2097 return -EINVAL;
2098
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002099 if (dev->reg_state == NETREG_REGISTERED) {
2100 ASSERT_RTNL();
2101
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002102 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2103 rxq);
2104 if (rc)
2105 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002106 }
2107
2108 dev->real_num_rx_queues = rxq;
2109 return 0;
2110}
2111EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2112#endif
2113
Ben Hutchings2c530402012-07-10 10:55:09 +00002114/**
2115 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002116 *
2117 * This routine should set an upper limit on the number of RSS queues
2118 * used by default by multiqueue devices.
2119 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002120int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002121{
2122 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2123}
2124EXPORT_SYMBOL(netif_get_num_default_rss_queues);
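
/*
 * Illustrative sketch (not part of the original source): a driver sizing
 * its RSS table at probe time caps the queue count with this default
 * upper bound rather than using num_online_cpus() directly.
 * my_pick_rss_queues() is hypothetical.
 */
#if 0
static unsigned int my_pick_rss_queues(unsigned int hw_max)
{
	return min_t(unsigned int, hw_max,
		     netif_get_num_default_rss_queues());
}
#endif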
2125
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002126static inline void __netif_reschedule(struct Qdisc *q)
2127{
2128 struct softnet_data *sd;
2129 unsigned long flags;
2130
2131 local_irq_save(flags);
2132 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002133 q->next_sched = NULL;
2134 *sd->output_queue_tailp = q;
2135 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002136 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2137 local_irq_restore(flags);
2138}
2139
David S. Miller37437bb2008-07-16 02:15:04 -07002140void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002141{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002142 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2143 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002144}
2145EXPORT_SYMBOL(__netif_schedule);
2146
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002147void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002148{
David S. Miller3578b0c2010-08-03 00:24:04 -07002149 if (atomic_dec_and_test(&skb->users)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002150 struct softnet_data *sd;
2151 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08002152
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002153 local_irq_save(flags);
2154 sd = &__get_cpu_var(softnet_data);
2155 skb->next = sd->completion_queue;
2156 sd->completion_queue = skb;
2157 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2158 local_irq_restore(flags);
2159 }
Denis Vlasenko56079432006-03-29 15:57:29 -08002160}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002161EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08002162
2163void dev_kfree_skb_any(struct sk_buff *skb)
2164{
2165 if (in_irq() || irqs_disabled())
2166 dev_kfree_skb_irq(skb);
2167 else
2168 dev_kfree_skb(skb);
2169}
2170EXPORT_SYMBOL(dev_kfree_skb_any);
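
/*
 * Illustrative sketch (not part of the original source): TX-completion
 * handlers that may run either in hardirq context or from process
 * context (e.g. a teardown path) free skbs with the _any variant so
 * the correct mechanism is chosen at run time. my_tx_complete() is
 * hypothetical.
 */
#if 0
static void my_tx_complete(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);		/* safe in any context */
}
#endif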
2171
2172
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002173/**
2174 * netif_device_detach - mark device as removed
2175 * @dev: network device
2176 *
 2177	 * Mark device as removed from the system and therefore no longer available.
2178 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002179void netif_device_detach(struct net_device *dev)
2180{
2181 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2182 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002183 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002184 }
2185}
2186EXPORT_SYMBOL(netif_device_detach);
2187
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002188/**
2189 * netif_device_attach - mark device as attached
2190 * @dev: network device
2191 *
 2192	 * Mark device as attached to the system and restart if needed.
2193 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002194void netif_device_attach(struct net_device *dev)
2195{
2196 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2197 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002198 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002199 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002200 }
2201}
2202EXPORT_SYMBOL(netif_device_attach);
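
/*
 * Illustrative sketch (not part of the original source): the usual
 * detach/attach pairing in a driver's suspend and resume callbacks.
 * my_hw_down()/my_hw_up() are hypothetical hardware helpers.
 */
#if 0
static int my_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops queues if running */
	my_hw_down(dev);
	return 0;
}

static int my_resume(struct net_device *dev)
{
	my_hw_up(dev);
	netif_device_attach(dev);	/* restarts queues if running */
	return 0;
}
#endif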
2203
Ben Hutchings36c92472012-01-17 07:57:56 +00002204static void skb_warn_bad_offload(const struct sk_buff *skb)
2205{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002206 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002207 struct net_device *dev = skb->dev;
2208 const char *driver = "";
2209
Ben Greearc846ad92013-04-19 10:45:52 +00002210 if (!net_ratelimit())
2211 return;
2212
Ben Hutchings36c92472012-01-17 07:57:56 +00002213 if (dev && dev->dev.parent)
2214 driver = dev_driver_string(dev->dev.parent);
2215
2216 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2217 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002218 driver, dev ? &dev->features : &null_features,
2219 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002220 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2221 skb_shinfo(skb)->gso_type, skb->ip_summed);
2222}
2223
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224/*
2225 * Invalidate hardware checksum when packet is to be mangled, and
2226 * complete checksum manually on outgoing path.
2227 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002228int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229{
Al Virod3bc23e2006-11-14 21:24:49 -08002230 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002231 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
Patrick McHardy84fa7932006-08-29 16:44:56 -07002233 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002234 goto out_set_summed;
2235
2236 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002237 skb_warn_bad_offload(skb);
2238 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 }
2240
Eric Dumazetcef401d2013-01-25 20:34:37 +00002241 /* Before computing a checksum, we should make sure no frag could
2242 * be modified by an external entity : checksum could be wrong.
2243 */
2244 if (skb_has_shared_frag(skb)) {
2245 ret = __skb_linearize(skb);
2246 if (ret)
2247 goto out;
2248 }
2249
Michał Mirosław55508d62010-12-14 15:24:08 +00002250 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002251 BUG_ON(offset >= skb_headlen(skb));
2252 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2253
2254 offset += skb->csum_offset;
2255 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2256
2257 if (skb_cloned(skb) &&
2258 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2260 if (ret)
2261 goto out;
2262 }
2263
Herbert Xua0308472007-10-15 01:47:15 -07002264 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002265out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002267out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 return ret;
2269}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002270EXPORT_SYMBOL(skb_checksum_help);
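
/*
 * Illustrative sketch (not part of the original source): a driver that
 * cannot checksum a given packet in hardware falls back to software
 * checksumming before handing it to the device. my_xmit_frame() is
 * hypothetical.
 */
#if 0
static netdev_tx_t my_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    skb_checksum_help(skb))
		goto drop;
	/* ... hand the fully checksummed skb to the hardware ... */
	return NETDEV_TX_OK;
drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
#endif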
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002272__be16 skb_network_protocol(struct sk_buff *skb)
2273{
2274 __be16 type = skb->protocol;
David S. Miller61816592013-03-20 12:46:26 -04002275 int vlan_depth = ETH_HLEN;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002276
Pravin B Shelar19acc322013-05-07 20:41:07 +00002277 /* Tunnel gso handlers can set protocol to ethernet. */
2278 if (type == htons(ETH_P_TEB)) {
2279 struct ethhdr *eth;
2280
2281 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2282 return 0;
2283
2284 eth = (struct ethhdr *)skb_mac_header(skb);
2285 type = eth->h_proto;
2286 }
2287
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002288 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002289 struct vlan_hdr *vh;
2290
2291 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2292 return 0;
2293
2294 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2295 type = vh->h_vlan_encapsulated_proto;
2296 vlan_depth += VLAN_HLEN;
2297 }
2298
2299 return type;
2300}
2301
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002302/**
2303 * skb_mac_gso_segment - mac layer segmentation handler.
2304 * @skb: buffer to segment
2305 * @features: features for the output path (see dev->features)
2306 */
2307struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2308 netdev_features_t features)
2309{
2310 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2311 struct packet_offload *ptype;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002312 __be16 type = skb_network_protocol(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002313
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002314 if (unlikely(!type))
2315 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002316
2317 __skb_pull(skb, skb->mac_len);
2318
2319 rcu_read_lock();
2320 list_for_each_entry_rcu(ptype, &offload_base, list) {
2321 if (ptype->type == type && ptype->callbacks.gso_segment) {
2322 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2323 int err;
2324
2325 err = ptype->callbacks.gso_send_check(skb);
2326 segs = ERR_PTR(err);
2327 if (err || skb_gso_ok(skb, features))
2328 break;
2329 __skb_push(skb, (skb->data -
2330 skb_network_header(skb)));
2331 }
2332 segs = ptype->callbacks.gso_segment(skb, features);
2333 break;
2334 }
2335 }
2336 rcu_read_unlock();
2337
2338 __skb_push(skb, skb->data - skb_mac_header(skb));
2339
2340 return segs;
2341}
2342EXPORT_SYMBOL(skb_mac_gso_segment);
2343
2344
Cong Wang12b00042013-02-05 16:36:38 +00002345/* openvswitch calls this on the rx path, so we need a different check.
2346 */
2347static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2348{
2349 if (tx_path)
2350 return skb->ip_summed != CHECKSUM_PARTIAL;
2351 else
2352 return skb->ip_summed == CHECKSUM_NONE;
2353}
2354
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002355/**
Cong Wang12b00042013-02-05 16:36:38 +00002356 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002357 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002358 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002359 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002360 *
2361 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002362 *
2363 * It may return NULL if the skb requires no segmentation. This is
2364 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002365 */
Cong Wang12b00042013-02-05 16:36:38 +00002366struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2367 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002368{
Cong Wang12b00042013-02-05 16:36:38 +00002369 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002370 int err;
2371
Ben Hutchings36c92472012-01-17 07:57:56 +00002372 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002373
Herbert Xua430a432006-07-08 13:34:56 -07002374 if (skb_header_cloned(skb) &&
2375 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2376 return ERR_PTR(err);
2377 }
2378
Pravin B Shelar68c33162013-02-14 14:02:41 +00002379 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002380 SKB_GSO_CB(skb)->encap_level = 0;
2381
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002382 skb_reset_mac_header(skb);
2383 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002384
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002385 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002386}
Cong Wang12b00042013-02-05 16:36:38 +00002387EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002388
Herbert Xufb286bb2005-11-10 13:01:24 -08002389/* Take action when hardware reception checksum errors are detected. */
2390#ifdef CONFIG_BUG
2391void netdev_rx_csum_fault(struct net_device *dev)
2392{
2393 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002394 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002395 dump_stack();
2396 }
2397}
2398EXPORT_SYMBOL(netdev_rx_csum_fault);
2399#endif
2400
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401/* Actually, we should eliminate this check as soon as we know that:
 2402	 * 1. An IOMMU is present and can map all the memory.
2403 * 2. No high memory really exists on this machine.
2404 */
2405
Eric Dumazet9092c652010-04-02 13:34:49 -07002406static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002408#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002410 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002411 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2412 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2413 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002414 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002415 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002416 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002418 if (PCI_DMA_BUS_IS_PHYS) {
2419 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
Eric Dumazet9092c652010-04-02 13:34:49 -07002421 if (!pdev)
2422 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002423 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002424 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2425 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002426 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2427 return 1;
2428 }
2429 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002430#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 return 0;
2432}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002434struct dev_gso_cb {
2435 void (*destructor)(struct sk_buff *skb);
2436};
2437
2438#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2439
2440static void dev_gso_skb_destructor(struct sk_buff *skb)
2441{
2442 struct dev_gso_cb *cb;
2443
2444 do {
2445 struct sk_buff *nskb = skb->next;
2446
2447 skb->next = nskb->next;
2448 nskb->next = NULL;
2449 kfree_skb(nskb);
2450 } while (skb->next);
2451
2452 cb = DEV_GSO_CB(skb);
2453 if (cb->destructor)
2454 cb->destructor(skb);
2455}
2456
2457/**
2458 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2459 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002460 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002461 *
2462 * This function segments the given skb and stores the list of segments
2463 * in skb->next.
2464 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002465static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002466{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002467 struct sk_buff *segs;
2468
Herbert Xu576a30e2006-06-27 13:22:38 -07002469 segs = skb_gso_segment(skb, features);
2470
2471 /* Verifying header integrity only. */
2472 if (!segs)
2473 return 0;
2474
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002475 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002476 return PTR_ERR(segs);
2477
2478 skb->next = segs;
2479 DEV_GSO_CB(skb)->destructor = skb->destructor;
2480 skb->destructor = dev_gso_skb_destructor;
2481
2482 return 0;
2483}
2484
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002485static netdev_features_t harmonize_features(struct sk_buff *skb,
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002486 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002487{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002488 if (skb->ip_summed != CHECKSUM_NONE &&
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002489 !can_checksum_protocol(features, skb_network_protocol(skb))) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002490 features &= ~NETIF_F_ALL_CSUM;
Jesse Grossf01a5232011-01-09 06:23:31 +00002491 } else if (illegal_highdma(skb->dev, skb)) {
2492 features &= ~NETIF_F_SG;
2493 }
2494
2495 return features;
2496}
2497
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002498netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002499{
2500 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002501 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002502
Ben Hutchings30b678d2012-07-30 15:57:00 +00002503 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2504 features &= ~NETIF_F_GSO_MASK;
2505
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002506 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
Jesse Gross58e998c2010-10-29 12:14:55 +00002507 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2508 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002509 } else if (!vlan_tx_tag_present(skb)) {
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002510 return harmonize_features(skb, features);
Jesse Grossf01a5232011-01-09 06:23:31 +00002511 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002512
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002513 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2514 NETIF_F_HW_VLAN_STAG_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002515
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002516 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
Jesse Grossf01a5232011-01-09 06:23:31 +00002517 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002518 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2519 NETIF_F_HW_VLAN_STAG_TX;
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002520
2521 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002522}
Jesse Grossf01a5232011-01-09 06:23:31 +00002523EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002524
John Fastabend6afff0c2010-06-16 14:18:12 +00002525/*
2526 * Returns true if either:
2527 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002528 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002529 */
2530static inline int skb_needs_linearize(struct sk_buff *skb,
Patrick McHardy6708c9e2013-05-01 22:36:49 +00002531 netdev_features_t features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002532{
Jesse Gross02932ce2011-01-09 06:23:34 +00002533 return skb_is_nonlinear(skb) &&
2534 ((skb_has_frag_list(skb) &&
2535 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002536 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002537 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002538}
2539
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002540int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002541 struct netdev_queue *txq, void *accel_priv)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002542{
Stephen Hemminger00829822008-11-20 20:14:53 -08002543 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002544 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002545 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002546
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002547 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002548 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002549
Eric Dumazet93f154b2009-05-18 22:19:19 -07002550 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002551 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002552 * it's hot in this CPU cache
2553 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002554 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2555 skb_dst_drop(skb);
2556
Jesse Grossfc741212011-01-09 06:23:32 +00002557 features = netif_skb_features(skb);
2558
Jesse Gross7b9c6092010-10-20 13:56:04 +00002559 if (vlan_tx_tag_present(skb) &&
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002560 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2561 skb = __vlan_put_tag(skb, skb->vlan_proto,
2562 vlan_tx_tag_get(skb));
Jesse Gross7b9c6092010-10-20 13:56:04 +00002563 if (unlikely(!skb))
2564 goto out;
2565
2566 skb->vlan_tci = 0;
2567 }
2568
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002569 /* If this is an encapsulation offload request, verify that we
2570 * are testing the hardware encapsulation features instead of the
2571 * standard features for the netdev.
2572 */
2573 if (skb->encapsulation)
2574 features &= dev->hw_enc_features;
2575
Jesse Grossfc741212011-01-09 06:23:32 +00002576 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002577 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002578 goto out_kfree_skb;
2579 if (skb->next)
2580 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002581 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002582 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002583 __skb_linearize(skb))
2584 goto out_kfree_skb;
2585
2586 /* If packet is not checksummed and device does not
2587 * support checksumming for this protocol, complete
2588 * checksumming here.
2589 */
2590 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002591 if (skb->encapsulation)
2592 skb_set_inner_transport_header(skb,
2593 skb_checksum_start_offset(skb));
2594 else
2595 skb_set_transport_header(skb,
2596 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002597 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002598 skb_checksum_help(skb))
2599 goto out_kfree_skb;
2600 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002601 }
2602
Eric Dumazetb40863c2012-09-18 20:44:49 +00002603 if (!list_empty(&ptype_all))
2604 dev_queue_xmit_nit(skb, dev);
2605
Koki Sanagiec764bf2011-05-30 21:48:34 +00002606 skb_len = skb->len;
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002607 if (accel_priv)
2608 rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
2609 else
2610 rc = ops->ndo_start_xmit(skb, dev);
2611
Koki Sanagiec764bf2011-05-30 21:48:34 +00002612 trace_net_dev_xmit(skb, rc, dev, skb_len);
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002613 if (rc == NETDEV_TX_OK && txq)
Eric Dumazet08baf562009-05-25 22:58:01 -07002614 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002615 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002616 }
2617
Herbert Xu576a30e2006-06-27 13:22:38 -07002618gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002619 do {
2620 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002621
2622 skb->next = nskb->next;
2623 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002624
Eric Dumazetb40863c2012-09-18 20:44:49 +00002625 if (!list_empty(&ptype_all))
2626 dev_queue_xmit_nit(nskb, dev);
2627
Koki Sanagiec764bf2011-05-30 21:48:34 +00002628 skb_len = nskb->len;
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002629 if (accel_priv)
2630 rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
2631 else
2632 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002633 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002634 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002635 if (rc & ~NETDEV_TX_MASK)
2636 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002637 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002638 skb->next = nskb;
2639 return rc;
2640 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002641 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002642 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002643 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002644 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002645
Patrick McHardy572a9d72009-11-10 06:14:14 +00002646out_kfree_gso_skb:
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002647 if (likely(skb->next == NULL)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002648 skb->destructor = DEV_GSO_CB(skb)->destructor;
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002649 consume_skb(skb);
2650 return rc;
2651 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002652out_kfree_skb:
2653 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002654out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002655 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002656}
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002657EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002658
Eric Dumazet1def9232013-01-10 12:36:42 +00002659static void qdisc_pkt_len_init(struct sk_buff *skb)
2660{
2661 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2662
2663 qdisc_skb_cb(skb)->pkt_len = skb->len;
2664
2665 /* To get a more precise estimate of the bytes sent on the wire,
2666 * we add the header size of every segment to pkt_len.
2667 */
2668 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002669 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002670 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002671
Eric Dumazet757b8b12013-01-15 21:14:21 -08002672 /* mac layer + network layer */
2673 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2674
2675 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002676 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2677 hdr_len += tcp_hdrlen(skb);
2678 else
2679 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002680
2681 if (shinfo->gso_type & SKB_GSO_DODGY)
2682 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2683 shinfo->gso_size);
2684
2685 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002686 }
2687}
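/* A short worked example of the estimate above (illustrative numbers,
 * not from any real trace): a TSO skb with skb->len = 65226,
 * gso_size = 1448 and hdr_len = 66 (14 Ethernet + 20 IPv4 + 32 TCP)
 * splits into 65160 / 1448 = 45 segments. pkt_len starts at 65226 and
 * gains (45 - 1) * 66 = 2904 bytes of replicated headers, giving
 * 68130, exactly the 45 * 1514 bytes that hit the wire.
 */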
2688
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002689static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2690 struct net_device *dev,
2691 struct netdev_queue *txq)
2692{
2693 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002694 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002695 int rc;
2696
Eric Dumazet1def9232013-01-10 12:36:42 +00002697 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002698 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002699 /*
2700 * Heuristic to force contended enqueues to serialize on a
2701 * separate lock before trying to get the qdisc main lock.
2702 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2703 * and dequeue packets faster.
2704 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002705 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002706 if (unlikely(contended))
2707 spin_lock(&q->busylock);
2708
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002709 spin_lock(root_lock);
2710 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2711 kfree_skb(skb);
2712 rc = NET_XMIT_DROP;
2713 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002714 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002715 /*
2716 * This is a work-conserving queue; there are no old skbs
2717 * waiting to be sent out; and the qdisc is not running -
2718 * xmit the skb directly.
2719 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002720 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2721 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002722
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002723 qdisc_bstats_update(q, skb);
2724
Eric Dumazet79640a42010-06-02 05:09:29 -07002725 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2726 if (unlikely(contended)) {
2727 spin_unlock(&q->busylock);
2728 contended = false;
2729 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002730 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002731 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002732 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002733
2734 rc = NET_XMIT_SUCCESS;
2735 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002736 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002737 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002738 if (qdisc_run_begin(q)) {
2739 if (unlikely(contended)) {
2740 spin_unlock(&q->busylock);
2741 contended = false;
2742 }
2743 __qdisc_run(q);
2744 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002745 }
2746 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002747 if (unlikely(contended))
2748 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002749 return rc;
2750}
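/* A concrete (hypothetical) scenario for the busylock heuristic: with
 * eight CPUs enqueueing to one qdisc, seven serialize on busylock
 * while the __QDISC_STATE_RUNNING owner contends on root_lock with at
 * most one waiter at a time, so the dequeue loop keeps making
 * progress instead of starving behind a convoy of enqueuers.
 */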
2751
Neil Horman5bc14212011-11-22 05:10:51 +00002752#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2753static void skb_update_prio(struct sk_buff *skb)
2754{
Igor Maravic6977a792011-11-25 07:44:54 +00002755 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002756
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002757 if (!skb->priority && skb->sk && map) {
2758 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2759
2760 if (prioidx < map->priomap_len)
2761 skb->priority = map->priomap[prioidx];
2762 }
Neil Horman5bc14212011-11-22 05:10:51 +00002763}
2764#else
2765#define skb_update_prio(skb)
2766#endif
2767
Eric Dumazet745e20f2010-09-29 13:23:09 -07002768static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002769#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002770
Dave Jonesd29f7492008-07-22 14:09:06 -07002771/**
Michel Machado95603e22012-06-12 10:16:35 +00002772 * dev_loopback_xmit - loop back @skb
2773 * @skb: buffer to transmit
2774 */
2775int dev_loopback_xmit(struct sk_buff *skb)
2776{
2777 skb_reset_mac_header(skb);
2778 __skb_pull(skb, skb_network_offset(skb));
2779 skb->pkt_type = PACKET_LOOPBACK;
2780 skb->ip_summed = CHECKSUM_UNNECESSARY;
2781 WARN_ON(!skb_dst(skb));
2782 skb_dst_force(skb);
2783 netif_rx_ni(skb);
2784 return 0;
2785}
2786EXPORT_SYMBOL(dev_loopback_xmit);
2787
2788/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002789 * dev_queue_xmit - transmit a buffer
2790 * @skb: buffer to transmit
2791 *
2792 * Queue a buffer for transmission to a network device. The caller must
2793 * have set the device and priority and built the buffer before calling
2794 * this function. The function can be called from an interrupt.
2795 *
2796 * A negative errno code is returned on a failure. A success does not
2797 * guarantee the frame will be transmitted as it may be dropped due
2798 * to congestion or traffic shaping.
2799 *
2800 * -----------------------------------------------------------------------------------
2801 * I notice this method can also return errors from the queue disciplines,
2802 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2803 * be positive.
2804 *
2805 * Regardless of the return value, the skb is consumed, so it is currently
2806 * difficult to retry a send to this method. (You can bump the ref count
2807 * before sending to hold a reference for retry if you are careful.)
2808 *
2809 * When calling this method, interrupts MUST be enabled. This is because
2810 * the BH enable code must have IRQs enabled so that it will not deadlock.
2811 * --BLG
2812 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813int dev_queue_xmit(struct sk_buff *skb)
2814{
2815 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002816 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 struct Qdisc *q;
2818 int rc = -ENOMEM;
2819
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002820 skb_reset_mac_header(skb);
2821
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002822 /* Disable soft irqs for various locks below. Also
2823 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002825 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826
Neil Horman5bc14212011-11-22 05:10:51 +00002827 skb_update_prio(skb);
2828
Amerigo Wang8c4c49d2012-09-17 20:16:31 +00002829 txq = netdev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002830 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002831
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002833 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002835 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002837 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002838 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 }
2840
2841 /* The device has no queue. Common case for software devices:
2842 loopback and all sorts of tunnels...
2843
Herbert Xu932ff272006-06-09 12:20:56 -07002844 Really, it is unlikely that netif_tx_lock protection is necessary
2845 here. (E.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 counters.)
2847 However, it is possible that they rely on the protection
2848 made by us here.
2849
2850 Check this and shoot the lock. It is not prone to deadlocks.
2851 Or shoot the noqueue qdisc, which is even simpler 8)
2852 */
2853 if (dev->flags & IFF_UP) {
2854 int cpu = smp_processor_id(); /* ok because BHs are off */
2855
David S. Millerc773e842008-07-08 23:13:53 -07002856 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
Eric Dumazet745e20f2010-09-29 13:23:09 -07002858 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2859 goto recursion_alert;
2860
David S. Millerc773e842008-07-08 23:13:53 -07002861 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862
Tom Herbert734664982011-11-28 16:32:44 +00002863 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002864 __this_cpu_inc(xmit_recursion);
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002865 rc = dev_hard_start_xmit(skb, dev, txq, NULL);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002866 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002867 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002868 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 goto out;
2870 }
2871 }
David S. Millerc773e842008-07-08 23:13:53 -07002872 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002873 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2874 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 } else {
2876 /* Recursion is detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002877 * unfortunately.
2878 */
2879recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002880 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2881 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882 }
2883 }
2884
2885 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002886 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 kfree_skb(skb);
2889 return rc;
2890out:
Herbert Xud4828d82006-06-22 02:28:18 -07002891 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892 return rc;
2893}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002894EXPORT_SYMBOL(dev_queue_xmit);
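/* A minimal usage sketch (hypothetical caller, not code from this
 * file): build an skb, point it at a device and hand it over. The
 * skb is consumed whatever the outcome, so it must not be touched
 * again afterwards.
 *
 *	skb = alloc_skb(hlen + len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	memcpy(skb_put(skb, len), payload, len);
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	rc = dev_queue_xmit(skb);
 *	if (rc != NET_XMIT_SUCCESS)
 *		pr_debug("dropped or shaped: %d\n", rc);
 */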
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895
2896
2897/*=======================================================================
2898 Receiver routines
2899 =======================================================================*/
2900
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002901int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002902EXPORT_SYMBOL(netdev_max_backlog);
2903
Eric Dumazet3b098e22010-05-15 23:57:10 -07002904int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002905int netdev_budget __read_mostly = 300;
2906int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002908/* Called with irq disabled */
2909static inline void ____napi_schedule(struct softnet_data *sd,
2910 struct napi_struct *napi)
2911{
2912 list_add_tail(&napi->poll_list, &sd->poll_list);
2913 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2914}
2915
Eric Dumazetdf334542010-03-24 19:13:54 +00002916#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002917
2918/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002919struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002920EXPORT_SYMBOL(rps_sock_flow_table);
2921
Ingo Molnarc5905af2012-02-24 08:31:31 +01002922struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002923
Ben Hutchingsc4454772011-01-19 11:03:53 +00002924static struct rps_dev_flow *
2925set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2926 struct rps_dev_flow *rflow, u16 next_cpu)
2927{
Ben Hutchings09994d12011-10-03 04:42:46 +00002928 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002929#ifdef CONFIG_RFS_ACCEL
2930 struct netdev_rx_queue *rxqueue;
2931 struct rps_dev_flow_table *flow_table;
2932 struct rps_dev_flow *old_rflow;
2933 u32 flow_id;
2934 u16 rxq_index;
2935 int rc;
2936
2937 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00002938 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2939 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00002940 goto out;
2941 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2942 if (rxq_index == skb_get_rx_queue(skb))
2943 goto out;
2944
2945 rxqueue = dev->_rx + rxq_index;
2946 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2947 if (!flow_table)
2948 goto out;
2949 flow_id = skb->rxhash & flow_table->mask;
2950 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2951 rxq_index, flow_id);
2952 if (rc < 0)
2953 goto out;
2954 old_rflow = rflow;
2955 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00002956 rflow->filter = rc;
2957 if (old_rflow->filter == rflow->filter)
2958 old_rflow->filter = RPS_NO_FILTER;
2959 out:
2960#endif
2961 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00002962 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002963 }
2964
Ben Hutchings09994d12011-10-03 04:42:46 +00002965 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002966 return rflow;
2967}
2968
Tom Herbert0a9627f2010-03-16 08:03:29 +00002969/*
2970 * get_rps_cpu is called from netif_receive_skb and returns the target
2971 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002972 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00002973 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002974static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2975 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002976{
Tom Herbert0a9627f2010-03-16 08:03:29 +00002977 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002978 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07002979 struct rps_dev_flow_table *flow_table;
2980 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002981 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07002982 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002983
Tom Herbert0a9627f2010-03-16 08:03:29 +00002984 if (skb_rx_queue_recorded(skb)) {
2985 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002986 if (unlikely(index >= dev->real_num_rx_queues)) {
2987 WARN_ONCE(dev->real_num_rx_queues > 1,
2988 "%s received packet on queue %u, but number "
2989 "of RX queues is %u\n",
2990 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002991 goto done;
2992 }
2993 rxqueue = dev->_rx + index;
2994 } else
2995 rxqueue = dev->_rx;
2996
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002997 map = rcu_dereference(rxqueue->rps_map);
2998 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08002999 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00003000 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00003001 tcpu = map->cpus[0];
3002 if (cpu_online(tcpu))
3003 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003004 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00003005 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00003006 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003007 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003008 }
3009
Changli Gao2d47b452010-08-17 19:00:56 +00003010 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003011 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00003012 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003013
Tom Herbertfec5e652010-04-16 16:01:27 -07003014 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3015 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3016 if (flow_table && sock_flow_table) {
3017 u16 next_cpu;
3018 struct rps_dev_flow *rflow;
3019
3020 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3021 tcpu = rflow->cpu;
3022
3023 next_cpu = sock_flow_table->ents[skb->rxhash &
3024 sock_flow_table->mask];
3025
3026 /*
3027 * If the desired CPU (where last recvmsg was done) is
3028 * different from current CPU (one in the rx-queue flow
3029 * table entry), switch if one of the following holds:
3030 * - Current CPU is unset (equal to RPS_NO_CPU).
3031 * - Current CPU is offline.
3032 * - The current CPU's queue tail has advanced beyond the
3033 * last packet that was enqueued using this table entry.
3034 * This guarantees that all previous packets for the flow
3035 * have been dequeued, thus preserving in-order delivery.
3036 */
3037 if (unlikely(tcpu != next_cpu) &&
3038 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3039 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003040 rflow->last_qtail)) >= 0)) {
3041 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003042 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003043 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003044
Tom Herbertfec5e652010-04-16 16:01:27 -07003045 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3046 *rflowp = rflow;
3047 cpu = tcpu;
3048 goto done;
3049 }
3050 }
3051
Tom Herbert0a9627f2010-03-16 08:03:29 +00003052 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003053 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003054
3055 if (cpu_online(tcpu)) {
3056 cpu = tcpu;
3057 goto done;
3058 }
3059 }
3060
3061done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003062 return cpu;
3063}
3064
Ben Hutchingsc4454772011-01-19 11:03:53 +00003065#ifdef CONFIG_RFS_ACCEL
3066
3067/**
3068 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3069 * @dev: Device on which the filter was set
3070 * @rxq_index: RX queue index
3071 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3072 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3073 *
3074 * Drivers that implement ndo_rx_flow_steer() should periodically call
3075 * this function for each installed filter and remove the filters for
3076 * which it returns %true.
3077 */
3078bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3079 u32 flow_id, u16 filter_id)
3080{
3081 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3082 struct rps_dev_flow_table *flow_table;
3083 struct rps_dev_flow *rflow;
3084 bool expire = true;
3085 int cpu;
3086
3087 rcu_read_lock();
3088 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3089 if (flow_table && flow_id <= flow_table->mask) {
3090 rflow = &flow_table->flows[flow_id];
3091 cpu = ACCESS_ONCE(rflow->cpu);
3092 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3093 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3094 rflow->last_qtail) <
3095 (int)(10 * flow_table->mask)))
3096 expire = false;
3097 }
3098 rcu_read_unlock();
3099 return expire;
3100}
3101EXPORT_SYMBOL(rps_may_expire_flow);
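/* A hedged sketch of the intended calling pattern (priv, filters and
 * my_remove_hw_filter are made-up driver names): scan the installed
 * filters from periodic work and tear down the expired ones.
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct my_filter *f = &priv->filters[i];
 *
 *		if (f->in_use &&
 *		    rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			my_remove_hw_filter(priv, f);
 *	}
 */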
3102
3103#endif /* CONFIG_RFS_ACCEL */
3104
Tom Herbert0a9627f2010-03-16 08:03:29 +00003105/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003106static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003107{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003108 struct softnet_data *sd = data;
3109
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003110 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003111 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003112}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003113
Tom Herbertfec5e652010-04-16 16:01:27 -07003114#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003115
3116/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003117 * Check if this softnet_data structure belongs to another CPU.
3118 * If yes, queue it to our IPI list and return 1.
3119 * If no, return 0.
3120 */
3121static int rps_ipi_queued(struct softnet_data *sd)
3122{
3123#ifdef CONFIG_RPS
3124 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3125
3126 if (sd != mysd) {
3127 sd->rps_ipi_next = mysd->rps_ipi_list;
3128 mysd->rps_ipi_list = sd;
3129
3130 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3131 return 1;
3132 }
3133#endif /* CONFIG_RPS */
3134 return 0;
3135}
3136
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003137#ifdef CONFIG_NET_FLOW_LIMIT
3138int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3139#endif
3140
3141static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3142{
3143#ifdef CONFIG_NET_FLOW_LIMIT
3144 struct sd_flow_limit *fl;
3145 struct softnet_data *sd;
3146 unsigned int old_flow, new_flow;
3147
3148 if (qlen < (netdev_max_backlog >> 1))
3149 return false;
3150
3151 sd = &__get_cpu_var(softnet_data);
3152
3153 rcu_read_lock();
3154 fl = rcu_dereference(sd->flow_limit);
3155 if (fl) {
3156 new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
3157 old_flow = fl->history[fl->history_head];
3158 fl->history[fl->history_head] = new_flow;
3159
3160 fl->history_head++;
3161 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3162
3163 if (likely(fl->buckets[old_flow]))
3164 fl->buckets[old_flow]--;
3165
3166 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3167 fl->count++;
3168 rcu_read_unlock();
3169 return true;
3170 }
3171 }
3172 rcu_read_unlock();
3173#endif
3174 return false;
3175}
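/* A worked example of the heuristic above, assuming FLOW_LIMIT_HISTORY
 * is 128: the table remembers which bucket each of the last 128
 * enqueued packets hashed to. Once the backlog is at least half full,
 * a new packet is dropped when its bucket would exceed 64 of those 128
 * slots, so no single flow can grab more than about half of the
 * remaining backlog capacity.
 */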
3176
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003177/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003178 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
3179 * queue (may be a remote CPU queue).
3180 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003181static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3182 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003183{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003184 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003185 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003186 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003187
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003188 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003189
3190 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003191
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003192 rps_lock(sd);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003193 qlen = skb_queue_len(&sd->input_pkt_queue);
3194 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Changli Gao6e7676c2010-04-27 15:07:33 -07003195 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003196enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003197 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003198 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003199 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003200 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003201 return NET_RX_SUCCESS;
3202 }
3203
Eric Dumazetebda37c22010-05-06 23:51:21 +00003204 /* Schedule NAPI for backlog device
3205 * We can use a non-atomic operation since we own the queue lock
3206 */
3207 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003208 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003209 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003210 }
3211 goto enqueue;
3212 }
3213
Changli Gaodee42872010-05-02 05:42:16 +00003214 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003215 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003216
Tom Herbert0a9627f2010-03-16 08:03:29 +00003217 local_irq_restore(flags);
3218
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003219 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003220 kfree_skb(skb);
3221 return NET_RX_DROP;
3222}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224/**
3225 * netif_rx - post buffer to the network code
3226 * @skb: buffer to post
3227 *
3228 * This function receives a packet from a device driver and queues it for
3229 * the upper (protocol) levels to process. It always succeeds. The buffer
3230 * may be dropped during processing for congestion control or by the
3231 * protocol layers.
3232 *
3233 * return values:
3234 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 * NET_RX_DROP (packet was dropped)
3236 *
3237 */
3238
3239int netif_rx(struct sk_buff *skb)
3240{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003241 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242
3243 /* if netpoll wants it, pretend we never saw it */
3244 if (netpoll_rx(skb))
3245 return NET_RX_DROP;
3246
Eric Dumazet588f0332011-11-15 04:12:55 +00003247 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248
Koki Sanagicf66ba52010-08-23 18:45:02 +09003249 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003250#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003251 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003252 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003253 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254
Changli Gaocece1942010-08-07 20:35:43 -07003255 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003256 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003257
3258 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003259 if (cpu < 0)
3260 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003261
3262 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3263
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003264 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003265 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003266 } else
3267#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003268 {
3269 unsigned int qtail;
3270 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3271 put_cpu();
3272 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003273 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003275EXPORT_SYMBOL(netif_rx);
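/* The classic driver-side call site, sketched with hypothetical names
 * (rx_buf, pkt_len): wrap the received frame in an skb and post it.
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */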
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276
3277int netif_rx_ni(struct sk_buff *skb)
3278{
3279 int err;
3280
3281 preempt_disable();
3282 err = netif_rx(skb);
3283 if (local_softirq_pending())
3284 do_softirq();
3285 preempt_enable();
3286
3287 return err;
3288}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289EXPORT_SYMBOL(netif_rx_ni);
3290
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291static void net_tx_action(struct softirq_action *h)
3292{
3293 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3294
3295 if (sd->completion_queue) {
3296 struct sk_buff *clist;
3297
3298 local_irq_disable();
3299 clist = sd->completion_queue;
3300 sd->completion_queue = NULL;
3301 local_irq_enable();
3302
3303 while (clist) {
3304 struct sk_buff *skb = clist;
3305 clist = clist->next;
3306
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003307 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003308 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309 __kfree_skb(skb);
3310 }
3311 }
3312
3313 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003314 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315
3316 local_irq_disable();
3317 head = sd->output_queue;
3318 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003319 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 local_irq_enable();
3321
3322 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003323 struct Qdisc *q = head;
3324 spinlock_t *root_lock;
3325
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 head = head->next_sched;
3327
David S. Miller5fb66222008-08-02 20:02:43 -07003328 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003329 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003330 smp_mb__before_clear_bit();
3331 clear_bit(__QDISC_STATE_SCHED,
3332 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003333 qdisc_run(q);
3334 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003335 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003336 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003337 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003338 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003339 } else {
3340 smp_mb__before_clear_bit();
3341 clear_bit(__QDISC_STATE_SCHED,
3342 &q->state);
3343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344 }
3345 }
3346 }
3347}
3348
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003349#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3350 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003351/* This hook is defined here for ATM LANE */
3352int (*br_fdb_test_addr_hook)(struct net_device *dev,
3353 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003354EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003355#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357#ifdef CONFIG_NET_CLS_ACT
3358/* TODO: Maybe we should just force sch_ingress to be compiled in
3359 * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
3360 * instructions (a compare and two extra stores) when ingress is
3361 * not enabled but CONFIG_NET_CLS_ACT is.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003362 * NOTE: This doesn't stop any functionality; if you don't have
3363 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 *
3365 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003366static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003369 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003370 int result = TC_ACT_OK;
3371 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003372
Stephen Hemmingerde384832010-08-01 00:33:23 -07003373 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003374 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3375 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003376 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 }
3378
Herbert Xuf697c3e2007-10-14 00:38:47 -07003379 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3380 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3381
David S. Miller83874002008-07-17 00:53:03 -07003382 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003383 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003384 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003385 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3386 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003387 spin_unlock(qdisc_lock(q));
3388 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003389
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390 return result;
3391}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003392
3393static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3394 struct packet_type **pt_prev,
3395 int *ret, struct net_device *orig_dev)
3396{
Eric Dumazet24824a02010-10-02 06:11:55 +00003397 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3398
3399 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003400 goto out;
3401
3402 if (*pt_prev) {
3403 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3404 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003405 }
3406
Eric Dumazet24824a02010-10-02 06:11:55 +00003407 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003408 case TC_ACT_SHOT:
3409 case TC_ACT_STOLEN:
3410 kfree_skb(skb);
3411 return NULL;
3412 }
3413
3414out:
3415 skb->tc_verd = 0;
3416 return skb;
3417}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418#endif
3419
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003420/**
3421 * netdev_rx_handler_register - register receive handler
3422 * @dev: device to register a handler for
3423 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003424 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003425 *
3426 * Register a receive handler for a device. This handler will then be
3427 * called from __netif_receive_skb. A negative errno code is returned
3428 * on a failure.
3429 *
3430 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003431 *
3432 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003433 */
3434int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003435 rx_handler_func_t *rx_handler,
3436 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003437{
3438 ASSERT_RTNL();
3439
3440 if (dev->rx_handler)
3441 return -EBUSY;
3442
Eric Dumazet00cfec32013-03-29 03:01:22 +00003443 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003444 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003445 rcu_assign_pointer(dev->rx_handler, rx_handler);
3446
3447 return 0;
3448}
3449EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
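/* A hedged registration sketch from a hypothetical upper device
 * (my_rx_handler and my_port are illustrative names, error handling
 * trimmed); note the rtnl requirement documented above:
 *
 *	static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
 *	{
 *		struct my_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		. . . consume or redirect *pskb . . .
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(slave_dev, my_rx_handler, port);
 *	rtnl_unlock();
 */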
3450
3451/**
3452 * netdev_rx_handler_unregister - unregister receive handler
3453 * @dev: device to unregister a handler from
3454 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003455 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003456 *
3457 * The caller must hold the rtnl_mutex.
3458 */
3459void netdev_rx_handler_unregister(struct net_device *dev)
3460{
3461
3462 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003463 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003464 /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3465 * section is guaranteed to see a non-NULL rx_handler_data
3466 * as well.
3467 */
3468 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003469 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003470}
3471EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3472
Mel Gormanb4b9e352012-07-31 16:44:26 -07003473/*
3474 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3475 * the special handling of PFMEMALLOC skbs.
3476 */
3477static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3478{
3479 switch (skb->protocol) {
3480 case __constant_htons(ETH_P_ARP):
3481 case __constant_htons(ETH_P_IP):
3482 case __constant_htons(ETH_P_IPV6):
3483 case __constant_htons(ETH_P_8021Q):
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003484 case __constant_htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003485 return true;
3486 default:
3487 return false;
3488 }
3489}
3490
David S. Miller9754e292013-02-14 15:57:38 -05003491static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492{
3493 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003494 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003495 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003496 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003497 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003499 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500
Eric Dumazet588f0332011-11-15 04:12:55 +00003501 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003502
Koki Sanagicf66ba52010-08-23 18:45:02 +09003503 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003504
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003506 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003507 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003509 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003510
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003511 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003512 if (!skb_transport_header_was_set(skb))
3513 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003514 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515
3516 pt_prev = NULL;
3517
3518 rcu_read_lock();
3519
David S. Miller63d8ea72011-02-28 10:48:59 -08003520another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003521 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003522
3523 __this_cpu_inc(softnet_data.processed);
3524
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003525 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3526 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003527 skb = vlan_untag(skb);
3528 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003529 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003530 }
3531
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532#ifdef CONFIG_NET_CLS_ACT
3533 if (skb->tc_verd & TC_NCLS) {
3534 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3535 goto ncls;
3536 }
3537#endif
3538
David S. Miller9754e292013-02-14 15:57:38 -05003539 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003540 goto skip_taps;
3541
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003543 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003544 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003545 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 pt_prev = ptype;
3547 }
3548 }
3549
Mel Gormanb4b9e352012-07-31 16:44:26 -07003550skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003552 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3553 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003554 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555ncls:
3556#endif
3557
David S. Miller9754e292013-02-14 15:57:38 -05003558 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003559 goto drop;
3560
John Fastabend24257172011-10-10 09:16:41 +00003561 if (vlan_tx_tag_present(skb)) {
3562 if (pt_prev) {
3563 ret = deliver_skb(skb, pt_prev, orig_dev);
3564 pt_prev = NULL;
3565 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003566 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003567 goto another_round;
3568 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003569 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003570 }
3571
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003572 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003573 if (rx_handler) {
3574 if (pt_prev) {
3575 ret = deliver_skb(skb, pt_prev, orig_dev);
3576 pt_prev = NULL;
3577 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003578 switch (rx_handler(&skb)) {
3579 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003580 ret = NET_RX_SUCCESS;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003581 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003582 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003583 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003584 case RX_HANDLER_EXACT:
3585 deliver_exact = true;
3586 case RX_HANDLER_PASS:
3587 break;
3588 default:
3589 BUG();
3590 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003591 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592
Eric Dumazetd4b812d2013-07-18 07:19:26 -07003593 if (unlikely(vlan_tx_tag_present(skb))) {
3594 if (vlan_tx_tag_get_id(skb))
3595 skb->pkt_type = PACKET_OTHERHOST;
3596 /* Note: we might in the future use prio bits
3597 * and set skb->priority like in vlan_do_receive().
3598 * For the time being, just ignore the Priority Code Point.
3599 */
3600 skb->vlan_tci = 0;
3601 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003602
David S. Miller63d8ea72011-02-28 10:48:59 -08003603 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003604 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003605
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003607 list_for_each_entry_rcu(ptype,
3608 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003609 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003610 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3611 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003612 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003613 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 pt_prev = ptype;
3615 }
3616 }
3617
3618 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003619 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003620 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003621 else
3622 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003623 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003624drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003625 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626 kfree_skb(skb);
3627 /* Jamal, now you will not be able to escape explaining
3628 * to me how you were going to use this. :-)
3629 */
3630 ret = NET_RX_DROP;
3631 }
3632
Mel Gormanb4b9e352012-07-31 16:44:26 -07003633unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003635out:
David S. Miller9754e292013-02-14 15:57:38 -05003636 return ret;
3637}
3638
3639static int __netif_receive_skb(struct sk_buff *skb)
3640{
3641 int ret;
3642
3643 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3644 unsigned long pflags = current->flags;
3645
3646 /*
3647 * PFMEMALLOC skbs are special, they should
3648 * - be delivered to SOCK_MEMALLOC sockets only
3649 * - stay away from userspace
3650 * - have bounded memory usage
3651 *
3652 * Use PF_MEMALLOC as this saves us from propagating the allocation
3653 * context down to all allocation sites.
3654 */
3655 current->flags |= PF_MEMALLOC;
3656 ret = __netif_receive_skb_core(skb, true);
3657 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3658 } else
3659 ret = __netif_receive_skb_core(skb, false);
3660
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661 return ret;
3662}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003663
3664/**
3665 * netif_receive_skb - process receive buffer from network
3666 * @skb: buffer to process
3667 *
3668 * netif_receive_skb() is the main receive data processing function.
3669 * It always succeeds. The buffer may be dropped during processing
3670 * for congestion control or by the protocol layers.
3671 *
3672 * This function may only be called from softirq context and interrupts
3673 * should be enabled.
3674 *
3675 * Return values (usually ignored):
3676 * NET_RX_SUCCESS: no congestion
3677 * NET_RX_DROP: packet was dropped
3678 */
3679int netif_receive_skb(struct sk_buff *skb)
3680{
Eric Dumazet588f0332011-11-15 04:12:55 +00003681 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003682
Richard Cochranc1f19b52010-07-17 08:49:36 +00003683 if (skb_defer_rx_timestamp(skb))
3684 return NET_RX_SUCCESS;
3685
Eric Dumazetdf334542010-03-24 19:13:54 +00003686#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003687 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003688 struct rps_dev_flow voidflow, *rflow = &voidflow;
3689 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003690
Eric Dumazet3b098e22010-05-15 23:57:10 -07003691 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003692
Eric Dumazet3b098e22010-05-15 23:57:10 -07003693 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003694
Eric Dumazet3b098e22010-05-15 23:57:10 -07003695 if (cpu >= 0) {
3696 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3697 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003698 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003699 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003700 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003701 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003702#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003703 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003704}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003705EXPORT_SYMBOL(netif_receive_skb);
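/* Sketch of the usual NAPI call site (hypothetical driver, trimmed):
 * frames are fed in from the ->poll() callback, which satisfies the
 * softirq-context rule documented above.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = my_fetch_rx_frame())) {
 *			skb->protocol = eth_type_trans(skb, my_netdev);
 *			netif_receive_skb(skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete(napi);
 *		return done;
 *	}
 */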
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706
Eric Dumazet88751272010-04-19 05:07:33 +00003707/* Network device is going away, flush any packets still pending
3708 * Called with irqs disabled.
3709 */
Changli Gao152102c2010-03-30 20:16:22 +00003710static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003711{
Changli Gao152102c2010-03-30 20:16:22 +00003712 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003713 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003714 struct sk_buff *skb, *tmp;
3715
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003716 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003717 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003718 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003719 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003720 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003721 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003722 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003723 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003724 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003725
3726 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3727 if (skb->dev == dev) {
3728 __skb_unlink(skb, &sd->process_queue);
3729 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003730 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003731 }
3732 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003733}
3734
Herbert Xud565b0a2008-12-15 23:38:52 -08003735static int napi_gro_complete(struct sk_buff *skb)
3736{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003737 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003738 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003739 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003740 int err = -ENOENT;
3741
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003742 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3743
Herbert Xufc59f9a2009-04-14 15:11:06 -07003744 if (NAPI_GRO_CB(skb)->count == 1) {
3745 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003746 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003747 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003748
3749 rcu_read_lock();
3750 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003751 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003752 continue;
3753
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003754 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003755 break;
3756 }
3757 rcu_read_unlock();
3758
3759 if (err) {
3760 WARN_ON(&ptype->list == head);
3761 kfree_skb(skb);
3762 return NET_RX_SUCCESS;
3763 }
3764
3765out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003766 return netif_receive_skb(skb);
3767}
3768
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003769/* napi->gro_list contains packets ordered by age.
3770 * The youngest packets are at its head.
3771 * Complete skbs in reverse order to reduce latencies.
3772 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

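/* Pre-compute ->same_flow for every skb already held on napi->gro_list
 * by comparing the device, the vlan tag and the MAC header against the
 * newly arrived @skb; protocol gro_receive callbacks refine this later.
 */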
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}
}

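/* Core of the GRO engine: try to merge @skb into one of the flows held
 * on napi->gro_list through the matching packet_offload gro_receive
 * callback, flushing a completed flow or holding the new skb as needed.
 */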
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

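/* Translate the verdict of dev_gro_receive() into the final fate of
 * the skb: pass it up the stack, free it, or leave it held on the
 * GRO list.
 */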
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

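/* Prime the GRO control block. When the MAC header sits entirely in
 * the first page fragment (zero headlen), expose it through frag0 so
 * header parsing can run without pulling data into the linear area.
 */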
static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);

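/* Recycle a fully drained skb for the next napi_gro_frags() cycle
 * instead of freeing and reallocating it.
 */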
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

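/* Counterpart of napi_skb_finish() for the frags interface: the MAC
 * header must be restored with eth_type_trans() before the skb can be
 * passed up, and dropped skbs are recycled rather than freed.
 */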
static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}

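/* Detach napi->skb and make sure the Ethernet header is reachable
 * through the GRO header accessors before guessing skb->protocol.
 */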
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);

/*
 * net_rps_action sends any pending IPIs for RPS.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

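/* NAPI poll callback of the per-cpu backlog device; feeds packets
 * queued by netif_rx()/RPS into __netif_receive_skb(), up to @quota.
 */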
static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending IPIs; it's better to send them now
	 * rather than waiting for net_rx_action() to end.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog, so we can use a plain write instead of
			 * clear_bit() and we don't need an smp_mb() memory
			 * barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n, false);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

/* must be called under rcu_read_lock(), as we don't take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
EXPORT_SYMBOL_GPL(napi_by_id);

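/* Assign a globally unique napi_id and publish @napi in napi_hash for
 * busy-poll lookups via napi_by_id(); safe to call multiple times
 * thanks to the NAPI_STATE_HASHED bit.
 */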
void napi_hash_add(struct napi_struct *napi)
{
	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {

		spin_lock(&napi_hash_lock);

		/* 0 is not a valid id and we also skip an id that is
		 * already taken; we expect both events to be extremely rare.
		 */
		napi->napi_id = 0;
		while (!napi->napi_id) {
			napi->napi_id = ++napi_gen_id;
			if (napi_by_id(napi->napi_id))
				napi->napi_id = 0;
		}

		hlist_add_head_rcu(&napi->napi_hash_node,
				   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

		spin_unlock(&napi_hash_lock);
	}
}
EXPORT_SYMBOL_GPL(napi_hash_add);

/* Warning: the caller is responsible for making sure an RCU grace period
 * has elapsed before freeing the memory containing @napi.
 */
void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
		hlist_del_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_del);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

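/* NET_RX_SOFTIRQ handler: run the ->poll() method of every scheduled
 * napi instance, within the netdev_budget and 2 jiffies time limits.
 */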
static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else {
				if (n->gro_list) {
					/* flush too old packets.
					 * If HZ < 1000, flush all packets.
					 */
					local_irq_enable();
					napi_gro_flush(n, HZ >= 1000);
					local_irq_disable();
				}
				list_move_tail(&n->poll_list, &sd->poll_list);
			}
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

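/* One edge in the graph of stacked (upper/lower) net_devices. Each
 * device keeps neighbour-only lists in adj_list and full-closure
 * lists in all_adj_list; both are built from these entries.
 */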
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
						     struct net_device *adj_dev,
						     struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry_rcu(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks only the immediate upper device,
 * not a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->all_adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	if (iter)
		*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	if (iter)
		*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

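/* Add (or take another reference on) the @dev -> @adj_dev edge on
 * @dev_list, pin @adj_dev and create the matching sysfs link(s).
 */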
static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	char linkname[IFNAMSIZ+7];
	int ret;

	adj = __netdev_find_adj(dev, adj_dev, dev_list);

	if (adj) {
		adj->ref_nr++;
		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);

	if (dev_list == &dev->adj_list.lower) {
		sprintf(linkname, "lower_%s", adj_dev->name);
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), linkname);
		if (ret)
			goto free_adj;
	} else if (dev_list == &dev->adj_list.upper) {
		sprintf(linkname, "upper_%s", adj_dev->name);
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), linkname);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (dev_list == &dev->adj_list.lower) {
		sprintf(linkname, "lower_%s", adj_dev->name);
		sysfs_remove_link(&(dev->dev.kobj), linkname);
	} else if (dev_list == &dev->adj_list.upper) {
		sprintf(linkname, "upper_%s", adj_dev->name);
		sysfs_remove_link(&(dev->dev.kobj), linkname);
	}

free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

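/* Drop one reference on the @dev -> @adj_dev edge and, once the last
 * reference is gone, tear down the sysfs links and release @adj_dev.
 */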
void __netdev_adjacent_dev_remove(struct net_device *dev,
				  struct net_device *adj_dev,
				  struct list_head *dev_list)
{
	struct netdev_adjacent *adj;
	char linkname[IFNAMSIZ+7];

	adj = __netdev_find_adj(dev, adj_dev, dev_list);

	if (!adj) {
		pr_err("tried to remove device %s from %s\n",
		       dev->name, adj_dev->name);
		BUG();
	}

	if (adj->ref_nr > 1) {
		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
			 adj->ref_nr-1);
		adj->ref_nr--;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (dev_list == &dev->adj_list.lower) {
		sprintf(linkname, "lower_%s", adj_dev->name);
		sysfs_remove_link(&(dev->dev.kobj), linkname);
	} else if (dev_list == &dev->adj_list.upper) {
		sprintf(linkname, "upper_%s", adj_dev->name);
		sysfs_remove_link(&(dev->dev.kobj), linkname);
	}

	list_del_rcu(&adj->list);
	pr_debug("dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

int __netdev_adjacent_dev_link_lists(struct net_device *dev,
				     struct net_device *upper_dev,
				     struct list_head *up_list,
				     struct list_head *down_list,
				     void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
					   master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
					   false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
		return ret;
	}

	return 0;
}

int __netdev_adjacent_dev_link(struct net_device *dev,
			       struct net_device *upper_dev)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->all_adj_list.upper,
						&upper_dev->all_adj_list.lower,
						NULL, false);
}

void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					struct net_device *upper_dev,
					struct list_head *up_list,
					struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}

void __netdev_adjacent_dev_unlink(struct net_device *dev,
				  struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->all_adj_list.upper,
					   &upper_dev->all_adj_list.lower);
}

int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private, bool master)
{
	int ret = __netdev_adjacent_dev_link(dev, upper_dev);

	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
					       &dev->adj_list.upper,
					       &upper_dev->adj_list.lower,
					       private, master);
	if (ret) {
		__netdev_adjacent_dev_unlink(dev, upper_dev);
		return ret;
	}

	return 0;
}

void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
					    struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink(dev, upper_dev);
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

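/* Link @upper_dev above @dev and interlink the two adjacency closures
 * so that every lower device of @dev sees every upper device of
 * @upper_dev, unwinding all partial links on failure.
 */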
static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *private)
{
	struct netdev_adjacent *i, *j, *to_i, *to_j;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check that dev is not already an upper device
	 * of upper_dev.
	 */
	if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
		return -EBUSY;

	if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
						   master);
	if (ret)
		return ret;

	/* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
	 * vice versa, and don't forget the devices themselves. All of these
	 * links are non-neighbours.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			pr_debug("Interlinking %s with %s, non-neighbour\n",
				 i->dev->name, j->dev->name);
			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
			if (ret)
				goto rollback_mesh;
		}
	}

	/* add dev to every upper_dev's upper device */
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		pr_debug("linking %s's upper device %s with %s\n",
			 upper_dev->name, i->dev->name, dev->name);
		ret = __netdev_adjacent_dev_link(dev, i->dev);
		if (ret)
			goto rollback_upper_mesh;
	}

	/* add upper_dev to every dev's lower device */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		pr_debug("linking %s's lower device %s with %s\n", dev->name,
			 i->dev->name, upper_dev->name);
		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
		if (ret)
			goto rollback_lower_mesh;
	}

	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
	return 0;

rollback_lower_mesh:
	to_i = i;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
	}

	i = NULL;

rollback_upper_mesh:
	to_i = i;
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(dev, i->dev);
	}

	i = j = NULL;

rollback_mesh:
	to_i = i;
	to_j = j;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			if (i == to_i && j == to_j)
				break;
			__netdev_adjacent_dev_unlink(i->dev, j->dev);
		}
		if (i == to_i)
			break;
	}

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, private);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link_private);

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_adjacent *i, *j;
	ASSERT_RTNL();

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	/* Here is the tricky part. We must remove all dev's lower
	 * devices from all upper_dev's upper devices and vice
	 * versa, to maintain the graph relationship.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
			__netdev_adjacent_dev_unlink(i->dev, j->dev);

	/* also remove the devices themselves from the lower/upper device
	 * lists
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);

	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
		__netdev_adjacent_dev_unlink(dev, i->dev);

	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
				       struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

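/* Core of dev_set_promiscuity(): adjust the reference count, toggle
 * IFF_PROMISC, emit an audit record and optionally notify userspace.
 */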
static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave promiscuity untouched and
		 * return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07005050/**
5051 * dev_set_promiscuity - update promiscuity count on a device
5052 * @dev: device
5053 * @inc: modifier
5054 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005055 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056 * remains above zero the interface remains promiscuous. Once it hits zero
5057 * the device reverts back to normal filtering operation. A negative inc
5058 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005059 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060 */
Wang Chendad9b332008-06-18 01:48:28 -07005061int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062{
Eric Dumazetb536db92011-11-30 21:42:26 +00005063 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005064 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005066 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005067 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005068 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005069 if (dev->flags != old_flags)
5070 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005071 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005073EXPORT_SYMBOL(dev_set_promiscuity);
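/*
 * A minimal usage sketch (illustrative, not part of this file): a
 * capture-style user takes one promiscuity reference while sniffing and
 * drops it when done. All of this must run under RTNL; "dev" and the
 * error handling are assumed.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	if (!err) {
 *		... receive traffic ...
 *		dev_set_promiscuity(dev, -1);
 *	}
 *	rtnl_unlock();
 */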
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005075static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005077 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078
Patrick McHardy24023452007-07-14 18:51:31 -07005079 ASSERT_RTNL();
5080
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005082 dev->allmulti += inc;
5083 if (dev->allmulti == 0) {
5084 /*
5085 * Avoid overflow.
 5086	 * If inc causes overflow, leave allmulti untouched and return an error.
5087 */
5088 if (inc < 0)
5089 dev->flags &= ~IFF_ALLMULTI;
5090 else {
5091 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005092			pr_warn("%s: allmulti counter overflowed, cannot set allmulti; allmulti on this device may be broken.\n",
5093 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005094 return -EOVERFLOW;
5095 }
5096 }
Patrick McHardy24023452007-07-14 18:51:31 -07005097 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005098 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005099 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005100 if (notify)
5101 __dev_notify_flags(dev, old_flags,
5102 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07005103 }
Wang Chendad9b332008-06-18 01:48:28 -07005104 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005105}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005106
5107/**
5108 * dev_set_allmulti - update allmulti count on a device
5109 * @dev: device
5110 * @inc: modifier
5111 *
 5112 *	Add or remove reception of all multicast frames on a device. While the
 5113 *	count in the device remains above zero, the interface remains listening
 5114 *	to all multicast frames. Once it hits zero, the device reverts to normal
5115 * filtering operation. A negative @inc value is used to drop the counter
5116 * when releasing a resource needing all multicasts.
5117 * Return 0 if successful or a negative errno code on error.
5118 */
5119
5120int dev_set_allmulti(struct net_device *dev, int inc)
5121{
5122 return __dev_set_allmulti(dev, inc, true);
5123}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005124EXPORT_SYMBOL(dev_set_allmulti);
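/*
 * A minimal usage sketch (illustrative): a stacking driver such as a
 * bond or team propagates its own allmulti state to a lower device.
 * This must run under RTNL; "lower_dev" is an assumed name.
 *
 *	err = dev_set_allmulti(lower_dev, 1);
 *	...
 *	dev_set_allmulti(lower_dev, -1);
 */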
Patrick McHardy4417da62007-06-27 01:28:10 -07005125
5126/*
 5127 *	Upload unicast and multicast address lists to the device and
 5128 *	configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005129 *	filtering, it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005130 * are present.
5131 */
5132void __dev_set_rx_mode(struct net_device *dev)
5133{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005134 const struct net_device_ops *ops = dev->netdev_ops;
5135
Patrick McHardy4417da62007-06-27 01:28:10 -07005136 /* dev_open will call this function so the list will stay sane. */
5137 if (!(dev->flags&IFF_UP))
5138 return;
5139
5140 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005141 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005142
Jiri Pirko01789342011-08-16 06:29:00 +00005143 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005144		/* Unicast address changes may only happen under the rtnl,
5145 * therefore calling __dev_set_promiscuity here is safe.
5146 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005147 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005148 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005149 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005150 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005151 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005152 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005153 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005154 }
Jiri Pirko01789342011-08-16 06:29:00 +00005155
5156 if (ops->ndo_set_rx_mode)
5157 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005158}
5159
5160void dev_set_rx_mode(struct net_device *dev)
5161{
David S. Millerb9e40852008-07-15 00:15:08 -07005162 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005163 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005164 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165}
5166
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005167/**
5168 * dev_get_flags - get flags reported to userspace
5169 * @dev: device
5170 *
5171 * Get the combination of flag bits exported through APIs to userspace.
5172 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005173unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174{
Eric Dumazet95c96172012-04-15 05:58:06 +00005175 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176
5177 flags = (dev->flags & ~(IFF_PROMISC |
5178 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005179 IFF_RUNNING |
5180 IFF_LOWER_UP |
5181 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182 (dev->gflags & (IFF_PROMISC |
5183 IFF_ALLMULTI));
5184
Stefan Rompfb00055a2006-03-20 17:09:11 -08005185 if (netif_running(dev)) {
5186 if (netif_oper_up(dev))
5187 flags |= IFF_RUNNING;
5188 if (netif_carrier_ok(dev))
5189 flags |= IFF_LOWER_UP;
5190 if (netif_dormant(dev))
5191 flags |= IFF_DORMANT;
5192 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193
5194 return flags;
5195}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005196EXPORT_SYMBOL(dev_get_flags);
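/*
 * A minimal reader sketch (illustrative): dev_get_flags() folds the
 * operstate bits in, so a caller sees link state the same way userspace
 * does; handle_link_up() is a hypothetical helper.
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	if (flags & IFF_RUNNING)
 *		handle_link_up(dev);
 */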
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197
Patrick McHardybd380812010-02-26 06:34:53 +00005198int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199{
Eric Dumazetb536db92011-11-30 21:42:26 +00005200 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005201 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202
Patrick McHardy24023452007-07-14 18:51:31 -07005203 ASSERT_RTNL();
5204
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205 /*
5206 * Set the flags on our device.
5207 */
5208
5209 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5210 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5211 IFF_AUTOMEDIA)) |
5212 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5213 IFF_ALLMULTI));
5214
5215 /*
5216 * Load in the correct multicast list now the flags have changed.
5217 */
5218
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005219 if ((old_flags ^ flags) & IFF_MULTICAST)
5220 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005221
Patrick McHardy4417da62007-06-27 01:28:10 -07005222 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223
5224 /*
 5225	 *	Have we downed the interface? We handle IFF_UP ourselves
5226 * according to user attempts to set it, rather than blindly
5227 * setting it.
5228 */
5229
5230 ret = 0;
5231 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005232 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233
5234 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005235 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236 }
5237
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005239 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005240 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005241
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005243
5244 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5245 if (dev->flags != old_flags)
5246 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247 }
5248
5249 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 5250	   is important. Some (broken) drivers set IFF_PROMISC when
 5251	   IFF_ALLMULTI is requested, without asking us and without reporting it.
5252 */
5253 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005254 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5255
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005257 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 }
5259
Patrick McHardybd380812010-02-26 06:34:53 +00005260 return ret;
5261}
5262
Nicolas Dichtela528c212013-09-25 12:02:44 +02005263void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5264 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00005265{
5266 unsigned int changes = dev->flags ^ old_flags;
5267
Nicolas Dichtela528c212013-09-25 12:02:44 +02005268 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005269 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005270
Patrick McHardybd380812010-02-26 06:34:53 +00005271 if (changes & IFF_UP) {
5272 if (dev->flags & IFF_UP)
5273 call_netdevice_notifiers(NETDEV_UP, dev);
5274 else
5275 call_netdevice_notifiers(NETDEV_DOWN, dev);
5276 }
5277
5278 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00005279 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5280 struct netdev_notifier_change_info change_info;
5281
5282 change_info.flags_changed = changes;
5283 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5284 &change_info.info);
5285 }
Patrick McHardybd380812010-02-26 06:34:53 +00005286}
5287
5288/**
5289 * dev_change_flags - change device settings
5290 * @dev: device
5291 * @flags: device state flags
5292 *
5293 * Change settings on device based state flags. The flags are
5294 * in the userspace exported format.
5295 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005296int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005297{
Eric Dumazetb536db92011-11-30 21:42:26 +00005298 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005299 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00005300
5301 ret = __dev_change_flags(dev, flags);
5302 if (ret < 0)
5303 return ret;
5304
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005305 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005306 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005307 return ret;
5308}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005309EXPORT_SYMBOL(dev_change_flags);
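/*
 * A minimal sketch (illustrative) of the common "ifconfig up" path:
 * callers hold RTNL and pass the full desired flag word, not a delta.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */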
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005311/**
5312 * dev_set_mtu - Change maximum transfer unit
5313 * @dev: device
5314 * @new_mtu: new transfer unit
5315 *
5316 * Change the maximum transfer size of the network device.
5317 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318int dev_set_mtu(struct net_device *dev, int new_mtu)
5319{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005320 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005321 int err;
5322
5323 if (new_mtu == dev->mtu)
5324 return 0;
5325
 5326	/* MTU must not be negative. */
5327 if (new_mtu < 0)
5328 return -EINVAL;
5329
5330 if (!netif_device_present(dev))
5331 return -ENODEV;
5332
5333 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005334 if (ops->ndo_change_mtu)
5335 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005336 else
5337 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005338
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005339 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005340 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341 return err;
5342}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005343EXPORT_SYMBOL(dev_set_mtu);
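/*
 * A minimal sketch (illustrative), e.g. switching to jumbo frames from
 * a configuration path that already holds RTNL; 9000 is an assumed MTU.
 *
 *	err = dev_set_mtu(dev, 9000);
 *	if (err)
 *		netdev_err(dev, "failed to set MTU: %d\n", err);
 */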
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005345/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005346 * dev_set_group - Change group this device belongs to
5347 * @dev: device
5348 * @new_group: group this device should belong to
5349 */
5350void dev_set_group(struct net_device *dev, int new_group)
5351{
5352 dev->group = new_group;
5353}
5354EXPORT_SYMBOL(dev_set_group);
5355
5356/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005357 * dev_set_mac_address - Change Media Access Control Address
5358 * @dev: device
5359 * @sa: new address
5360 *
5361 * Change the hardware (MAC) address of the device
5362 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5364{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005365 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005366 int err;
5367
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005368 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005369 return -EOPNOTSUPP;
5370 if (sa->sa_family != dev->type)
5371 return -EINVAL;
5372 if (!netif_device_present(dev))
5373 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005374 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005375 if (err)
5376 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005377 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005378 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005379 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005380 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005382EXPORT_SYMBOL(dev_set_mac_address);
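/*
 * A minimal sketch (illustrative): build a struct sockaddr whose family
 * matches dev->type (ARPHRD_ETHER for Ethernet) and whose sa_data holds
 * the new hardware address; "new_addr" is an assumed u8[ETH_ALEN] buffer.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */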
Linus Torvalds1da177e2005-04-16 15:20:36 -07005383
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005384/**
5385 * dev_change_carrier - Change device carrier
5386 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00005387 *	@new_carrier: new carrier state (%true means carrier on)
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005388 *
 5389 *	Change the device carrier state as seen by the networking stack.
5390 */
5391int dev_change_carrier(struct net_device *dev, bool new_carrier)
5392{
5393 const struct net_device_ops *ops = dev->netdev_ops;
5394
5395 if (!ops->ndo_change_carrier)
5396 return -EOPNOTSUPP;
5397 if (!netif_device_present(dev))
5398 return -ENODEV;
5399 return ops->ndo_change_carrier(dev, new_carrier);
5400}
5401EXPORT_SYMBOL(dev_change_carrier);
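/*
 * A minimal sketch (illustrative): this is the path taken when userspace
 * sets IFLA_CARRIER, e.g. forcing a soft device's link down. It returns
 * -EOPNOTSUPP unless the driver implements ndo_change_carrier.
 *
 *	err = dev_change_carrier(dev, false);
 */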
5402
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02005404 * dev_get_phys_port_id - Get device physical port ID
5405 * @dev: device
5406 * @ppid: port ID
5407 *
5408 * Get device physical port ID
5409 */
5410int dev_get_phys_port_id(struct net_device *dev,
5411 struct netdev_phys_port_id *ppid)
5412{
5413 const struct net_device_ops *ops = dev->netdev_ops;
5414
5415 if (!ops->ndo_get_phys_port_id)
5416 return -EOPNOTSUPP;
5417 return ops->ndo_get_phys_port_id(dev, ppid);
5418}
5419EXPORT_SYMBOL(dev_get_phys_port_id);
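/*
 * A minimal sketch (illustrative): two netdevs backed by the same
 * physical port report equal IDs, which a caller can compare directly;
 * "dev1" and "dev2" are assumed.
 *
 *	struct netdev_phys_port_id a, b;
 *
 *	if (!dev_get_phys_port_id(dev1, &a) &&
 *	    !dev_get_phys_port_id(dev2, &b) &&
 *	    a.id_len == b.id_len && !memcmp(a.id, b.id, a.id_len))
 *		... same physical port ...
 */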
5420
5421/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005423 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 *
5425 * Returns a suitable unique value for a new device interface
5426 * number. The caller must hold the rtnl semaphore or the
5427 * dev_base_lock to be sure it remains unique.
5428 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005429static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005431 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432 for (;;) {
5433 if (++ifindex <= 0)
5434 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005435 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005436 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005437 }
5438}
5439
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005441static LIST_HEAD(net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005442static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005444static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005447 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005448}
5449
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005450static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005451{
Krishna Kumare93737b2009-12-08 22:26:02 +00005452 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005453 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005454
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005455 BUG_ON(dev_boot_phase);
5456 ASSERT_RTNL();
5457
Krishna Kumare93737b2009-12-08 22:26:02 +00005458 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005459 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00005460 * for initialization unwind. Remove those
5461 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005462 */
5463 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005464 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5465 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005466
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005467 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005468 list_del(&dev->unreg_list);
5469 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005470 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005471 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005472 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005473 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005474
Octavian Purdila44345722010-12-13 12:44:07 +00005475 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005476 list_for_each_entry(dev, head, unreg_list)
5477 list_add_tail(&dev->close_list, &close_head);
5478 dev_close_many(&close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005479
Octavian Purdila44345722010-12-13 12:44:07 +00005480 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005481 /* And unlink it from device chain. */
5482 unlist_netdevice(dev);
5483
5484 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005485 }
5486
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005487 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005488
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005489 list_for_each_entry(dev, head, unreg_list) {
5490 /* Shutdown queueing discipline. */
5491 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005492
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005493
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005494		/* Notify protocols that we are about to destroy
 5495		   this device. They should clean up all of their state.
5496 */
5497 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5498
Patrick McHardya2835762010-02-26 06:34:51 +00005499 if (!dev->rtnl_link_ops ||
5500 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005501 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Patrick McHardya2835762010-02-26 06:34:51 +00005502
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005503 /*
5504 * Flush the unicast and multicast chains
5505 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005506 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005507 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005508
5509 if (dev->netdev_ops->ndo_uninit)
5510 dev->netdev_ops->ndo_uninit(dev);
5511
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005512 /* Notifier chain MUST detach us all upper devices. */
5513 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005514
5515 /* Remove entries from kobject tree */
5516 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00005517#ifdef CONFIG_XPS
5518 /* Remove XPS queueing entries */
5519 netif_reset_xps_queues_gt(dev, 0);
5520#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005521 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005522
Eric W. Biederman850a5452011-10-13 22:25:23 +00005523 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005524
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005525 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005526 dev_put(dev);
5527}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005528
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005529static void rollback_registered(struct net_device *dev)
5530{
5531 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005532
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005533 list_add(&dev->unreg_list, &single);
5534 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005535 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005536}
5537
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005538static netdev_features_t netdev_fix_features(struct net_device *dev,
5539 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005540{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005541 /* Fix illegal checksum combinations */
5542 if ((features & NETIF_F_HW_CSUM) &&
5543 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005544 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005545 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5546 }
5547
Herbert Xub63365a2008-10-23 01:11:29 -07005548 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005549 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005550 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005551 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005552 }
5553
Pravin B Shelarec5f0612013-03-07 09:28:01 +00005554 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5555 !(features & NETIF_F_IP_CSUM)) {
5556 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5557 features &= ~NETIF_F_TSO;
5558 features &= ~NETIF_F_TSO_ECN;
5559 }
5560
5561 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5562 !(features & NETIF_F_IPV6_CSUM)) {
5563 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5564 features &= ~NETIF_F_TSO6;
5565 }
5566
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005567 /* TSO ECN requires that TSO is present as well. */
5568 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5569 features &= ~NETIF_F_TSO_ECN;
5570
Michał Mirosław212b5732011-02-15 16:59:16 +00005571 /* Software GSO depends on SG. */
5572 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005573 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005574 features &= ~NETIF_F_GSO;
5575 }
5576
Michał Mirosławacd11302011-01-24 15:45:15 -08005577 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005578 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005579 /* maybe split UFO into V4 and V6? */
5580 if (!((features & NETIF_F_GEN_CSUM) ||
5581 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5582 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005583 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005584 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005585 features &= ~NETIF_F_UFO;
5586 }
5587
5588 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005589 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005590 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005591 features &= ~NETIF_F_UFO;
5592 }
5593 }
5594
5595 return features;
5596}
Herbert Xub63365a2008-10-23 01:11:29 -07005597
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005598int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005599{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005600 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005601 int err = 0;
5602
Michał Mirosław87267482011-04-12 09:56:38 +00005603 ASSERT_RTNL();
5604
Michał Mirosław5455c692011-02-15 16:59:17 +00005605 features = netdev_get_wanted_features(dev);
5606
5607 if (dev->netdev_ops->ndo_fix_features)
5608 features = dev->netdev_ops->ndo_fix_features(dev, features);
5609
5610 /* driver might be less strict about feature dependencies */
5611 features = netdev_fix_features(dev, features);
5612
5613 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005614 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005615
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005616 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5617 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005618
5619 if (dev->netdev_ops->ndo_set_features)
5620 err = dev->netdev_ops->ndo_set_features(dev, features);
5621
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005622 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005623 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005624 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5625 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005626 return -1;
5627 }
5628
5629 if (!err)
5630 dev->features = features;
5631
5632 return 1;
5633}
5634
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005635/**
5636 * netdev_update_features - recalculate device features
5637 * @dev: the device to check
5638 *
5639 * Recalculate dev->features set and send notifications if it
5640 * has changed. Should be called after driver or hardware dependent
5641 * conditions might have changed that influence the features.
5642 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005643void netdev_update_features(struct net_device *dev)
5644{
5645 if (__netdev_update_features(dev))
5646 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005647}
5648EXPORT_SYMBOL(netdev_update_features);
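/*
 * A minimal sketch (illustrative): after changing some driver state that
 * its ndo_fix_features() consults ("priv->rx_csum_ok" is an assumed
 * field), the driver re-runs the feature computation under RTNL.
 *
 *	priv->rx_csum_ok = false;
 *	netdev_update_features(dev);
 */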
5649
Linus Torvalds1da177e2005-04-16 15:20:36 -07005650/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005651 * netdev_change_features - recalculate device features
5652 * @dev: the device to check
5653 *
5654 * Recalculate dev->features set and send notifications even
5655 * if they have not changed. Should be called instead of
5656 * netdev_update_features() if also dev->vlan_features might
5657 * have changed to allow the changes to be propagated to stacked
5658 * VLAN devices.
5659 */
5660void netdev_change_features(struct net_device *dev)
5661{
5662 __netdev_update_features(dev);
5663 netdev_features_change(dev);
5664}
5665EXPORT_SYMBOL(netdev_change_features);
5666
5667/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005668 * netif_stacked_transfer_operstate - transfer operstate
5669 * @rootdev: the root or lower level device to transfer state from
5670 * @dev: the device to transfer operstate to
5671 *
5672 * Transfer operational state from root to device. This is normally
5673 * called when a stacking relationship exists between the root
 5674 *	device and the device (a leaf device).
5675 */
5676void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5677 struct net_device *dev)
5678{
5679 if (rootdev->operstate == IF_OPER_DORMANT)
5680 netif_dormant_on(dev);
5681 else
5682 netif_dormant_off(dev);
5683
5684 if (netif_carrier_ok(rootdev)) {
5685 if (!netif_carrier_ok(dev))
5686 netif_carrier_on(dev);
5687 } else {
5688 if (netif_carrier_ok(dev))
5689 netif_carrier_off(dev);
5690 }
5691}
5692EXPORT_SYMBOL(netif_stacked_transfer_operstate);
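/*
 * A minimal sketch (illustrative): a VLAN-style driver mirrors the lower
 * device's state from its NETDEV_CHANGE notifier; "lower" and "vlandev"
 * are assumed names.
 *
 *	case NETDEV_CHANGE:
 *		netif_stacked_transfer_operstate(lower, vlandev);
 *		break;
 */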
5693
Tom Herbertbf264142010-11-26 08:36:09 +00005694#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005695static int netif_alloc_rx_queues(struct net_device *dev)
5696{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005697 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005698 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005699
Tom Herbertbd25fa72010-10-18 18:00:16 +00005700 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005701
Tom Herbertbd25fa72010-10-18 18:00:16 +00005702 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005703 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005704 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005705
Tom Herbertbd25fa72010-10-18 18:00:16 +00005706 dev->_rx = rx;
5707
Tom Herbertbd25fa72010-10-18 18:00:16 +00005708 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005709 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005710 return 0;
5711}
Tom Herbertbf264142010-11-26 08:36:09 +00005712#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005713
Changli Gaoaa942102010-12-04 02:31:41 +00005714static void netdev_init_one_queue(struct net_device *dev,
5715 struct netdev_queue *queue, void *_unused)
5716{
5717 /* Initialize queue lock */
5718 spin_lock_init(&queue->_xmit_lock);
5719 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5720 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005721 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005722 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005723#ifdef CONFIG_BQL
5724 dql_init(&queue->dql, HZ);
5725#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005726}
5727
Eric Dumazet60877a32013-06-20 01:15:51 -07005728static void netif_free_tx_queues(struct net_device *dev)
5729{
5730 if (is_vmalloc_addr(dev->_tx))
5731 vfree(dev->_tx);
5732 else
5733 kfree(dev->_tx);
5734}
5735
Tom Herberte6484932010-10-18 18:04:39 +00005736static int netif_alloc_netdev_queues(struct net_device *dev)
5737{
5738 unsigned int count = dev->num_tx_queues;
5739 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07005740 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00005741
Eric Dumazet60877a32013-06-20 01:15:51 -07005742 BUG_ON(count < 1 || count > 0xffff);
Tom Herberte6484932010-10-18 18:04:39 +00005743
Eric Dumazet60877a32013-06-20 01:15:51 -07005744 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
5745 if (!tx) {
5746 tx = vzalloc(sz);
5747 if (!tx)
5748 return -ENOMEM;
5749 }
Tom Herberte6484932010-10-18 18:04:39 +00005750 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005751
Tom Herberte6484932010-10-18 18:04:39 +00005752 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5753 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005754
5755 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005756}
5757
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005758/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005759 * register_netdevice - register a network device
5760 * @dev: device to register
5761 *
5762 * Take a completed network device structure and add it to the kernel
5763 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5764 * chain. 0 is returned on success. A negative errno code is returned
5765 * on a failure to set up the device, or if the name is a duplicate.
5766 *
5767 * Callers must hold the rtnl semaphore. You may want
5768 * register_netdev() instead of this.
5769 *
5770 * BUGS:
5771 * The locking appears insufficient to guarantee two parallel registers
5772 * will not get the same name.
5773 */
5774
5775int register_netdevice(struct net_device *dev)
5776{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005777 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005778 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005779
5780 BUG_ON(dev_boot_phase);
5781 ASSERT_RTNL();
5782
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005783 might_sleep();
5784
Linus Torvalds1da177e2005-04-16 15:20:36 -07005785 /* When net_device's are persistent, this will be fatal. */
5786 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005787 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005788
David S. Millerf1f28aa2008-07-15 00:08:33 -07005789 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005790 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005791
Linus Torvalds1da177e2005-04-16 15:20:36 -07005792 dev->iflink = -1;
5793
Gao feng828de4f2012-09-13 20:58:27 +00005794 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00005795 if (ret < 0)
5796 goto out;
5797
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005799 if (dev->netdev_ops->ndo_init) {
5800 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005801 if (ret) {
5802 if (ret > 0)
5803 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005804 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005805 }
5806 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005807
Patrick McHardyf6469682013-04-19 02:04:27 +00005808 if (((dev->hw_features | dev->features) &
5809 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00005810 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5811 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5812 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5813 ret = -EINVAL;
5814 goto err_uninit;
5815 }
5816
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00005817 ret = -EBUSY;
5818 if (!dev->ifindex)
5819 dev->ifindex = dev_new_index(net);
5820 else if (__dev_get_by_index(net, dev->ifindex))
5821 goto err_uninit;
5822
Linus Torvalds1da177e2005-04-16 15:20:36 -07005823 if (dev->iflink == -1)
5824 dev->iflink = dev->ifindex;
5825
Michał Mirosław5455c692011-02-15 16:59:17 +00005826 /* Transfer changeable features to wanted_features and enable
5827 * software offloads (GSO and GRO).
5828 */
5829 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00005830 dev->features |= NETIF_F_SOFT_FEATURES;
5831 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005832
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005833 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00005834 if (!(dev->flags & IFF_LOOPBACK)) {
5835 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5836 if (dev->features & NETIF_F_ALL_CSUM) {
5837 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5838 dev->features |= NETIF_F_NOCACHE_COPY;
5839 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005840 }
5841
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005842 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00005843 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005844 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00005845
Pravin B Shelaree579672013-03-07 09:28:08 +00005846 /* Make NETIF_F_SG inheritable to tunnel devices.
5847 */
5848 dev->hw_enc_features |= NETIF_F_SG;
5849
Simon Horman0d89d202013-05-23 21:02:52 +00005850 /* Make NETIF_F_SG inheritable to MPLS.
5851 */
5852 dev->mpls_features |= NETIF_F_SG;
5853
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005854 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5855 ret = notifier_to_errno(ret);
5856 if (ret)
5857 goto err_uninit;
5858
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005859 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005860 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005861 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005862 dev->reg_state = NETREG_REGISTERED;
5863
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005864 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00005865
Linus Torvalds1da177e2005-04-16 15:20:36 -07005866 /*
5867 * Default initial state at registry is that the
5868 * device is present.
5869 */
5870
5871 set_bit(__LINK_STATE_PRESENT, &dev->state);
5872
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01005873 linkwatch_init_dev(dev);
5874
Linus Torvalds1da177e2005-04-16 15:20:36 -07005875 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005876 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005877 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005878 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005879
Jiri Pirko948b3372013-01-08 01:38:25 +00005880 /* If the device has permanent device address, driver should
5881 * set dev_addr and also addr_assign_type should be set to
5882 * NET_ADDR_PERM (default value).
5883 */
5884 if (dev->addr_assign_type == NET_ADDR_PERM)
5885 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5886
Linus Torvalds1da177e2005-04-16 15:20:36 -07005887 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005888 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005889 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005890 if (ret) {
5891 rollback_registered(dev);
5892 dev->reg_state = NETREG_UNREGISTERED;
5893 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005894 /*
5895 * Prevent userspace races by waiting until the network
5896 * device is fully setup before sending notifications.
5897 */
Patrick McHardya2835762010-02-26 06:34:51 +00005898 if (!dev->rtnl_link_ops ||
5899 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005900 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005901
5902out:
5903 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005904
5905err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005906 if (dev->netdev_ops->ndo_uninit)
5907 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005908 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005910EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005911
5912/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005913 * init_dummy_netdev - init a dummy network device for NAPI
5914 * @dev: device to init
5915 *
 5916 *	This takes a network device structure and initializes the minimum
 5917 *	number of fields so it can be used to schedule NAPI polls without
5918 * registering a full blown interface. This is to be used by drivers
5919 * that need to tie several hardware interfaces to a single NAPI
5920 * poll scheduler due to HW limitations.
5921 */
5922int init_dummy_netdev(struct net_device *dev)
5923{
5924 /* Clear everything. Note we don't initialize spinlocks
 5925	 * as they aren't supposed to be taken by any of the
5926 * NAPI code and this dummy netdev is supposed to be
5927 * only ever used for NAPI polls
5928 */
5929 memset(dev, 0, sizeof(struct net_device));
5930
5931 /* make sure we BUG if trying to hit standard
5932 * register/unregister code path
5933 */
5934 dev->reg_state = NETREG_DUMMY;
5935
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005936 /* NAPI wants this */
5937 INIT_LIST_HEAD(&dev->napi_list);
5938
5939 /* a dummy interface is started by default */
5940 set_bit(__LINK_STATE_PRESENT, &dev->state);
5941 set_bit(__LINK_STATE_START, &dev->state);
5942
Eric Dumazet29b44332010-10-11 10:22:12 +00005943	/* Note : We don't allocate pcpu_refcnt for dummy devices,
 5944	 * because users of this 'device' don't need to change
5945 * its refcount.
5946 */
5947
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005948 return 0;
5949}
5950EXPORT_SYMBOL_GPL(init_dummy_netdev);
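/*
 * A minimal sketch (illustrative): a driver with one interrupt but
 * several hardware queues hangs its NAPI contexts off a dummy netdev;
 * "poll_fn" and the weight of 64 are assumed.
 *
 *	static struct net_device dummy_dev;
 *	static struct napi_struct napi;
 *
 *	init_dummy_netdev(&dummy_dev);
 *	netif_napi_add(&dummy_dev, &napi, poll_fn, 64);
 */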
5951
5952
5953/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005954 * register_netdev - register a network device
5955 * @dev: device to register
5956 *
5957 * Take a completed network device structure and add it to the kernel
5958 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5959 * chain. 0 is returned on success. A negative errno code is returned
5960 * on a failure to set up the device, or if the name is a duplicate.
5961 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005962 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005963 * and expands the device name if you passed a format string to
5964 * alloc_netdev.
5965 */
5966int register_netdev(struct net_device *dev)
5967{
5968 int err;
5969
5970 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005971 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005972 rtnl_unlock();
5973 return err;
5974}
5975EXPORT_SYMBOL(register_netdev);
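/*
 * A minimal probe-time sketch (illustrative); "struct my_priv" and
 * "my_netdev_ops" are assumed driver definitions.
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */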
5976
Eric Dumazet29b44332010-10-11 10:22:12 +00005977int netdev_refcnt_read(const struct net_device *dev)
5978{
5979 int i, refcnt = 0;
5980
5981 for_each_possible_cpu(i)
5982 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5983 return refcnt;
5984}
5985EXPORT_SYMBOL(netdev_refcnt_read);
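/*
 * A minimal sketch (illustrative): the per-cpu counters are only summed
 * on demand, so this is a debugging aid rather than a synchronization
 * primitive.
 *
 *	pr_debug("%s: refcnt=%d\n", dev->name, netdev_refcnt_read(dev));
 */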
5986
Ben Hutchings2c530402012-07-10 10:55:09 +00005987/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005988 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00005989 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005990 *
5991 * This is called when unregistering network devices.
5992 *
5993 * Any protocol or device that holds a reference should register
 5994 * for netdevice notification, and clean up and put back the
5995 * reference if they receive an UNREGISTER event.
5996 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005997 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005998 */
5999static void netdev_wait_allrefs(struct net_device *dev)
6000{
6001 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006002 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006003
Eric Dumazete014deb2009-11-17 05:59:21 +00006004 linkwatch_forget_dev(dev);
6005
Linus Torvalds1da177e2005-04-16 15:20:36 -07006006 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006007 refcnt = netdev_refcnt_read(dev);
6008
6009 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006010 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006011 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006012
6013 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006014 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006015
Eric Dumazet748e2d92012-08-22 21:50:59 +00006016 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006017 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006018 rtnl_lock();
6019
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006020 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006021 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6022 &dev->state)) {
6023 /* We must not have linkwatch events
6024 * pending on unregister. If this
6025 * happens, we simply run the queue
6026 * unscheduled, resulting in a noop
6027 * for this device.
6028 */
6029 linkwatch_run_queue();
6030 }
6031
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006032 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006033
6034 rebroadcast_time = jiffies;
6035 }
6036
6037 msleep(250);
6038
Eric Dumazet29b44332010-10-11 10:22:12 +00006039 refcnt = netdev_refcnt_read(dev);
6040
Linus Torvalds1da177e2005-04-16 15:20:36 -07006041 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006042 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6043 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006044 warning_time = jiffies;
6045 }
6046 }
6047}
6048
6049/* The sequence is:
6050 *
6051 * rtnl_lock();
6052 * ...
6053 * register_netdevice(x1);
6054 * register_netdevice(x2);
6055 * ...
6056 * unregister_netdevice(y1);
6057 * unregister_netdevice(y2);
6058 * ...
6059 * rtnl_unlock();
6060 * free_netdev(y1);
6061 * free_netdev(y2);
6062 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006063 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006064 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006065 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006066 * without deadlocking with linkwatch via keventd.
6067 * 2) Since we run with the RTNL semaphore not held, we can sleep
6068 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006069 *
6070 * We must not return until all unregister events added during
6071 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006073void netdev_run_todo(void)
6074{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006075 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006076
Linus Torvalds1da177e2005-04-16 15:20:36 -07006077 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006078 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006079
6080 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006081
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006082
6083 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006084 if (!list_empty(&list))
6085 rcu_barrier();
6086
Linus Torvalds1da177e2005-04-16 15:20:36 -07006087 while (!list_empty(&list)) {
6088 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006089 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006090 list_del(&dev->todo_list);
6091
Eric Dumazet748e2d92012-08-22 21:50:59 +00006092 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006093 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006094 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006095
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006096 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006097 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006098 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006099 dump_stack();
6100 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006101 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006102
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006103 dev->reg_state = NETREG_UNREGISTERED;
6104
Changli Gao152102c2010-03-30 20:16:22 +00006105 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006106
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006107 netdev_wait_allrefs(dev);
6108
6109 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006110 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006111 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6112 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006113 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006114
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006115 if (dev->destructor)
6116 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006117
Eric W. Biederman50624c92013-09-23 21:19:49 -07006118 /* Report a network device has been unregistered */
6119 rtnl_lock();
6120 dev_net(dev)->dev_unreg_count--;
6121 __rtnl_unlock();
6122 wake_up(&netdev_unregistering_wq);
6123
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006124 /* Free network device */
6125 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006127}
6128
Ben Hutchings3cfde792010-07-09 09:11:52 +00006129/* Convert net_device_stats to rtnl_link_stats64. They have the same
6130 * fields in the same order, with only the type differing.
6131 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006132void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6133 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006134{
6135#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006136 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6137 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006138#else
6139 size_t i, n = sizeof(*stats64) / sizeof(u64);
6140 const unsigned long *src = (const unsigned long *)netdev_stats;
6141 u64 *dst = (u64 *)stats64;
6142
6143 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6144 sizeof(*stats64) / sizeof(u64));
6145 for (i = 0; i < n; i++)
6146 dst[i] = src[i];
6147#endif
6148}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006149EXPORT_SYMBOL(netdev_stats_to_stats64);
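/*
 * A minimal sketch (illustrative): a driver that only keeps legacy
 * struct net_device_stats can still provide ndo_get_stats64 this way;
 * "my_get_stats64" is an assumed name.
 *
 *	static struct rtnl_link_stats64 *
 *	my_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s)
 *	{
 *		netdev_stats_to_stats64(s, &dev->stats);
 *		return s;
 *	}
 */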
Ben Hutchings3cfde792010-07-09 09:11:52 +00006150
Eric Dumazetd83345a2009-11-16 03:36:51 +00006151/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006152 * dev_get_stats - get network device statistics
6153 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006154 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006155 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006156 * Get network statistics from device. Return @storage.
6157 * The device driver may provide its own method by setting
6158 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6159 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006160 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006161struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6162 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006163{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006164 const struct net_device_ops *ops = dev->netdev_ops;
6165
Eric Dumazet28172732010-07-07 14:58:56 -07006166 if (ops->ndo_get_stats64) {
6167 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006168 ops->ndo_get_stats64(dev, storage);
6169 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006170 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006171 } else {
6172 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006173 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006174 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006175 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006176}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006177EXPORT_SYMBOL(dev_get_stats);
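/*
 * A minimal caller sketch (illustrative): @storage lives on the caller's
 * stack and is fully written by dev_get_stats() before use.
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: rx %llu tx %llu\n", dev->name,
 *		(unsigned long long)stats.rx_packets,
 *		(unsigned long long)stats.tx_packets);
 */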
Rusty Russellc45d2862007-03-28 14:29:08 -07006178
Eric Dumazet24824a02010-10-02 06:11:55 +00006179struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006180{
Eric Dumazet24824a02010-10-02 06:11:55 +00006181 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006182
Eric Dumazet24824a02010-10-02 06:11:55 +00006183#ifdef CONFIG_NET_CLS_ACT
6184 if (queue)
6185 return queue;
6186 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6187 if (!queue)
6188 return NULL;
6189 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006190 queue->qdisc = &noop_qdisc;
6191 queue->qdisc_sleeping = &noop_qdisc;
6192 rcu_assign_pointer(dev->ingress_queue, queue);
6193#endif
6194 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006195}
6196
Eric Dumazet2c60db02012-09-16 09:17:26 +00006197static const struct ethtool_ops default_ethtool_ops;
6198
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00006199void netdev_set_default_ethtool_ops(struct net_device *dev,
6200 const struct ethtool_ops *ops)
6201{
6202 if (dev->ethtool_ops == &default_ethtool_ops)
6203 dev->ethtool_ops = ops;
6204}
6205EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
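/*
 * A minimal sketch (illustrative): a bus or subsystem framework installs
 * fallback ethtool ops without clobbering a driver's own assignment;
 * "framework_ethtool_ops" is an assumed object.
 *
 *	netdev_set_default_ethtool_ops(dev, &framework_ethtool_ops);
 */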
6206
Eric Dumazet74d332c2013-10-30 13:10:44 -07006207void netdev_freemem(struct net_device *dev)
6208{
6209 char *addr = (char *)dev - dev->padded;
6210
6211 if (is_vmalloc_addr(addr))
6212 vfree(addr);
6213 else
6214 kfree(addr);
6215}
6216
Linus Torvalds1da177e2005-04-16 15:20:36 -07006217/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006218 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006219 * @sizeof_priv: size of private data to allocate space for
6220 * @name: device name format string
6221 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006222 * @txqs: the number of TX subqueues to allocate
6223 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006224 *
6225 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006226 *	and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006227 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006228 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006229struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6230 void (*setup)(struct net_device *),
6231 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006232{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006233 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006234 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006235 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006236
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006237 BUG_ON(strlen(name) >= sizeof(dev->name));
6238
Tom Herbert36909ea2011-01-09 19:36:31 +00006239 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006240 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006241 return NULL;
6242 }
6243
Tom Herbert36909ea2011-01-09 19:36:31 +00006244#ifdef CONFIG_RPS
6245 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006246 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006247 return NULL;
6248 }
6249#endif
6250
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006251 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006252 if (sizeof_priv) {
6253 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006254 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006255 alloc_size += sizeof_priv;
6256 }
6257 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006258 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006259
Eric Dumazet74d332c2013-10-30 13:10:44 -07006260 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6261 if (!p)
6262 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00006263 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006264 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006265
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006266 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006267 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006268
Eric Dumazet29b44332010-10-11 10:22:12 +00006269 dev->pcpu_refcnt = alloc_percpu(int);
6270 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07006271 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006272
Linus Torvalds1da177e2005-04-16 15:20:36 -07006273 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006274 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006275
Jiri Pirko22bedad32010-04-01 21:22:57 +00006276 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006277 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006278
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006279 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006280
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006281 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006282 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006283
Herbert Xud565b0a2008-12-15 23:38:52 -08006284 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006285 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006286 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006287 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006288 INIT_LIST_HEAD(&dev->adj_list.upper);
6289 INIT_LIST_HEAD(&dev->adj_list.lower);
6290 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6291 INIT_LIST_HEAD(&dev->all_adj_list.lower);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006292 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006293 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006294
6295 dev->num_tx_queues = txqs;
6296 dev->real_num_tx_queues = txqs;
6297 if (netif_alloc_netdev_queues(dev))
6298 goto free_all;
6299
6300#ifdef CONFIG_RPS
6301 dev->num_rx_queues = rxqs;
6302 dev->real_num_rx_queues = rxqs;
6303 if (netif_alloc_rx_queues(dev))
6304 goto free_all;
6305#endif
6306
Linus Torvalds1da177e2005-04-16 15:20:36 -07006307 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006308 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006309 if (!dev->ethtool_ops)
6310 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006311 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006312
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006313free_all:
6314 free_netdev(dev);
6315 return NULL;
6316
Eric Dumazet29b44332010-10-11 10:22:12 +00006317free_pcpu:
6318 free_percpu(dev->pcpu_refcnt);
Eric Dumazet60877a32013-06-20 01:15:51 -07006319 netif_free_tx_queues(dev);
Tom Herbertfe822242010-11-09 10:47:38 +00006320#ifdef CONFIG_RPS
6321 kfree(dev->_rx);
6322#endif
6323
Eric Dumazet74d332c2013-10-30 13:10:44 -07006324free_dev:
6325 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006326 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006327}
Tom Herbert36909ea2011-01-09 19:36:31 +00006328EXPORT_SYMBOL(alloc_netdev_mqs);
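
/*
 * Allocation sketch (illustrative only; struct my_priv and my_setup()
 * are hypothetical driver code): allocate a device with four TX and four
 * RX queues, then register it. A device that fails before or during
 * register_netdev() is released with free_netdev():
 *
 *	static void my_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *	}
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "my%d",
 *			       my_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */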
Linus Torvalds1da177e2005-04-16 15:20:36 -07006329
6330/**
6331 * free_netdev - free network device
6332 * @dev: device
6333 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006334 * This function does the last stage of destroying an allocated device
6335 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006336 * If this is the last reference then it will be freed.
6337 */
6338void free_netdev(struct net_device *dev)
6339{
Herbert Xud565b0a2008-12-15 23:38:52 -08006340 struct napi_struct *p, *n;
6341
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006342 release_net(dev_net(dev));
6343
Eric Dumazet60877a32013-06-20 01:15:51 -07006344 netif_free_tx_queues(dev);
Tom Herbertfe822242010-11-09 10:47:38 +00006345#ifdef CONFIG_RPS
6346 kfree(dev->_rx);
6347#endif
David S. Millere8a04642008-07-17 00:34:19 -07006348
Eric Dumazet33d480c2011-08-11 19:30:52 +00006349 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006350
Jiri Pirkof001fde2009-05-05 02:48:28 +00006351 /* Flush device addresses */
6352 dev_addr_flush(dev);
6353
Herbert Xud565b0a2008-12-15 23:38:52 -08006354 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6355 netif_napi_del(p);
6356
Eric Dumazet29b44332010-10-11 10:22:12 +00006357 free_percpu(dev->pcpu_refcnt);
6358 dev->pcpu_refcnt = NULL;
6359
Stephen Hemminger3041a062006-05-26 13:25:24 -07006360 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07006362 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006363 return;
6364 }
6365
6366 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6367 dev->reg_state = NETREG_RELEASED;
6368
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006369 /* will free via device release */
6370 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006371}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006372EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006373
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006374/**
6375 * synchronize_net - Synchronize with packet receive processing
6376 *
6377 * Wait for packets currently being received to be done.
6378 * Does not block later packets from starting.
6379 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006380void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006381{
6382 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006383 if (rtnl_is_locked())
6384 synchronize_rcu_expedited();
6385 else
6386 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006387}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006388EXPORT_SYMBOL(synchronize_net);
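
/*
 * Typical caller pattern (sketch; "entry" stands for any object
 * published to the receive path via RCU): unpublish first, then wait
 * for in-flight receive processing to finish before freeing:
 *
 *	list_del_rcu(&entry->list);
 *	synchronize_net();
 *	kfree(entry);
 */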
Linus Torvalds1da177e2005-04-16 15:20:36 -07006389
6390/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006391 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006392 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006393 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006394 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006395 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006396 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006397 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006398 *
6399 * Callers must hold the rtnl semaphore. You may want
6400 * unregister_netdev() instead of this.
6401 */
6402
Eric Dumazet44a08732009-10-27 07:03:04 +00006403void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006404{
Herbert Xua6620712007-12-12 19:21:56 -08006405 ASSERT_RTNL();
6406
Eric Dumazet44a08732009-10-27 07:03:04 +00006407 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006408 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006409 } else {
6410 rollback_registered(dev);
6411 /* Finish processing unregister after unlock */
6412 net_set_todo(dev);
6413 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006414}
Eric Dumazet44a08732009-10-27 07:03:04 +00006415EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006416
6417/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006418 * unregister_netdevice_many - unregister many devices
6419 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006420 */
6421void unregister_netdevice_many(struct list_head *head)
6422{
6423 struct net_device *dev;
6424
6425 if (!list_empty(head)) {
6426 rollback_registered_many(head);
6427 list_for_each_entry(dev, head, unreg_list)
6428 net_set_todo(dev);
6429 }
6430}
Eric Dumazet63c80992009-10-27 07:06:49 +00006431EXPORT_SYMBOL(unregister_netdevice_many);
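
/*
 * Batching sketch (illustrative; my_link_ops is hypothetical): queueing
 * devices on one list means the expensive synchronization inside
 * rollback_registered_many() is paid once rather than per device:
 *
 *	LIST_HEAD(kill_list);
 *
 *	ASSERT_RTNL();
 *	for_each_netdev_safe(net, dev, tmp)
 *		if (dev->rtnl_link_ops == &my_link_ops)
 *			unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */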
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006432
6433/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006434 * unregister_netdev - remove device from the kernel
6435 * @dev: device
6436 *
6437 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006438 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006439 *
6440 * This is just a wrapper for unregister_netdevice that takes
6441 * the rtnl semaphore. In general you want to use this and not
6442 * unregister_netdevice.
6443 */
6444void unregister_netdev(struct net_device *dev)
6445{
6446 rtnl_lock();
6447 unregister_netdevice(dev);
6448 rtnl_unlock();
6449}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006450EXPORT_SYMBOL(unregister_netdev);
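
/*
 * Module-exit sketch (my_dev is a hypothetical driver global): the
 * usual teardown order is unregister first, then free once the
 * unregistration has completed:
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_netdev(my_dev);
 *		free_netdev(my_dev);
 *	}
 */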
6451
Eric W. Biedermance286d32007-09-12 13:53:49 +02006452/**
6453 * dev_change_net_namespace - move device to different nethost namespace
6454 * @dev: device
6455 * @net: network namespace
6456 * @pat: If not NULL name pattern to try if the current device name
6457 * is already taken in the destination network namespace.
6458 *
6459 * This function shuts down a device interface and moves it
6460 * to a new network namespace. On success 0 is returned, on
6461 * failure a negative errno code is returned.
6462 *
6463 * Callers must hold the rtnl semaphore.
6464 */
6465
6466int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6467{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006468 int err;
6469
6470 ASSERT_RTNL();
6471
6472 /* Don't allow namespace local devices to be moved. */
6473 err = -EINVAL;
6474 if (dev->features & NETIF_F_NETNS_LOCAL)
6475 goto out;
6476
6477 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006478 if (dev->reg_state != NETREG_REGISTERED)
6479 goto out;
6480
6481 /* Get out if there is nothing to do */
6482 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006483 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006484 goto out;
6485
6486 /* Pick the destination device name, and ensure
6487 * we can use it in the destination network namespace.
6488 */
6489 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006490 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006491 /* We get here if we can't use the current device name */
6492 if (!pat)
6493 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006494 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006495 goto out;
6496 }
6497
6498 /*
6499 * And now a mini version of register_netdevice and unregister_netdevice.
6500 */
6501
6502 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006503 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006504
6505 /* And unlink it from device chain */
6506 err = -ENODEV;
6507 unlist_netdevice(dev);
6508
6509 synchronize_net();
6510
6511 /* Shutdown queueing discipline. */
6512 dev_shutdown(dev);
6513
6514 /* Notify protocols that we are about to destroy
6515 this device. They should clean all the things.
David Lamparter3b27e102010-09-17 03:22:19 +00006516
6517 Note that dev->reg_state stays at NETREG_REGISTERED.
6518 This is wanted because this way 8021q and macvlan know
6519 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006520 */
6521 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006522 rcu_barrier();
6523 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006524 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006525
6526 /*
6527 * Flush the unicast and multicast chains
6528 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006529 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006530 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006531
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006532 /* Send a netdev-removed uevent to the old namespace */
6533 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6534
Eric W. Biedermance286d32007-09-12 13:53:49 +02006535 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006536 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006537
Eric W. Biedermance286d32007-09-12 13:53:49 +02006538 /* If there is an ifindex conflict assign a new one */
6539 if (__dev_get_by_index(net, dev->ifindex)) {
6540 int iflink = (dev->iflink == dev->ifindex);
6541 dev->ifindex = dev_new_index(net);
6542 if (iflink)
6543 dev->iflink = dev->ifindex;
6544 }
6545
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006546 /* Send a netdev-add uevent to the new namespace */
6547 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6548
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006549 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006550 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006551 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006552
6553 /* Add the device back in the hashes */
6554 list_netdevice(dev);
6555
6556 /* Notify protocols, that a new device appeared. */
6557 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6558
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006559 /*
6560 * Prevent userspace races by waiting until the network
6561 * device is fully setup before sending notifications.
6562 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006563 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006564
Eric W. Biedermance286d32007-09-12 13:53:49 +02006565 synchronize_net();
6566 err = 0;
6567out:
6568 return err;
6569}
Johannes Berg463d0182009-07-14 00:33:35 +02006570EXPORT_SYMBOL_GPL(dev_change_net_namespace);
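
/*
 * Caller sketch (hedged; "net" is a destination namespace the caller
 * already holds): supply a rename pattern so the move can still succeed
 * when the current name is taken in the destination namespace:
 *
 *	ASSERT_RTNL();
 *	err = dev_change_net_namespace(dev, net, "dev%d");
 *	if (err)
 *		return err;
 */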
Eric W. Biedermance286d32007-09-12 13:53:49 +02006571
Linus Torvalds1da177e2005-04-16 15:20:36 -07006572static int dev_cpu_callback(struct notifier_block *nfb,
6573 unsigned long action,
6574 void *ocpu)
6575{
6576 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006577 struct sk_buff *skb;
6578 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6579 struct softnet_data *sd, *oldsd;
6580
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006581 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006582 return NOTIFY_OK;
6583
6584 local_irq_disable();
6585 cpu = smp_processor_id();
6586 sd = &per_cpu(softnet_data, cpu);
6587 oldsd = &per_cpu(softnet_data, oldcpu);
6588
6589 /* Find end of our completion_queue. */
6590 list_skb = &sd->completion_queue;
6591 while (*list_skb)
6592 list_skb = &(*list_skb)->next;
6593 /* Append completion queue from offline CPU. */
6594 *list_skb = oldsd->completion_queue;
6595 oldsd->completion_queue = NULL;
6596
Linus Torvalds1da177e2005-04-16 15:20:36 -07006597 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006598 if (oldsd->output_queue) {
6599 *sd->output_queue_tailp = oldsd->output_queue;
6600 sd->output_queue_tailp = oldsd->output_queue_tailp;
6601 oldsd->output_queue = NULL;
6602 oldsd->output_queue_tailp = &oldsd->output_queue;
6603 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006604 /* Append NAPI poll list from offline CPU. */
6605 if (!list_empty(&oldsd->poll_list)) {
6606 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6607 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6608 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006609
6610 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6611 local_irq_enable();
6612
6613 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006614 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6615 netif_rx(skb);
6616 input_queue_head_incr(oldsd);
6617 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006618 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006620 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006621 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006622
6623 return NOTIFY_OK;
6624}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625
6626
Herbert Xu7f353bf2007-08-10 15:47:58 -07006627/**
Herbert Xub63365a2008-10-23 01:11:29 -07006628 * netdev_increment_features - increment feature set by one
6629 * @all: current feature set
6630 * @one: new feature set
6631 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006632 *
6633 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006634 * @one to the master device with current feature set @all. Will not
6635 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006636 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006637netdev_features_t netdev_increment_features(netdev_features_t all,
6638 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006639{
Michał Mirosław1742f182011-04-22 06:31:16 +00006640 if (mask & NETIF_F_GEN_CSUM)
6641 mask |= NETIF_F_ALL_CSUM;
6642 mask |= NETIF_F_VLAN_CHALLENGED;
6643
6644 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6645 all &= one | ~NETIF_F_ALL_FOR_ALL;
6646
Michał Mirosław1742f182011-04-22 06:31:16 +00006647 /* If one device supports hw checksumming, set for all. */
6648 if (all & NETIF_F_GEN_CSUM)
6649 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006650
6651 return all;
6652}
Herbert Xub63365a2008-10-23 01:11:29 -07006653EXPORT_SYMBOL(netdev_increment_features);
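
/*
 * Aggregation sketch (illustrative; MY_FEATURE_MASK and the slave list
 * are hypothetical): a master device such as a bond recomputes its
 * feature set by folding in each slave in turn:
 *
 *	netdev_features_t features = MY_FEATURE_MASK;
 *
 *	list_for_each_entry(slave, &master->slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     MY_FEATURE_MASK);
 */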
Herbert Xu7f353bf2007-08-10 15:47:58 -07006654
Baruch Siach430f03c2013-06-02 20:43:55 +00006655static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006656{
6657 int i;
6658 struct hlist_head *hash;
6659
6660 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6661 if (hash != NULL)
6662 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6663 INIT_HLIST_HEAD(&hash[i]);
6664
6665 return hash;
6666}
6667
Eric W. Biederman881d9662007-09-17 11:56:21 -07006668/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006669static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006670{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006671 if (net != &init_net)
6672 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006673
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006674 net->dev_name_head = netdev_create_hash();
6675 if (net->dev_name_head == NULL)
6676 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006677
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006678 net->dev_index_head = netdev_create_hash();
6679 if (net->dev_index_head == NULL)
6680 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006681
6682 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006683
6684err_idx:
6685 kfree(net->dev_name_head);
6686err_name:
6687 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006688}
6689
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006690/**
6691 * netdev_drivername - network driver for the device
6692 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006693 *
6694 * Determine network driver for device.
6695 */
David S. Miller3019de12011-06-06 16:41:33 -07006696const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006697{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006698 const struct device_driver *driver;
6699 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006700 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006701
6702 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006703 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006704 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006705
6706 driver = parent->driver;
6707 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006708 return driver->name;
6709 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006710}
6711
Joe Perchesb004ff42012-09-12 20:12:19 -07006712static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006713 struct va_format *vaf)
6714{
6715 int r;
6716
Joe Perchesb004ff42012-09-12 20:12:19 -07006717 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006718 r = dev_printk_emit(level[1] - '0',
6719 dev->dev.parent,
6720 "%s %s %s: %pV",
6721 dev_driver_string(dev->dev.parent),
6722 dev_name(dev->dev.parent),
6723 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006724 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00006725 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006726 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00006727 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006728 }
Joe Perches256df2f2010-06-27 01:02:35 +00006729
6730 return r;
6731}
6732
6733int netdev_printk(const char *level, const struct net_device *dev,
6734 const char *format, ...)
6735{
6736 struct va_format vaf;
6737 va_list args;
6738 int r;
6739
6740 va_start(args, format);
6741
6742 vaf.fmt = format;
6743 vaf.va = &args;
6744
6745 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006746
Joe Perches256df2f2010-06-27 01:02:35 +00006747 va_end(args);
6748
6749 return r;
6750}
6751EXPORT_SYMBOL(netdev_printk);
6752
6753#define define_netdev_printk_level(func, level) \
6754int func(const struct net_device *dev, const char *fmt, ...) \
6755{ \
6756 int r; \
6757 struct va_format vaf; \
6758 va_list args; \
6759 \
6760 va_start(args, fmt); \
6761 \
6762 vaf.fmt = fmt; \
6763 vaf.va = &args; \
6764 \
6765 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07006766 \
Joe Perches256df2f2010-06-27 01:02:35 +00006767 va_end(args); \
6768 \
6769 return r; \
6770} \
6771EXPORT_SYMBOL(func);
6772
6773define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6774define_netdev_printk_level(netdev_alert, KERN_ALERT);
6775define_netdev_printk_level(netdev_crit, KERN_CRIT);
6776define_netdev_printk_level(netdev_err, KERN_ERR);
6777define_netdev_printk_level(netdev_warn, KERN_WARNING);
6778define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6779define_netdev_printk_level(netdev_info, KERN_INFO);
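
/*
 * Usage sketch (qid and speed are hypothetical driver locals): these
 * helpers prefix messages with the driver, bus and interface names, so
 * callers only supply the event text:
 *
 *	netdev_err(dev, "reset failed on queue %u\n", qid);
 *	netdev_info(dev, "link up, %u Mb/s\n", speed);
 */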
6780
Pavel Emelyanov46650792007-10-08 20:38:39 -07006781static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006782{
6783 kfree(net->dev_name_head);
6784 kfree(net->dev_index_head);
6785}
6786
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006787static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07006788 .init = netdev_init,
6789 .exit = netdev_exit,
6790};
6791
Pavel Emelyanov46650792007-10-08 20:38:39 -07006792static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006793{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006794 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02006795 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006796 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02006797 * initial network namespace
6798 */
6799 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006800 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006801 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006802 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02006803
6804 /* Ignore unmoveable devices (i.e. loopback) */
6805 if (dev->features & NETIF_F_NETNS_LOCAL)
6806 continue;
6807
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006808 /* Leave virtual devices for the generic cleanup */
6809 if (dev->rtnl_link_ops)
6810 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08006811
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006812 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006813 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6814 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006815 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006816 pr_emerg("%s: failed to move %s to init_net: %d\n",
6817 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006818 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02006819 }
6820 }
6821 rtnl_unlock();
6822}
6823
Eric W. Biederman50624c92013-09-23 21:19:49 -07006824static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6825{
6826 /* Return with the rtnl_lock held when there are no network
6827 * devices unregistering in any network namespace in net_list.
6828 */
6829 struct net *net;
6830 bool unregistering;
6831 DEFINE_WAIT(wait);
6832
6833 for (;;) {
6834 prepare_to_wait(&netdev_unregistering_wq, &wait,
6835 TASK_UNINTERRUPTIBLE);
6836 unregistering = false;
6837 rtnl_lock();
6838 list_for_each_entry(net, net_list, exit_list) {
6839 if (net->dev_unreg_count > 0) {
6840 unregistering = true;
6841 break;
6842 }
6843 }
6844 if (!unregistering)
6845 break;
6846 __rtnl_unlock();
6847 schedule();
6848 }
6849 finish_wait(&netdev_unregistering_wq, &wait);
6850}
6851
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006852static void __net_exit default_device_exit_batch(struct list_head *net_list)
6853{
6854 /* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04006855 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006856 * Do this across as many network namespaces as possible to
6857 * improve batching efficiency.
6858 */
6859 struct net_device *dev;
6860 struct net *net;
6861 LIST_HEAD(dev_kill_list);
6862
Eric W. Biederman50624c92013-09-23 21:19:49 -07006863 /* To prevent network device cleanup code from dereferencing
6864 * loopback devices or network devices that have been freed
6865 * wait here for all pending unregistrations to complete,
6866 * before unregistring the loopback device and allowing the
6867 * network namespace be freed.
6868 *
6869 * The netdev todo list containing all network devices
6870 * unregistrations that happen in default_device_exit_batch
6871 * will run in the rtnl_unlock() at the end of
6872 * default_device_exit_batch.
6873 */
6874 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006875 list_for_each_entry(net, net_list, exit_list) {
6876 for_each_netdev_reverse(net, dev) {
6877 if (dev->rtnl_link_ops)
6878 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6879 else
6880 unregister_netdevice_queue(dev, &dev_kill_list);
6881 }
6882 }
6883 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006884 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006885 rtnl_unlock();
6886}
6887
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006888static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006889 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006890 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02006891};
6892
Linus Torvalds1da177e2005-04-16 15:20:36 -07006893/*
6894 * Initialize the DEV module. At boot time this walks the device list and
6895 * unhooks any devices that fail to initialise (normally hardware not
6896 * present) and leaves us with a valid list of present and active devices.
6897 *
6898 */
6899
6900/*
6901 * This is called single threaded during boot, so no need
6902 * to take the rtnl semaphore.
6903 */
6904static int __init net_dev_init(void)
6905{
6906 int i, rc = -ENOMEM;
6907
6908 BUG_ON(!dev_boot_phase);
6909
Linus Torvalds1da177e2005-04-16 15:20:36 -07006910 if (dev_proc_init())
6911 goto out;
6912
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006913 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914 goto out;
6915
6916 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08006917 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006918 INIT_LIST_HEAD(&ptype_base[i]);
6919
Vlad Yasevich62532da2012-11-15 08:49:10 +00006920 INIT_LIST_HEAD(&offload_base);
6921
Eric W. Biederman881d9662007-09-17 11:56:21 -07006922 if (register_pernet_subsys(&netdev_net_ops))
6923 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006924
6925 /*
6926 * Initialise the packet receive queues.
6927 */
6928
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07006929 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006930 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006931
Changli Gaodee42872010-05-02 05:42:16 +00006932 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006933 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07006934 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006935 sd->completion_queue = NULL;
6936 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00006937 sd->output_queue = NULL;
6938 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00006939#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006940 sd->csd.func = rps_trigger_softirq;
6941 sd->csd.info = sd;
6942 sd->csd.flags = 0;
6943 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07006944#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00006945
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006946 sd->backlog.poll = process_backlog;
6947 sd->backlog.weight = weight_p;
6948 sd->backlog.gro_list = NULL;
6949 sd->backlog.gro_count = 0;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00006950
6951#ifdef CONFIG_NET_FLOW_LIMIT
6952 sd->flow_limit = NULL;
6953#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006954 }
6955
Linus Torvalds1da177e2005-04-16 15:20:36 -07006956 dev_boot_phase = 0;
6957
Eric W. Biederman505d4f72008-11-07 22:54:20 -08006958 /* The loopback device is special: if any other network device
6959 * is present in a network namespace, the loopback device must
6960 * be present too. Since we now dynamically allocate and free the
6961 * loopback device, ensure this invariant is maintained by
6962 * keeping the loopback device as the first device on the
6963 * list of network devices, ensuring the loopback device
6964 * is the first device that appears and the last network device
6965 * that disappears.
6966 */
6967 if (register_pernet_device(&loopback_net_ops))
6968 goto out;
6969
6970 if (register_pernet_device(&default_device_ops))
6971 goto out;
6972
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006973 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6974 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006975
6976 hotcpu_notifier(dev_cpu_callback, 0);
6977 dst_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006978 rc = 0;
6979out:
6980 return rc;
6981}
6982
6983subsys_initcall(net_dev_init);