/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

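/*
 * A minimal usage sketch (editor's illustration, not part of this file):
 * a module that taps every incoming packet. The handler and variable
 * names here are hypothetical; the handler signature and &packet_type
 * fields are assumed to match <linux/netdevice.h> in this tree.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);			(drop our copy of the skb)
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),	(tap all protocols)
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);			(typically at module init)
 *	...
 *	dev_remove_pack(&my_tap);		(at module exit; may sleep)
 */
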
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

Vlad Yasevich62532da2012-11-15 08:49:10 +0000446
447/**
448 * dev_add_offload - register offload handlers
449 * @po: protocol offload declaration
450 *
451 * Add protocol offload handlers to the networking stack. The passed
452 * &proto_offload is linked into kernel lists and may not be freed until
453 * it has been removed from the kernel lists.
454 *
455 * This call does not sleep therefore it can not
456 * guarantee all CPU's that are in middle of receiving packets
457 * will see the new offload handlers (until the next received packet).
458 */
459void dev_add_offload(struct packet_offload *po)
460{
461 struct list_head *head = &offload_base;
462
463 spin_lock(&offload_lock);
464 list_add_rcu(&po->list, head);
465 spin_unlock(&offload_lock);
466}
467EXPORT_SYMBOL(dev_add_offload);
468
469/**
470 * __dev_remove_offload - remove offload handler
471 * @po: packet offload declaration
472 *
473 * Remove a protocol offload handler that was previously added to the
474 * kernel offload handlers by dev_add_offload(). The passed &offload_type
475 * is removed from the kernel lists and can be freed or reused once this
476 * function returns.
477 *
478 * The packet type might still be in use by receivers
479 * and must not be freed until after all the CPU's have gone
480 * through a quiescent state.
481 */
482void __dev_remove_offload(struct packet_offload *po)
483{
484 struct list_head *head = &offload_base;
485 struct packet_offload *po1;
486
Eric Dumazetc53aa502012-11-16 08:08:23 +0000487 spin_lock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000488
489 list_for_each_entry(po1, head, list) {
490 if (po == po1) {
491 list_del_rcu(&po->list);
492 goto out;
493 }
494 }
495
496 pr_warn("dev_remove_offload: %p not found\n", po);
497out:
Eric Dumazetc53aa502012-11-16 08:08:23 +0000498 spin_unlock(&offload_lock);
Vlad Yasevich62532da2012-11-15 08:49:10 +0000499}
500EXPORT_SYMBOL(__dev_remove_offload);
501
502/**
503 * dev_remove_offload - remove packet offload handler
504 * @po: packet offload declaration
505 *
506 * Remove a packet offload handler that was previously added to the kernel
507 * offload handlers by dev_add_offload(). The passed &offload_type is
508 * removed from the kernel lists and can be freed or reused once this
509 * function returns.
510 *
511 * This call sleeps to guarantee that no CPU is looking at the packet
512 * type after return.
513 */
514void dev_remove_offload(struct packet_offload *po)
515{
516 __dev_remove_offload(po);
517
518 synchronize_net();
519}
520EXPORT_SYMBOL(dev_remove_offload);
521
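/*
 * A usage sketch (editor's illustration): how a protocol wires its
 * GSO/GRO callbacks into the offload list, in the style of the IPv4
 * registration. The callback names are invented; the field layout is
 * assumed to follow &struct packet_offload in this tree.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 */
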
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
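
/*
 * Example command line (illustrative, values invented): given the
 * parsing above, where get_options() consumes up to four integers and
 * leaves the device name in @str,
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * records irq 9, I/O base 0x300 and the memory window for "eth0", to be
 * picked up later by netdev_boot_setup_check() during device probing.
 */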

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

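/*
 * A usage sketch (hypothetical caller): the refcounted variant pairs
 * each successful lookup with dev_put() once the device is no longer
 * needed.
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 *
 * Under rcu_read_lock(), dev_get_by_name_rcu() may be used instead and
 * needs no dev_put(), but the pointer is only valid inside the RCU
 * read-side section.
 */
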
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

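/*
 * A usage sketch (hypothetical caller): the buffer must be at least
 * IFNAMSIZ bytes, since the name is copied with strcpy() above.
 *
 *	char name[IFNAMSIZ];
 *
 *	if (netdev_get_name(net, name, ifindex) == 0)
 *		pr_debug("ifindex %d is %s\n", ifindex, name);
 */
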
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

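/*
 * A usage sketch (hypothetical caller, MAC address invented): look up an
 * Ethernet device by hardware address inside an RCU read-side section;
 * no reference is taken on the result.
 *
 *	static const char mac[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		... use dev only inside this RCU section ...
 *	rcu_read_unlock();
 */
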
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

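/*
 * A usage sketch (hypothetical caller): find an interface that is up and
 * not a loopback device. Only the bits set in the mask are compared, so
 * other flags on the device are ignored.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP | IFF_LOOPBACK);
 *	if (dev)
 *		... use dev; no reference is held ...
 *	rcu_read_unlock();
 */
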
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

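/*
 * Illustrative results for the checks above: "eth0" and "wlan-5" are
 * accepted; "" (empty), "." and ".." (would collide with sysfs path
 * entries), "eth 0" (whitespace), "eth/0" ('/'), and any name of
 * IFNAMSIZ characters or more are rejected.
 */
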
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

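/*
 * A usage sketch (hypothetical driver code, with rtnl held as the
 * kernel-doc above requires): pick the first free "eth%d" slot before
 * registration; dev->name is filled in on success.
 *
 *	err = dev_alloc_name(dev, "eth%d");	(returns e.g. 3 -> "eth3")
 *	if (err < 0)
 *		return err;
 */
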
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


1182/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001183 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001184 * @dev: device to cause notification
1185 *
1186 * Called to indicate a device has changed features.
1187 */
1188void netdev_features_change(struct net_device *dev)
1189{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001190 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001191}
1192EXPORT_SYMBOL(netdev_features_change);
1193
1194/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 * netdev_state_change - device changes state
1196 * @dev: device to cause notification
1197 *
1198 * Called to indicate a device has changed state. This function calls
1199 * the notifier chains for netdev_chain and sends a NEWLINK message
1200 * to the routing socket.
1201 */
1202void netdev_state_change(struct net_device *dev)
1203{
1204 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001205 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001206 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 }
1208}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001209EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210
Amerigo Wangee89bab2012-08-09 22:14:56 +00001211/**
1212 * netdev_notify_peers - notify network peers about existence of @dev
1213 * @dev: network device
1214 *
1215 * Generate traffic such that interested network peers are aware of
1216 * @dev, such as by generating a gratuitous ARP. This may be used when
1217 * a device wants to inform the rest of the network about some sort of
1218 * reconfiguration such as a failover event or virtual machine
1219 * migration.
1220 */
1221void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001222{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001223 rtnl_lock();
1224 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1225 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001226}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001227EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001228
Patrick McHardybd380812010-02-26 06:34:53 +00001229static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001231 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001232 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001234 ASSERT_RTNL();
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 if (!netif_device_present(dev))
1237 return -ENODEV;
1238
Neil Hormanca99ca12013-02-05 08:05:43 +00001239 /* Block netpoll from trying to do any rx path servicing.
1240 * If we don't do this there is a chance ndo_poll_controller
1241 * or ndo_poll may be running while we open the device
1242 */
dingtianhongda6e3782013-05-27 19:53:31 +00001243 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001244
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001245 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1246 ret = notifier_to_errno(ret);
1247 if (ret)
1248 return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_rx_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
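
/* Usage sketch (illustrative, not part of this file): callers bring an
 * interface up under RTNL, e.g. when enslaving a device:
 *
 *	rtnl_lock();
 *	err = dev_open(slave_dev);	// "slave_dev" is a hypothetical name
 *	rtnl_unlock();
 */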

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list,
		 * it can even be on a different CPU. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 * Call the device-specific close. This cannot fail, and is
		 * only done if the device is UP.
		 *
		 * We allow it to be called even after a DETACH hot-plug
		 * event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	/* Temporarily disable netpoll until the interface is down */
	netpoll_rx_disable(dev);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	netpoll_rx_enable(dev);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		list_del_init(&dev->close_list);
	}

	return 0;
}

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		/* Block netpoll rx while the interface is going down */
		netpoll_rx_disable(dev);

		list_add(&dev->close_list, &single);
		dev_close_many(&single);
		list_del(&single);

		netpoll_rx_enable(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable LRO on a VLAN device,
	 * use the underlying physical device instead.
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
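
/* Usage sketch (illustrative, not part of this file): forwarding setups
 * such as bridging or bonding call this under RTNL before packets
 * received on the port may be forwarded:
 *
 *	ASSERT_RTNL();
 *	dev_disable_lro(port_dev);	// "port_dev" is a hypothetical name
 */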

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

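/* Nonzero until net_dev_init() has run; while set, notifier registration
 * skips replaying events for the still-empty boot-time device list.
 */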
static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier, allowing it to get a race-free view
 * of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
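
/* Usage sketch (illustrative; the handler and names below are
 * hypothetical, not part of this file):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_netdev_nb);
 */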

/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list and sent to the removed notifier,
 * removing the need for special-case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 * call_netdevice_notifiers_info - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 * @info: notifier information data
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
				  struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}
EXPORT_SYMBOL(call_netdevice_notifiers_info);

/**
 * call_netdevice_notifiers - call all network notifier blocks
 * @val: value passed unmodified to notifier function
 * @dev: net_device pointer passed unmodified to notifier function
 *
 * Call all network notifier blocks. Parameters and return value
 * are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
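
/* Usage sketch (illustrative, not part of this file): users of packet
 * timestamps keep these calls strictly balanced, typically when a socket
 * timestamping option is turned on and off:
 *
 *	net_enable_timestamp();		// e.g. SO_TIMESTAMP enabled
 *	...
 *	net_disable_timestamp();	// and disabled again
 */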

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length, as the packet
	 * could be forwarded without being segmented first
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 * NET_RX_SUCCESS (no congestion)
 * NET_RX_DROP (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->protocol = eth_type_trans(skb, dev);

	/* eth_type_trans() can set pkt_type, so skb_scrub_packet() must be
	 * called after it so that any pkt_type it set is cleared.
	 */
	skb_scrub_packet(skb, true);

	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
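
/* Usage sketch (illustrative, not part of this file): a veth-style driver
 * can loop frames from its transmit routine into its peer's receive path.
 * The peer lookup below is hypothetical driver-private code:
 *
 *	static netdev_tx_t my_pair_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */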

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

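/* Return true if delivering @skb to @ptype would hand the packet straight
 * back to the packet socket it was sent from.
 */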
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* The network header should be correctly set by
			 * the sender, so the check below is just protection
			 * against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this, verify that each tc mapping remains valid,
 * and zero the mapping if not; with no priorities mapping to an
 * offset/count pair, that pair will no longer be used. In the worst
 * case, when TC0 is invalid, nothing can be done, so priority mappings
 * are disabled entirely. It is expected that drivers will fix this
 * mapping if they can before calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

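/* Remove queue @index from @cpu's map in @dev_maps, freeing the map once
 * it becomes empty. Returns the map still in place, or NULL if it was
 * freed or was never there.
 */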
static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}

	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
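
/* Usage sketch (illustrative, not part of this file): pin transmit
 * queue 0 to CPU 0, error handling elided:
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(0, mask);
 *		netif_set_xps_queue(dev, mask, 0);
 *		free_cpumask_var(mask);
 *	}
 */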
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
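
/* Usage sketch (illustrative, not part of this file): a multiqueue driver
 * trims the queue count to what it actually allocated, e.g. in its open
 * routine ("nr_hw_queues" and "err_close" are hypothetical):
 *
 *	err = netif_set_real_num_tx_queues(dev, nr_hw_queues);
 *	if (err)
 *		goto err_close;
 */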

#ifdef CONFIG_RPS
/**
 * netif_set_real_num_rx_queues - set actual number of RX queues used
 * @dev: Network device
 * @rxq: Actual number of RX queues
 *
 * This must be called either with the rtnl_lock held or before
 * registration of the net device. Returns 0 on success, or a
 * negative error code. If called before registration, it always
 * succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from the system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
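
/* Usage sketch (illustrative, not part of this file): drivers pair these
 * in their power-management hooks (the names below are hypothetical):
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	// ...and netif_device_attach(dev) again on resume.
 */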

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
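
/* Usage sketch (illustrative, not part of this file): software fallback
 * before handing an skb to hardware that cannot checksum its protocol,
 * much as dev_hard_start_xmit() does further down in this file
 * (the "drop" label is hypothetical):
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !(features & NETIF_F_ALL_CSUM) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */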

__be16 skb_network_protocol(struct sk_buff *skb)
{
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return 0;

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	return type;
}

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	__be16 type = skb_network_protocol(skb);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, skb->mac_len);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				int err;

				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);


/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and can map all of the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 * dev_gso_segment - Perform emulated hardware segmentation on skb.
 * @skb: buffer to segment
 * @features: device features as applicable to this skb
 *
 * This function segments the given skb and stores the list of segments
 * in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

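/* Mask out offload features the device cannot honour for this particular
 * skb: hardware checksumming when the protocol is unsupported, or
 * scatter-gather when a fragment is not DMA-addressable.
 */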
static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_STAG_TX;

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);

/*
 * Returns true if either:
 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
 * 2. skb is fragmented and the device does not support SG.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
			((skb_has_frag_list(skb) &&
				!(features & NETIF_F_FRAGLIST)) ||
			(skb_shinfo(skb)->nr_frags &&
				!(features & NETIF_F_SG)));
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;
	unsigned int skb_len;

	if (likely(!skb->next)) {
		netdev_features_t features;

Eric Dumazet93f154b2009-05-18 22:19:19 -07002550 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002551 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002552 * its hot in this cpu cache
2553 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002554 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2555 skb_dst_drop(skb);
2556
Jesse Grossfc741212011-01-09 06:23:32 +00002557 features = netif_skb_features(skb);
2558
Jesse Gross7b9c6092010-10-20 13:56:04 +00002559 if (vlan_tx_tag_present(skb) &&
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002560 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2561 skb = __vlan_put_tag(skb, skb->vlan_proto,
2562 vlan_tx_tag_get(skb));
Jesse Gross7b9c6092010-10-20 13:56:04 +00002563 if (unlikely(!skb))
2564 goto out;
2565
2566 skb->vlan_tci = 0;
2567 }
2568
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002569 /* If encapsulation offload request, verify we are testing
2570 * hardware encapsulation features instead of standard
2571 * features for the netdev
2572 */
2573 if (skb->encapsulation)
2574 features &= dev->hw_enc_features;
2575
Jesse Grossfc741212011-01-09 06:23:32 +00002576 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002577 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002578 goto out_kfree_skb;
2579 if (skb->next)
2580 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002581 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002582 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002583 __skb_linearize(skb))
2584 goto out_kfree_skb;
2585
2586 /* If packet is not checksummed and device does not
2587 * support checksumming for this protocol, complete
2588 * checksumming here.
2589 */
2590 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002591 if (skb->encapsulation)
2592 skb_set_inner_transport_header(skb,
2593 skb_checksum_start_offset(skb));
2594 else
2595 skb_set_transport_header(skb,
2596 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002597 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002598 skb_checksum_help(skb))
2599 goto out_kfree_skb;
2600 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002601 }
2602
Eric Dumazetb40863c2012-09-18 20:44:49 +00002603 if (!list_empty(&ptype_all))
2604 dev_queue_xmit_nit(skb, dev);
2605
Koki Sanagiec764bf2011-05-30 21:48:34 +00002606 skb_len = skb->len;
Patrick Ohlyac45f602009-02-12 05:03:37 +00002607 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002608 trace_net_dev_xmit(skb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002609 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002610 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002611 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002612 }
2613
Herbert Xu576a30e2006-06-27 13:22:38 -07002614gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002615 do {
2616 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002617
2618 skb->next = nskb->next;
2619 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002620
Eric Dumazetb40863c2012-09-18 20:44:49 +00002621 if (!list_empty(&ptype_all))
2622 dev_queue_xmit_nit(nskb, dev);
2623
Koki Sanagiec764bf2011-05-30 21:48:34 +00002624 skb_len = nskb->len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002625 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002626 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002627 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002628 if (rc & ~NETDEV_TX_MASK)
2629 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002630 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002631 skb->next = nskb;
2632 return rc;
2633 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002634 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002635 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002636 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002637 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002638
Patrick McHardy572a9d72009-11-10 06:14:14 +00002639out_kfree_gso_skb:
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002640 if (likely(skb->next == NULL)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002641 skb->destructor = DEV_GSO_CB(skb)->destructor;
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002642 consume_skb(skb);
2643 return rc;
2644 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002645out_kfree_skb:
2646 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002647out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002648 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002649}
2650
Eric Dumazet1def9232013-01-10 12:36:42 +00002651static void qdisc_pkt_len_init(struct sk_buff *skb)
2652{
2653 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2654
2655 qdisc_skb_cb(skb)->pkt_len = skb->len;
2656
2657 /* To get more precise estimation of bytes sent on wire,
2658 * we add to pkt_len the headers size of all segments
2659 */
2660 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002661 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002662 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002663
Eric Dumazet757b8b12013-01-15 21:14:21 -08002664 /* mac layer + network layer */
2665 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2666
2667 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002668 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2669 hdr_len += tcp_hdrlen(skb);
2670 else
2671 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002672
2673 if (shinfo->gso_type & SKB_GSO_DODGY)
2674 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2675 shinfo->gso_size);
2676
2677 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002678 }
2679}
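
/*
 * Worked example (assuming plain TCPv4 over Ethernet): with hdr_len = 66
 * (14 MAC + 20 IP + 32 TCP with timestamps), gso_size = 1448 and three
 * segments, skb->len is 3 * 1448 + 66 = 4410, so pkt_len becomes
 * 4410 + (3 - 1) * 66 = 4542, matching the three 1514-byte frames that
 * segmentation will actually put on the wire.
 */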

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get the qdisc main lock.
	 * This permits the __QDISC_STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
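
/*
 * Illustration of the busylock heuristic (hypothetical timeline): while
 * CPU0 owns __QDISC_STATE_RUNNING and is dequeuing, CPUs 1-3 entering
 * __dev_xmit_skb() first serialize on q->busylock, so at most one of
 * them contends with CPU0 for root_lock at any instant. Without the
 * busylock, all three would hammer root_lock directly and slow down
 * CPU0's dequeue loop.
 */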

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 *	dev_loopback_xmit - loop back @skb
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
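
/*
 * Usage sketch (assumes the caller already holds a valid dst, as the
 * multicast loopback paths do): loop a copy of an outgoing packet back
 * into the stack while keeping ownership of the original skb.
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		dev_loopback_xmit(nskb);
 *
 * The clone is consumed: it is re-queued to the receive path via
 * netif_rx_ni().
 */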

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *	I notice this method can also return errors from the queue disciplines,
 *	including NET_XMIT_DROP, which is a positive value. So, errors can also
 *	be positive.
 *
 *	Regardless of the return value, the skb is consumed, so it is currently
 *	difficult to retry a send to this method. (You can bump the ref count
 *	before sending to hold a reference for retry if you are careful.)
 *
 *	When calling this method, interrupts MUST be enabled. This is because
 *	the BH enable code must have IRQs enabled so that it will not deadlock.
 *					--BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	txq = netdev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here. (f.e. loopback and IP tunnels are clean, ignoring statistics
	   counters.)
	   However, it is possible that they rely on the protection
	   we provide here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately.
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
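
/*
 * Usage sketch: a protocol that has resolved its output device and built
 * the frame hands it off like this (stats is a hypothetical counter
 * structure; the skb is consumed whatever the outcome, so there is no
 * retry without taking an extra reference first):
 *
 *	skb->dev = dev;
 *	skb->priority = sk->sk_priority;
 *	rc = dev_queue_xmit(skb);
 *	if (rc < 0)
 *		stats->tx_errors++;
 *
 * A negative rc is an errno; positive values such as NET_XMIT_DROP are
 * qdisc verdicts.
 */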


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
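
/*
 * Example of the switch condition above: suppose a flow's table entry
 * points at CPU2 (tcpu) but recvmsg last ran on CPU5 (next_cpu). The
 * flow only moves to CPU5 once CPU2's input_queue_head has advanced
 * past rflow->last_qtail, i.e. once every packet this entry enqueued
 * on CPU2 has been drained, so the move cannot reorder the flow.
 */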

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);
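
/*
 * Driver-side sketch of the periodic scan described above. The filter
 * bookkeeping structure and example_hw_remove_filter() are hypothetical,
 * standing in for whatever the hardware needs:
 *
 *	for (i = 0; i < n_filters; i++) {
 *		struct example_filter *f = &filters[i];
 *
 *		if (rps_may_expire_flow(dev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			example_hw_remove_filter(f);
 *	}
 */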

#endif /* CONFIG_RFS_ACCEL */

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check whether this softnet_data structure belongs to another CPU.
 * If yes, queue it to our IPI list and return 1;
 * if no, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = &__get_cpu_var(softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
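
/*
 * Worked example: each call hashes the new skb into one of
 * fl->num_buckets buckets and retires the hash recorded
 * FLOW_LIMIT_HISTORY packets ago, so fl->buckets[] always holds each
 * bucket's share of the most recent FLOW_LIMIT_HISTORY backlog packets.
 * A packet is only dropped when the backlog is already at least half
 * full and its bucket accounts for more than half of that window,
 * i.e. when a single flow is responsible for the pressure.
 */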

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for the backlog device.
		 * We can use a non-atomic operation since we own the queue
		 * lock.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process. It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
EXPORT_SYMBOL(netif_rx);
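
/*
 * Usage sketch: a non-NAPI driver's RX interrupt handler typically ends
 * like this (example_build_rx_skb() is hypothetical, standing in for the
 * device-specific allocation and DMA copy):
 *
 *	skb = example_build_rx_skb(priv);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * From process context, use netif_rx_ni() below instead so that a
 * softirq raised by the call does not go unserviced.
 */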

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 * instructions (a compare and 2 extra stores) if we don't have it on
 * but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
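
/*
 * Usage sketch, modelled on what bridge/bonding-style drivers do when
 * enslaving a device (the port structure and handler are hypothetical):
 *
 *	err = netdev_rx_handler_register(slave_dev, example_handle_frame,
 *					 port);
 *	if (err)
 *		goto unwind;
 *
 * The handler later retrieves its data via
 * rcu_dereference(skb->dev->rx_handler_data) and returns one of the
 * rx_handler_result verdicts (CONSUMED, ANOTHER, EXACT or PASS).
 */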

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{

	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_ARP):
	case __constant_htons(ETH_P_IP):
	case __constant_htons(ETH_P_IPV6):
	case __constant_htons(ETH_P_8021Q):
	case __constant_htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		goto out;

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(vlan_tx_tag_present(skb))) {
		if (vlan_tx_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive().
		 * For the time being, just ignore the Priority Code Point.
		 */
		skb->vlan_tci = 0;
	}

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
out:
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the
		 * allocation context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);

/* Network device is going away, flush any packets still pending.
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

/* napi->gro_list contains packets ordered by age;
 * the youngest packets are at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003789
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003790static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3791{
3792 struct sk_buff *p;
3793 unsigned int maclen = skb->dev->hard_header_len;
3794
3795 for (p = napi->gro_list; p; p = p->next) {
3796 unsigned long diffs;
3797
3798 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3799 diffs |= p->vlan_tci ^ skb->vlan_tci;
3800 if (maclen == ETH_HLEN)
3801 diffs |= compare_ether_header(skb_mac_header(p),
3802 skb_gro_mac_header(skb));
3803 else if (!diffs)
3804 diffs = memcmp(skb_mac_header(p),
3805 skb_gro_mac_header(skb),
3806 maclen);
3807 NAPI_GRO_CB(p)->same_flow = !diffs;
3808 NAPI_GRO_CB(p)->flush = 0;
3809 }
3810}
3811
Rami Rosenbb728822012-11-28 21:55:25 +00003812static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003813{
3814 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003815 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003816 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003817 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003818 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003819 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003820
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003821 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003822 goto normal;
3823
David S. Miller21dc3302010-08-23 00:13:46 -07003824 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003825 goto normal;
3826
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003827 gro_list_prepare(napi, skb);
3828
Herbert Xud565b0a2008-12-15 23:38:52 -08003829 rcu_read_lock();
3830 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003831 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003832 continue;
3833
Herbert Xu86911732009-01-29 14:19:50 +00003834 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00003835 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003836 NAPI_GRO_CB(skb)->same_flow = 0;
3837 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003838 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003839
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003840 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003841 break;
3842 }
3843 rcu_read_unlock();
3844
3845 if (&ptype->list == head)
3846 goto normal;
3847
Herbert Xu0da2afd52008-12-26 14:57:42 -08003848 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003849 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003850
Herbert Xud565b0a2008-12-15 23:38:52 -08003851 if (pp) {
3852 struct sk_buff *nskb = *pp;
3853
3854 *pp = nskb->next;
3855 nskb->next = NULL;
3856 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003857 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003858 }
3859
Herbert Xu0da2afd52008-12-26 14:57:42 -08003860 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003861 goto ok;
3862
Herbert Xu4ae55442009-02-08 18:00:36 +00003863 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003864 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003865
Herbert Xu4ae55442009-02-08 18:00:36 +00003866 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003867 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003868 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003869 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003870 skb->next = napi->gro_list;
3871 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003872 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003873
Herbert Xuad0f9902009-02-01 01:24:55 -08003874pull:
Herbert Xucb189782009-05-26 18:50:31 +00003875 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3876 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3877
3878 BUG_ON(skb->end - skb->tail < grow);
3879
3880 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3881
3882 skb->tail += grow;
3883 skb->data_len -= grow;
3884
3885 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003886 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003887
Eric Dumazet9e903e02011-10-18 21:00:24 +00003888 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003889 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003890 memmove(skb_shinfo(skb)->frags,
3891 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003892 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003893 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003894 }
3895
Herbert Xud565b0a2008-12-15 23:38:52 -08003896ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003897 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003898
3899normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003900 ret = GRO_NORMAL;
3901 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003902}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003903
Herbert Xu96e93ea2009-01-06 10:49:34 -08003904
Rami Rosenbb728822012-11-28 21:55:25 +00003905static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003906{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003907 switch (ret) {
3908 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003909 if (netif_receive_skb(skb))
3910 ret = GRO_DROP;
3911 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003912
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003913 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003914 kfree_skb(skb);
3915 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003916
Eric Dumazetdaa86542012-04-19 07:07:40 +00003917 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003918 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3919 kmem_cache_free(skbuff_head_cache, skb);
3920 else
3921 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003922 break;
3923
Ben Hutchings5b252f02009-10-29 07:17:09 +00003924 case GRO_HELD:
3925 case GRO_MERGED:
3926 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003927 }
3928
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003929 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003930}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003931
Eric Dumazetca07e432012-10-06 22:28:06 +00003932static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003933{
Eric Dumazetca07e432012-10-06 22:28:06 +00003934 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3935 const skb_frag_t *frag0 = &pinfo->frags[0];
3936
Herbert Xu78a478d2009-05-26 18:50:21 +00003937 NAPI_GRO_CB(skb)->data_offset = 0;
3938 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003939 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003940
Simon Hormanced14f62013-05-28 20:34:25 +00003941 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003942 pinfo->nr_frags &&
3943 !PageHighMem(skb_frag_page(frag0))) {
3944 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3945 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003946 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003947}
Herbert Xu78a478d2009-05-26 18:50:21 +00003948
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003949gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003950{
Herbert Xu86911732009-01-29 14:19:50 +00003951 skb_gro_reset_offset(skb);
3952
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003953 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003954}
3955EXPORT_SYMBOL(napi_gro_receive);
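
/* A minimal sketch of the driver side of napi_gro_receive(): an RX
 * cleanup loop feeding each completed frame into GRO. The hypo_*
 * names and struct hypo_priv are hypothetical, assumed only for
 * illustration.
 */
static int hypo_clean_rx(struct hypo_priv *priv, int budget)
{
        struct sk_buff *skb;
        int work = 0;

        /* hypo_next_rx_skb() would unmap a descriptor and build an skb */
        while (work < budget && (skb = hypo_next_rx_skb(priv))) {
                skb->protocol = eth_type_trans(skb, priv->netdev);
                /* merged into napi->gro_list, held, or passed up normally */
                napi_gro_receive(&priv->napi, skb);
                work++;
        }
        return work;
}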
3956
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003957static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003958{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003959 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003960 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3961 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003962 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003963 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003964 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003965
3966 napi->skb = skb;
3967}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003968
Herbert Xu76620aa2009-04-16 02:02:07 -07003969struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003970{
Herbert Xu5d38a072009-01-04 16:13:40 -08003971 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003972
3973 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003974 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3975 if (skb)
3976 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003977 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003978 return skb;
3979}
Herbert Xu76620aa2009-04-16 02:02:07 -07003980EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003981
Rami Rosenbb728822012-11-28 21:55:25 +00003982static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003983 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003984{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003985 switch (ret) {
3986 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003987 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003988 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003989
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003990 if (ret == GRO_HELD)
3991 skb_gro_pull(skb, -ETH_HLEN);
3992 else if (netif_receive_skb(skb))
3993 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003994 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003995
3996 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003997 case GRO_MERGED_FREE:
3998 napi_reuse_skb(napi, skb);
3999 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004000
4001 case GRO_MERGED:
4002 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004003 }
4004
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004005 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004006}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004007
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004008static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004009{
Herbert Xu76620aa2009-04-16 02:02:07 -07004010 struct sk_buff *skb = napi->skb;
4011 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00004012 unsigned int hlen;
4013 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07004014
4015 napi->skb = NULL;
4016
4017 skb_reset_mac_header(skb);
4018 skb_gro_reset_offset(skb);
4019
Herbert Xua5b1cf22009-05-26 18:50:28 +00004020 off = skb_gro_offset(skb);
4021 hlen = off + sizeof(*eth);
4022 eth = skb_gro_header_fast(skb, off);
4023 if (skb_gro_header_hard(skb, hlen)) {
4024 eth = skb_gro_header_slow(skb, hlen, off);
4025 if (unlikely(!eth)) {
4026 napi_reuse_skb(napi, skb);
4027 skb = NULL;
4028 goto out;
4029 }
Herbert Xu76620aa2009-04-16 02:02:07 -07004030 }
4031
4032 skb_gro_pull(skb, sizeof(*eth));
4033
4034 /*
4035 * This works because the only protocols we care about don't require
4036 * special handling. We'll fix it up properly at the end.
4037 */
4038 skb->protocol = eth->h_proto;
4039
4040out:
4041 return skb;
4042}
Herbert Xu76620aa2009-04-16 02:02:07 -07004043
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004044gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004045{
4046 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004047
4048 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004049 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004050
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004051 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004052}
4053EXPORT_SYMBOL(napi_gro_frags);
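
/* A sketch of the companion napi_get_frags()/napi_gro_frags() pattern
 * for drivers that receive straight into pages; "page" and "len" would
 * come from a hypothetical RX descriptor. Note that napi_gro_frags()
 * pulls the Ethernet header itself via napi_frags_skb() above, so the
 * driver never touches the headers.
 */
static void hypo_rx_page(struct napi_struct *napi, struct page *page,
                         unsigned int len)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb))
                return;         /* out of memory; caller recycles the page */

        skb_fill_page_desc(skb, 0, page, 0, len);
        skb->len += len;
        skb->data_len += len;
        skb->truesize += PAGE_SIZE;     /* assumes one full page per frag */

        napi_gro_frags(napi);
}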
4054
Eric Dumazete326bed2010-04-22 00:22:45 -07004055/*
4056 * net_rps_action sends any pending IPIs for RPS.
4057 * Note: called with local irq disabled, but exits with local irq enabled.
4058 */
4059static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4060{
4061#ifdef CONFIG_RPS
4062 struct softnet_data *remsd = sd->rps_ipi_list;
4063
4064 if (remsd) {
4065 sd->rps_ipi_list = NULL;
4066
4067 local_irq_enable();
4068
4069 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4070 while (remsd) {
4071 struct softnet_data *next = remsd->rps_ipi_next;
4072
4073 if (cpu_online(remsd->cpu))
4074 __smp_call_function_single(remsd->cpu,
4075 &remsd->csd, 0);
4076 remsd = next;
4077 }
4078 } else
4079#endif
4080 local_irq_enable();
4081}
4082
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004083static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084{
4085 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004086 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087
Eric Dumazete326bed2010-04-22 00:22:45 -07004088#ifdef CONFIG_RPS
4089 /* Check if we have pending IPIs; it's better to send them now,
4090 * rather than waiting for net_rx_action() to end.
4091 */
4092 if (sd->rps_ipi_list) {
4093 local_irq_disable();
4094 net_rps_action_and_irq_enable(sd);
4095 }
4096#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004097 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004098 local_irq_disable();
4099 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004101 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102
Changli Gao6e7676c2010-04-27 15:07:33 -07004103 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004104 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004105 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004106 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004107 input_queue_head_incr(sd);
4108 if (++work >= quota) {
4109 local_irq_enable();
4110 return work;
4111 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004112 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113
Changli Gao6e7676c2010-04-27 15:07:33 -07004114 rps_lock(sd);
4115 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004116 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004117 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4118 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004119
Changli Gao6e7676c2010-04-27 15:07:33 -07004120 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004121 /*
4122 * Inline a custom version of __napi_complete().
4123 * Only the current cpu owns and manipulates this napi,
4124 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
4125 * We can use a plain write instead of clear_bit(),
4126 * and we don't need an smp_mb() memory barrier.
4127 */
4128 list_del(&napi->poll_list);
4129 napi->state = 0;
4130
Changli Gao6e7676c2010-04-27 15:07:33 -07004131 quota = work + qlen;
4132 }
4133 rps_unlock(sd);
4134 }
4135 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004137 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138}
4139
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004140/**
4141 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004142 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004143 *
4144 * The entry's receive function will be scheduled to run
4145 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004146void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004147{
4148 unsigned long flags;
4149
4150 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004151 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004152 local_irq_restore(flags);
4153}
4154EXPORT_SYMBOL(__napi_schedule);
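
/* A sketch of the usual interrupt-side pairing with __napi_schedule().
 * Drivers normally use napi_schedule(), which tests NAPI_STATE_SCHED
 * via napi_schedule_prep() first; hypo_mask_irqs() stands in for
 * whatever silences the NIC until polling completes.
 */
static irqreturn_t hypo_interrupt(int irq, void *dev_id)
{
        struct hypo_priv *priv = dev_id;

        hypo_mask_irqs(priv);
        napi_schedule(&priv->napi);     /* queue on this CPU's poll list */
        return IRQ_HANDLED;
}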
4155
Herbert Xud565b0a2008-12-15 23:38:52 -08004156void __napi_complete(struct napi_struct *n)
4157{
4158 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4159 BUG_ON(n->gro_list);
4160
4161 list_del(&n->poll_list);
4162 smp_mb__before_clear_bit();
4163 clear_bit(NAPI_STATE_SCHED, &n->state);
4164}
4165EXPORT_SYMBOL(__napi_complete);
4166
4167void napi_complete(struct napi_struct *n)
4168{
4169 unsigned long flags;
4170
4171 /*
4172 * Don't let napi dequeue from the cpu poll list
4173 * just in case it's running on a different cpu.
4174 */
4175 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4176 return;
4177
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004178 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004179 local_irq_save(flags);
4180 __napi_complete(n);
4181 local_irq_restore(flags);
4182}
4183EXPORT_SYMBOL(napi_complete);
4184
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004185/* must be called under rcu_read_lock(), as we don't take a reference */
4186struct napi_struct *napi_by_id(unsigned int napi_id)
4187{
4188 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4189 struct napi_struct *napi;
4190
4191 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4192 if (napi->napi_id == napi_id)
4193 return napi;
4194
4195 return NULL;
4196}
4197EXPORT_SYMBOL_GPL(napi_by_id);
4198
4199void napi_hash_add(struct napi_struct *napi)
4200{
4201 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4202
4203 spin_lock(&napi_hash_lock);
4204
4205 /* 0 is not a valid id; we also skip an id that is taken.
4206 * We expect both events to be extremely rare.
4207 */
4208 napi->napi_id = 0;
4209 while (!napi->napi_id) {
4210 napi->napi_id = ++napi_gen_id;
4211 if (napi_by_id(napi->napi_id))
4212 napi->napi_id = 0;
4213 }
4214
4215 hlist_add_head_rcu(&napi->napi_hash_node,
4216 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4217
4218 spin_unlock(&napi_hash_lock);
4219 }
4220}
4221EXPORT_SYMBOL_GPL(napi_hash_add);
4222
4223/* Warning: the caller is responsible for making sure an RCU grace period
4224 * is respected before freeing the memory containing @napi.
4225 */
4226void napi_hash_del(struct napi_struct *napi)
4227{
4228 spin_lock(&napi_hash_lock);
4229
4230 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4231 hlist_del_rcu(&napi->napi_hash_node);
4232
4233 spin_unlock(&napi_hash_lock);
4234}
4235EXPORT_SYMBOL_GPL(napi_hash_del);
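
/* A sketch of the teardown order the warning above asks for, assuming
 * the driver called napi_hash_add() right after netif_napi_add(). The
 * synchronize_rcu() is the key step: it lets napi_by_id() readers
 * drain before the memory holding the napi_struct is freed.
 */
static void hypo_teardown_napi(struct hypo_priv *priv)
{
        napi_hash_del(&priv->napi);
        synchronize_rcu();
        netif_napi_del(&priv->napi);
        kfree(priv);                    /* napi_struct is embedded in priv */
}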
4236
Herbert Xud565b0a2008-12-15 23:38:52 -08004237void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4238 int (*poll)(struct napi_struct *, int), int weight)
4239{
4240 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004241 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004242 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004243 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004244 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004245 if (weight > NAPI_POLL_WEIGHT)
4246 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4247 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004248 napi->weight = weight;
4249 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004250 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004251#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004252 spin_lock_init(&napi->poll_lock);
4253 napi->poll_owner = -1;
4254#endif
4255 set_bit(NAPI_STATE_SCHED, &napi->state);
4256}
4257EXPORT_SYMBOL(netif_napi_add);
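
/* A sketch tying registration to a poll callback: the hypothetical
 * hypo_poll() below reuses hypo_clean_rx() from the napi_gro_receive()
 * sketch and completes NAPI only when it stays under budget.
 */
static int hypo_poll(struct napi_struct *napi, int budget)
{
        struct hypo_priv *priv = container_of(napi, struct hypo_priv, napi);
        int work = hypo_clean_rx(priv, budget);

        if (work < budget) {
                /* under budget: we still own the instance, so complete it */
                napi_complete(napi);
                hypo_unmask_irqs(priv);
        }
        return work;
}

static void hypo_setup_napi(struct hypo_priv *priv)
{
        /* weights above NAPI_POLL_WEIGHT (64) trigger the pr_err_once() above */
        netif_napi_add(priv->netdev, &priv->napi, hypo_poll, NAPI_POLL_WEIGHT);
}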
4258
4259void netif_napi_del(struct napi_struct *napi)
4260{
4261 struct sk_buff *skb, *next;
4262
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004263 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004264 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004265
4266 for (skb = napi->gro_list; skb; skb = next) {
4267 next = skb->next;
4268 skb->next = NULL;
4269 kfree_skb(skb);
4270 }
4271
4272 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004273 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004274}
4275EXPORT_SYMBOL(netif_napi_del);
4276
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277static void net_rx_action(struct softirq_action *h)
4278{
Eric Dumazete326bed2010-04-22 00:22:45 -07004279 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004280 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004281 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004282 void *have;
4283
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284 local_irq_disable();
4285
Eric Dumazete326bed2010-04-22 00:22:45 -07004286 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004287 struct napi_struct *n;
4288 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004290 /* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004291 * Allow this to run for 2 jiffies, which allows
4292 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004293 */
Eric Dumazetd1f41b62013-03-05 07:15:13 +00004294 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 goto softnet_break;
4296
4297 local_irq_enable();
4298
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004299 /* Even though interrupts have been re-enabled, this
4300 * access is safe because interrupts can only add new
4301 * entries to the tail of this list, and only ->poll()
4302 * calls can remove this head entry from the list.
4303 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004304 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004306 have = netpoll_poll_lock(n);
4307
4308 weight = n->weight;
4309
David S. Miller0a7606c2007-10-29 21:28:47 -07004310 /* This NAPI_STATE_SCHED test is for avoiding a race
4311 * with netpoll's poll_napi(). Only the entity which
4312 * obtains the lock and sees NAPI_STATE_SCHED set will
4313 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004314 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004315 */
4316 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004317 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004318 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004319 trace_napi_poll(n);
4320 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004321
4322 WARN_ON_ONCE(work > weight);
4323
4324 budget -= work;
4325
4326 local_irq_disable();
4327
4328 /* Drivers must not modify the NAPI state if they
4329 * consume the entire weight. In such cases this code
4330 * still "owns" the NAPI instance and therefore can
4331 * move the instance around on the list at-will.
4332 */
David S. Millerfed17f32008-01-07 21:00:40 -08004333 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004334 if (unlikely(napi_disable_pending(n))) {
4335 local_irq_enable();
4336 napi_complete(n);
4337 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004338 } else {
4339 if (n->gro_list) {
4340 /* flush too old packets
4341 * If HZ < 1000, flush all packets.
4342 */
4343 local_irq_enable();
4344 napi_gro_flush(n, HZ >= 1000);
4345 local_irq_disable();
4346 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004347 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004348 }
David S. Millerfed17f32008-01-07 21:00:40 -08004349 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004350
4351 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352 }
4353out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004354 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004355
Chris Leechdb217332006-06-17 21:24:58 -07004356#ifdef CONFIG_NET_DMA
4357 /*
4358 * There may not be any more sk_buffs coming right now, so push
4359 * any pending DMA copies to hardware
4360 */
Dan Williams2ba05622009-01-06 11:38:14 -07004361 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004362#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004363
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 return;
4365
4366softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004367 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4369 goto out;
4370}
4371
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004372struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004373 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004374
4375 /* upper master flag; there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004376 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004377
Veaceslav Falico5d261912013-08-28 23:25:05 +02004378 /* counter for the number of times this device was added to us */
4379 u16 ref_nr;
4380
Veaceslav Falico402dae92013-09-25 09:20:09 +02004381 /* private field for the users */
4382 void *private;
4383
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004384 struct list_head list;
4385 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004386};
4387
Veaceslav Falico5249dec2013-09-25 09:20:08 +02004388static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
4389 struct net_device *adj_dev,
4390 struct list_head *adj_list)
4391{
4392 struct netdev_adjacent *adj;
4393
4394 list_for_each_entry_rcu(adj, adj_list, list) {
4395 if (adj->dev == adj_dev)
4396 return adj;
4397 }
4398 return NULL;
4399}
4400
Veaceslav Falico5d261912013-08-28 23:25:05 +02004401static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4402 struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004403 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004404{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004405 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004406
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004407 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004408 if (adj->dev == adj_dev)
4409 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004410 }
4411 return NULL;
4412}
4413
4414/**
4415 * netdev_has_upper_dev - Check if device is linked to an upper device
4416 * @dev: device
4417 * @upper_dev: upper device to check
4418 *
4419 * Find out if a device is linked to the specified upper device and return true
4420 * in case it is. Note that this checks only the immediate upper device,
4421 * not through a complete stack of devices. The caller must hold the RTNL lock.
4422 */
4423bool netdev_has_upper_dev(struct net_device *dev,
4424 struct net_device *upper_dev)
4425{
4426 ASSERT_RTNL();
4427
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004428 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004429}
4430EXPORT_SYMBOL(netdev_has_upper_dev);
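
/* A sketch of a pre-flight check a stacking driver might do before
 * linking; __netdev_upper_dev_link() repeats these checks
 * authoritatively, so this is only an early-exit convenience.
 */
static bool hypo_can_link(struct net_device *port, struct net_device *master)
{
        ASSERT_RTNL();
        return port != master &&
               !netdev_has_upper_dev(port, master) &&  /* else -EEXIST */
               !netdev_has_upper_dev(master, port);    /* else a loop, -EBUSY */
}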
4431
4432/**
4433 * netdev_has_any_upper_dev - Check if device is linked to some device
4434 * @dev: device
4435 *
4436 * Find out if a device is linked to an upper device and return true in case
4437 * it is. The caller must hold the RTNL lock.
4438 */
4439bool netdev_has_any_upper_dev(struct net_device *dev)
4440{
4441 ASSERT_RTNL();
4442
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004443 return !list_empty(&dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004444}
4445EXPORT_SYMBOL(netdev_has_any_upper_dev);
4446
4447/**
4448 * netdev_master_upper_dev_get - Get master upper device
4449 * @dev: device
4450 *
4451 * Find a master upper device and return pointer to it or NULL in case
4452 * it's not there. The caller must hold the RTNL lock.
4453 */
4454struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4455{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004456 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004457
4458 ASSERT_RTNL();
4459
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004460 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004461 return NULL;
4462
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004463 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004464 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004465 if (likely(upper->master))
4466 return upper->dev;
4467 return NULL;
4468}
4469EXPORT_SYMBOL(netdev_master_upper_dev_get);
4470
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02004471void *netdev_adjacent_get_private(struct list_head *adj_list)
4472{
4473 struct netdev_adjacent *adj;
4474
4475 adj = list_entry(adj_list, struct netdev_adjacent, list);
4476
4477 return adj->private;
4478}
4479EXPORT_SYMBOL(netdev_adjacent_get_private);
4480
Veaceslav Falico31088a12013-09-25 09:20:12 +02004481/**
4482 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
Veaceslav Falico48311f42013-08-28 23:25:07 +02004483 * @dev: device
4484 * @iter: list_head ** of the current position
4485 *
4486 * Gets the next device from the dev's upper list, starting from iter
4487 * position. The caller must hold RCU read lock.
4488 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004489struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4490 struct list_head **iter)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004491{
4492 struct netdev_adjacent *upper;
4493
4494 WARN_ON_ONCE(!rcu_read_lock_held());
4495
4496 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4497
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004498 if (&upper->list == &dev->all_adj_list.upper)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004499 return NULL;
4500
4501 *iter = &upper->list;
4502
4503 return upper->dev;
4504}
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004505EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
Veaceslav Falico48311f42013-08-28 23:25:07 +02004506
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004507/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02004508 * netdev_lower_get_next_private - Get the next ->private from the
4509 * lower neighbour list
4510 * @dev: device
4511 * @iter: list_head ** of the current position
4512 *
4513 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4514 * list, starting from iter position. The caller must hold either the
4515 * RTNL lock or its own locking that guarantees that the neighbour lower
4516 * list will remain unchanged.
4517 */
4518void *netdev_lower_get_next_private(struct net_device *dev,
4519 struct list_head **iter)
4520{
4521 struct netdev_adjacent *lower;
4522
4523 lower = list_entry(*iter, struct netdev_adjacent, list);
4524
4525 if (&lower->list == &dev->adj_list.lower)
4526 return NULL;
4527
4528 if (iter)
4529 *iter = lower->list.next;
4530
4531 return lower->private;
4532}
4533EXPORT_SYMBOL(netdev_lower_get_next_private);
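
/* A sketch of walking every lower device's private with the accessor
 * above. The iterator starts at the first list node because
 * netdev_lower_get_next_private() advances *iter past the entry it
 * returns; hypo_use_private() is a hypothetical per-port handler.
 */
static void hypo_walk_lower_privates(struct net_device *dev)
{
        struct list_head *iter = dev->adj_list.lower.next;
        void *priv;

        ASSERT_RTNL();
        while ((priv = netdev_lower_get_next_private(dev, &iter)))
                hypo_use_private(priv);
}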
4534
4535/**
4536 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4537 * lower neighbour list, RCU
4538 * variant
4539 * @dev: device
4540 * @iter: list_head ** of the current position
4541 *
4542 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4543 * list, starting from iter position. The caller must hold RCU read lock.
4544 */
4545void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4546 struct list_head **iter)
4547{
4548 struct netdev_adjacent *lower;
4549
4550 WARN_ON_ONCE(!rcu_read_lock_held());
4551
4552 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4553
4554 if (&lower->list == &dev->adj_list.lower)
4555 return NULL;
4556
4557 if (iter)
4558 *iter = &lower->list;
4559
4560 return lower->private;
4561}
4562EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4563
4564/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004565 * netdev_master_upper_dev_get_rcu - Get master upper device
4566 * @dev: device
4567 *
4568 * Find a master upper device and return pointer to it or NULL in case
4569 * it's not there. The caller must hold the RCU read lock.
4570 */
4571struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4572{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004573 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004574
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004575 upper = list_first_or_null_rcu(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004576 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004577 if (upper && likely(upper->master))
4578 return upper->dev;
4579 return NULL;
4580}
4581EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4582
Veaceslav Falico5d261912013-08-28 23:25:05 +02004583static int __netdev_adjacent_dev_insert(struct net_device *dev,
4584 struct net_device *adj_dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02004585 struct list_head *dev_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004586 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004587{
4588 struct netdev_adjacent *adj;
Veaceslav Falico5831d662013-09-25 09:20:32 +02004589 char linkname[IFNAMSIZ+7];
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004590 int ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004591
Veaceslav Falico7863c052013-09-25 09:20:06 +02004592 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004593
4594 if (adj) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004595 adj->ref_nr++;
4596 return 0;
4597 }
4598
4599 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4600 if (!adj)
4601 return -ENOMEM;
4602
4603 adj->dev = adj_dev;
4604 adj->master = master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004605 adj->ref_nr = 1;
Veaceslav Falico402dae92013-09-25 09:20:09 +02004606 adj->private = private;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004607 dev_hold(adj_dev);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004608
4609 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4610 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004611
Veaceslav Falico5831d662013-09-25 09:20:32 +02004612 if (dev_list == &dev->adj_list.lower) {
4613 sprintf(linkname, "lower_%s", adj_dev->name);
4614 ret = sysfs_create_link(&(dev->dev.kobj),
4615 &(adj_dev->dev.kobj), linkname);
4616 if (ret)
4617 goto free_adj;
4618 } else if (dev_list == &dev->adj_list.upper) {
4619 sprintf(linkname, "upper_%s", adj_dev->name);
4620 ret = sysfs_create_link(&(dev->dev.kobj),
4621 &(adj_dev->dev.kobj), linkname);
4622 if (ret)
4623 goto free_adj;
4624 }
4625
Veaceslav Falico7863c052013-09-25 09:20:06 +02004626 /* Ensure that master link is always the first item in list. */
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004627 if (master) {
4628 ret = sysfs_create_link(&(dev->dev.kobj),
4629 &(adj_dev->dev.kobj), "master");
4630 if (ret)
Veaceslav Falico5831d662013-09-25 09:20:32 +02004631 goto remove_symlinks;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004632
Veaceslav Falico7863c052013-09-25 09:20:06 +02004633 list_add_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004634 } else {
Veaceslav Falico7863c052013-09-25 09:20:06 +02004635 list_add_tail_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004636 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02004637
4638 return 0;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004639
Veaceslav Falico5831d662013-09-25 09:20:32 +02004640remove_symlinks:
4641 if (dev_list == &dev->adj_list.lower) {
4642 sprintf(linkname, "lower_%s", adj_dev->name);
4643 sysfs_remove_link(&(dev->dev.kobj), linkname);
4644 } else if (dev_list == &dev->adj_list.upper) {
4645 sprintf(linkname, "upper_%s", adj_dev->name);
4646 sysfs_remove_link(&(dev->dev.kobj), linkname);
4647 }
4648
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004649free_adj:
4650 kfree(adj);
Nikolay Aleksandrov974daef2013-10-23 15:28:56 +02004651 dev_put(adj_dev);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004652
4653 return ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004654}
4655
Veaceslav Falico5d261912013-08-28 23:25:05 +02004656void __netdev_adjacent_dev_remove(struct net_device *dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02004657 struct net_device *adj_dev,
4658 struct list_head *dev_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004659{
4660 struct netdev_adjacent *adj;
Veaceslav Falico5831d662013-09-25 09:20:32 +02004661 char linkname[IFNAMSIZ+7];
Veaceslav Falico5d261912013-08-28 23:25:05 +02004662
Veaceslav Falico7863c052013-09-25 09:20:06 +02004663 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004664
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004665 if (!adj) {
4666 pr_err("tried to remove device %s from %s\n",
4667 dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004668 BUG();
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004669 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02004670
4671 if (adj->ref_nr > 1) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004672 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4673 adj->ref_nr-1);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004674 adj->ref_nr--;
4675 return;
4676 }
4677
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004678 if (adj->master)
4679 sysfs_remove_link(&(dev->dev.kobj), "master");
4680
Veaceslav Falico5831d662013-09-25 09:20:32 +02004681 if (dev_list == &dev->adj_list.lower) {
4682 sprintf(linkname, "lower_%s", adj_dev->name);
4683 sysfs_remove_link(&(dev->dev.kobj), linkname);
4684 } else if (dev_list == &dev->adj_list.upper) {
4685 sprintf(linkname, "upper_%s", adj_dev->name);
4686 sysfs_remove_link(&(dev->dev.kobj), linkname);
4687 }
4688
Veaceslav Falico5d261912013-08-28 23:25:05 +02004689 list_del_rcu(&adj->list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004690 pr_debug("dev_put for %s, because link removed from %s to %s\n",
4691 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004692 dev_put(adj_dev);
4693 kfree_rcu(adj, rcu);
4694}
4695
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004696int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4697 struct net_device *upper_dev,
4698 struct list_head *up_list,
4699 struct list_head *down_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004700 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004701{
4702 int ret;
4703
Veaceslav Falico402dae92013-09-25 09:20:09 +02004704 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4705 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004706 if (ret)
4707 return ret;
4708
Veaceslav Falico402dae92013-09-25 09:20:09 +02004709 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4710 false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004711 if (ret) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004712 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004713 return ret;
4714 }
4715
4716 return 0;
4717}
4718
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004719int __netdev_adjacent_dev_link(struct net_device *dev,
4720 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004721{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004722 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
4723 &dev->all_adj_list.upper,
4724 &upper_dev->all_adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004725 NULL, false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004726}
4727
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004728void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
4729 struct net_device *upper_dev,
4730 struct list_head *up_list,
4731 struct list_head *down_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004732{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004733 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4734 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004735}
4736
4737void __netdev_adjacent_dev_unlink(struct net_device *dev,
4738 struct net_device *upper_dev)
4739{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004740 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4741 &dev->all_adj_list.upper,
4742 &upper_dev->all_adj_list.lower);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004743}
4744
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004745int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
4746 struct net_device *upper_dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004747 void *private, bool master)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004748{
4749 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
4750
4751 if (ret)
4752 return ret;
4753
4754 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
4755 &dev->adj_list.upper,
4756 &upper_dev->adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004757 private, master);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004758 if (ret) {
4759 __netdev_adjacent_dev_unlink(dev, upper_dev);
4760 return ret;
4761 }
4762
4763 return 0;
4764}
4765
4766void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
4767 struct net_device *upper_dev)
4768{
4769 __netdev_adjacent_dev_unlink(dev, upper_dev);
4770 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4771 &dev->adj_list.upper,
4772 &upper_dev->adj_list.lower);
4773}
Veaceslav Falico5d261912013-08-28 23:25:05 +02004774
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004775static int __netdev_upper_dev_link(struct net_device *dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004776 struct net_device *upper_dev, bool master,
4777 void *private)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004778{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004779 struct netdev_adjacent *i, *j, *to_i, *to_j;
4780 int ret = 0;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004781
4782 ASSERT_RTNL();
4783
4784 if (dev == upper_dev)
4785 return -EBUSY;
4786
4787 /* To prevent loops, check if dev is not upper device to upper_dev. */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004788 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004789 return -EBUSY;
4790
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004791 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004792 return -EEXIST;
4793
4794 if (master && netdev_master_upper_dev_get(dev))
4795 return -EBUSY;
4796
Veaceslav Falico402dae92013-09-25 09:20:09 +02004797 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
4798 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004799 if (ret)
4800 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004801
Veaceslav Falico5d261912013-08-28 23:25:05 +02004802 /* Now that we linked these devs, make all the upper_dev's
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004803 * all_adj_list.upper visible to every dev's all_adj_list.lower and
Veaceslav Falico5d261912013-08-28 23:25:05 +02004804 * vice versa, and don't forget the devices themselves. All of these
4805 * links are non-neighbours.
4806 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004807 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4808 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
4809 pr_debug("Interlinking %s with %s, non-neighbour\n",
4810 i->dev->name, j->dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004811 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
4812 if (ret)
4813 goto rollback_mesh;
4814 }
4815 }
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004816
Veaceslav Falico5d261912013-08-28 23:25:05 +02004817 /* add dev to every upper_dev's upper device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004818 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
4819 pr_debug("linking %s's upper device %s with %s\n",
4820 upper_dev->name, i->dev->name, dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004821 ret = __netdev_adjacent_dev_link(dev, i->dev);
4822 if (ret)
4823 goto rollback_upper_mesh;
4824 }
4825
4826 /* add upper_dev to every dev's lower device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004827 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4828 pr_debug("linking %s's lower device %s with %s\n", dev->name,
4829 i->dev->name, upper_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004830 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
4831 if (ret)
4832 goto rollback_lower_mesh;
4833 }
4834
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004835 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004836 return 0;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004837
4838rollback_lower_mesh:
4839 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004840 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004841 if (i == to_i)
4842 break;
4843 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4844 }
4845
4846 i = NULL;
4847
4848rollback_upper_mesh:
4849 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004850 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004851 if (i == to_i)
4852 break;
4853 __netdev_adjacent_dev_unlink(dev, i->dev);
4854 }
4855
4856 i = j = NULL;
4857
4858rollback_mesh:
4859 to_i = i;
4860 to_j = j;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004861 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4862 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004863 if (i == to_i && j == to_j)
4864 break;
4865 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4866 }
4867 if (i == to_i)
4868 break;
4869 }
4870
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004871 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004872
4873 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004874}
4875
4876/**
4877 * netdev_upper_dev_link - Add a link to the upper device
4878 * @dev: device
4879 * @upper_dev: new upper device
4880 *
4881 * Adds a link to device which is upper to this one. The caller must hold
4882 * the RTNL lock. On a failure a negative errno code is returned.
4883 * On success the reference counts are adjusted and the function
4884 * returns zero.
4885 */
4886int netdev_upper_dev_link(struct net_device *dev,
4887 struct net_device *upper_dev)
4888{
Veaceslav Falico402dae92013-09-25 09:20:09 +02004889 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004890}
4891EXPORT_SYMBOL(netdev_upper_dev_link);
4892
4893/**
4894 * netdev_master_upper_dev_link - Add a master link to the upper device
4895 * @dev: device
4896 * @upper_dev: new upper device
4897 *
4898 * Adds a link to device which is upper to this one. In this case, only
4899 * one master upper device can be linked, although other non-master devices
4900 * might be linked as well. The caller must hold the RTNL lock.
4901 * On a failure a negative errno code is returned. On success the reference
4902 * counts are adjusted and the function returns zero.
4903 */
4904int netdev_master_upper_dev_link(struct net_device *dev,
4905 struct net_device *upper_dev)
4906{
Veaceslav Falico402dae92013-09-25 09:20:09 +02004907 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004908}
4909EXPORT_SYMBOL(netdev_master_upper_dev_link);
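
/* A sketch of how a bonding-style master might take a port, leaning on
 * the -EBUSY/-EEXIST checks inside __netdev_upper_dev_link(); names
 * other than the netdev API calls are hypothetical.
 */
static int hypo_enslave(struct net_device *master, struct net_device *port)
{
        int err;

        ASSERT_RTNL();
        err = netdev_master_upper_dev_link(port, master);
        if (err)
                return err;     /* loop, duplicate link, or existing master */

        /* programming hardware, syncing addresses, etc. would follow here */
        return 0;
}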
4910
Veaceslav Falico402dae92013-09-25 09:20:09 +02004911int netdev_master_upper_dev_link_private(struct net_device *dev,
4912 struct net_device *upper_dev,
4913 void *private)
4914{
4915 return __netdev_upper_dev_link(dev, upper_dev, true, private);
4916}
4917EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
4918
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004919/**
4920 * netdev_upper_dev_unlink - Removes a link to upper device
4921 * @dev: device
4922 * @upper_dev: new upper device
4923 *
4924 * Removes a link to device which is upper to this one. The caller must hold
4925 * the RTNL lock.
4926 */
4927void netdev_upper_dev_unlink(struct net_device *dev,
4928 struct net_device *upper_dev)
4929{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004930 struct netdev_adjacent *i, *j;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004931 ASSERT_RTNL();
4932
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004933 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004934
4935 /* Here is the tricky part. We must remove all dev's lower
4936 * devices from all upper_dev's upper devices and vice
4937 * versa, to maintain the graph relationship.
4938 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004939 list_for_each_entry(i, &dev->all_adj_list.lower, list)
4940 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004941 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4942
4943 /* also remove the devices themselves from the lower/upper device
4944 * lists
4945 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004946 list_for_each_entry(i, &dev->all_adj_list.lower, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004947 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4948
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004949 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004950 __netdev_adjacent_dev_unlink(dev, i->dev);
4951
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004952 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004953}
4954EXPORT_SYMBOL(netdev_upper_dev_unlink);
4955
Veaceslav Falico402dae92013-09-25 09:20:09 +02004956void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
4957 struct net_device *lower_dev)
4958{
4959 struct netdev_adjacent *lower;
4960
4961 if (!lower_dev)
4962 return NULL;
4963 lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
4964 if (!lower)
4965 return NULL;
4966
4967 return lower->private;
4968}
4969EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
4970
4971void *netdev_lower_dev_get_private(struct net_device *dev,
4972 struct net_device *lower_dev)
4973{
4974 struct netdev_adjacent *lower;
4975
4976 if (!lower_dev)
4977 return NULL;
4978 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
4979 if (!lower)
4980 return NULL;
4981
4982 return lower->private;
4983}
4984EXPORT_SYMBOL(netdev_lower_dev_get_private);
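
/* A sketch pairing the _private link variant with the lookup helpers
 * above, the way a team/bond-like driver could stash per-port state;
 * struct hypo_port is assumed purely for illustration.
 */
static int hypo_add_port(struct net_device *master, struct net_device *port,
                         struct hypo_port *info)
{
        /* info becomes the netdev_adjacent->private on both sides of the link */
        return netdev_master_upper_dev_link_private(port, master, info);
}

static struct hypo_port *hypo_port_info(struct net_device *master,
                                        struct net_device *port)
{
        /* the port sits on master's adj_list.lower, carrying our private */
        return netdev_lower_dev_get_private(master, port);
}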
4985
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004986static void dev_change_rx_flags(struct net_device *dev, int flags)
4987{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004988 const struct net_device_ops *ops = dev->netdev_ops;
4989
4990 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4991 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004992}
4993
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02004994static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07004995{
Eric Dumazetb536db92011-11-30 21:42:26 +00004996 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06004997 kuid_t uid;
4998 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07004999
Patrick McHardy24023452007-07-14 18:51:31 -07005000 ASSERT_RTNL();
5001
Wang Chendad9b332008-06-18 01:48:28 -07005002 dev->flags |= IFF_PROMISC;
5003 dev->promiscuity += inc;
5004 if (dev->promiscuity == 0) {
5005 /*
5006 * Avoid overflow.
5007 * If inc causes overflow, leave promiscuity untouched and return an error.
5008 */
5009 if (inc < 0)
5010 dev->flags &= ~IFF_PROMISC;
5011 else {
5012 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005013 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5014 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005015 return -EOVERFLOW;
5016 }
5017 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005018 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005019 pr_info("device %s %s promiscuous mode\n",
5020 dev->name,
5021 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005022 if (audit_enabled) {
5023 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005024 audit_log(current->audit_context, GFP_ATOMIC,
5025 AUDIT_ANOM_PROMISCUOUS,
5026 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5027 dev->name, (dev->flags & IFF_PROMISC),
5028 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005029 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005030 from_kuid(&init_user_ns, uid),
5031 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005032 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005033 }
Patrick McHardy24023452007-07-14 18:51:31 -07005034
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005035 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005036 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005037 if (notify)
5038 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07005039 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005040}
5041
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042/**
5043 * dev_set_promiscuity - update promiscuity count on a device
5044 * @dev: device
5045 * @inc: modifier
5046 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005047 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048 * remains above zero the interface remains promiscuous. Once it hits zero
5049 * the device reverts to normal filtering operation. A negative inc
5050 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005051 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005052 */
Wang Chendad9b332008-06-18 01:48:28 -07005053int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054{
Eric Dumazetb536db92011-11-30 21:42:26 +00005055 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005056 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005058 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005059 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005060 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005061 if (dev->flags != old_flags)
5062 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005063 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005065EXPORT_SYMBOL(dev_set_promiscuity);
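/*
 * Example (illustrative sketch only, not part of the original file): a
 * hypothetical packet-tap component that holds one promiscuity reference
 * while it is active.  dev_set_promiscuity() is reference counted, so
 * paired +1/-1 calls nest safely with other users.  RTNL must be held
 * around each call.
 */
static int example_tap_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one reference */
	rtnl_unlock();
	return err;
}

static void example_tap_detach(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}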
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005067static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005069 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070
Patrick McHardy24023452007-07-14 18:51:31 -07005071 ASSERT_RTNL();
5072
Linus Torvalds1da177e2005-04-16 15:20:36 -07005073 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005074 dev->allmulti += inc;
5075 if (dev->allmulti == 0) {
5076 /*
5077 * Avoid overflow.
 5078	 * If inc would overflow the counter, leave allmulti untouched and return an error.
5079 */
5080 if (inc < 0)
5081 dev->flags &= ~IFF_ALLMULTI;
5082 else {
5083 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005084			pr_warn("%s: allmulti counter would overflow, allmulti unchanged; the allmulti feature of this device may be unreliable\n",
 5085				dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005086 return -EOVERFLOW;
5087 }
5088 }
Patrick McHardy24023452007-07-14 18:51:31 -07005089 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005090 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005091 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005092 if (notify)
5093 __dev_notify_flags(dev, old_flags,
5094 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07005095 }
Wang Chendad9b332008-06-18 01:48:28 -07005096 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005097}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005098
5099/**
5100 * dev_set_allmulti - update allmulti count on a device
5101 * @dev: device
5102 * @inc: modifier
5103 *
5104 * Add or remove reception of all multicast frames to a device. While the
 5105 * count in the device remains above zero the interface keeps receiving
 5106 * all multicast frames. Once it hits zero the device reverts to normal
5107 * filtering operation. A negative @inc value is used to drop the counter
5108 * when releasing a resource needing all multicasts.
5109 * Return 0 if successful or a negative errno code on error.
5110 */
5111
5112int dev_set_allmulti(struct net_device *dev, int inc)
5113{
5114 return __dev_set_allmulti(dev, inc, true);
5115}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005116EXPORT_SYMBOL(dev_set_allmulti);
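/*
 * Example (hypothetical sketch): a routing-protocol style user that must
 * see every multicast frame takes an allmulti reference for the lifetime
 * of the resource, mirroring the promiscuity example above.  The caller
 * is assumed to already hold RTNL here.
 */
static int example_mcast_listen_start(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_set_allmulti(dev, 1);	/* may fail with -EOVERFLOW */
}

static void example_mcast_listen_stop(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_set_allmulti(dev, -1);
}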
Patrick McHardy4417da62007-06-27 01:28:10 -07005117
5118/*
5119 * Upload unicast and multicast address lists to device and
5120 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005121 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005122 * are present.
5123 */
5124void __dev_set_rx_mode(struct net_device *dev)
5125{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005126 const struct net_device_ops *ops = dev->netdev_ops;
5127
Patrick McHardy4417da62007-06-27 01:28:10 -07005128 /* dev_open will call this function so the list will stay sane. */
5129 if (!(dev->flags&IFF_UP))
5130 return;
5131
5132 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005133 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005134
Jiri Pirko01789342011-08-16 06:29:00 +00005135 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005136		/* Unicast address changes may only happen under the rtnl,
5137 * therefore calling __dev_set_promiscuity here is safe.
5138 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005139 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005140 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005141 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005142 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005143 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005144 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005145 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005146 }
Jiri Pirko01789342011-08-16 06:29:00 +00005147
5148 if (ops->ndo_set_rx_mode)
5149 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005150}
5151
5152void dev_set_rx_mode(struct net_device *dev)
5153{
David S. Millerb9e40852008-07-15 00:15:08 -07005154 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005155 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005156 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157}
5158
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005159/**
5160 * dev_get_flags - get flags reported to userspace
5161 * @dev: device
5162 *
5163 * Get the combination of flag bits exported through APIs to userspace.
5164 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005165unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166{
Eric Dumazet95c96172012-04-15 05:58:06 +00005167 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168
5169 flags = (dev->flags & ~(IFF_PROMISC |
5170 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005171 IFF_RUNNING |
5172 IFF_LOWER_UP |
5173 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 (dev->gflags & (IFF_PROMISC |
5175 IFF_ALLMULTI));
5176
Stefan Rompfb00055a2006-03-20 17:09:11 -08005177 if (netif_running(dev)) {
5178 if (netif_oper_up(dev))
5179 flags |= IFF_RUNNING;
5180 if (netif_carrier_ok(dev))
5181 flags |= IFF_LOWER_UP;
5182 if (netif_dormant(dev))
5183 flags |= IFF_DORMANT;
5184 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185
5186 return flags;
5187}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005188EXPORT_SYMBOL(dev_get_flags);
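/*
 * Example (sketch): testing operational state through the userspace-visible
 * flag combination.  Note IFF_RUNNING is synthesized from operstate above,
 * not read from dev->flags directly.
 */
static bool example_link_is_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}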
Linus Torvalds1da177e2005-04-16 15:20:36 -07005189
Patrick McHardybd380812010-02-26 06:34:53 +00005190int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191{
Eric Dumazetb536db92011-11-30 21:42:26 +00005192 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005193 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194
Patrick McHardy24023452007-07-14 18:51:31 -07005195 ASSERT_RTNL();
5196
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197 /*
5198 * Set the flags on our device.
5199 */
5200
5201 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5202 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5203 IFF_AUTOMEDIA)) |
5204 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5205 IFF_ALLMULTI));
5206
5207 /*
5208 * Load in the correct multicast list now the flags have changed.
5209 */
5210
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005211 if ((old_flags ^ flags) & IFF_MULTICAST)
5212 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005213
Patrick McHardy4417da62007-06-27 01:28:10 -07005214 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215
5216 /*
 5217	 * Have we downed the interface? We handle IFF_UP ourselves
5218 * according to user attempts to set it, rather than blindly
5219 * setting it.
5220 */
5221
5222 ret = 0;
5223 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005224 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225
5226 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005227 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228 }
5229
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005231 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005232 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005233
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005235
5236 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5237 if (dev->flags != old_flags)
5238 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239 }
5240
5241 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 5242	   is important. Some (broken) drivers set IFF_PROMISC themselves when
 5243	   IFF_ALLMULTI is requested, without asking us and without reporting it.
5244 */
5245 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005246 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5247
Linus Torvalds1da177e2005-04-16 15:20:36 -07005248 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005249 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005250 }
5251
Patrick McHardybd380812010-02-26 06:34:53 +00005252 return ret;
5253}
5254
Nicolas Dichtela528c212013-09-25 12:02:44 +02005255void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5256 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00005257{
5258 unsigned int changes = dev->flags ^ old_flags;
5259
Nicolas Dichtela528c212013-09-25 12:02:44 +02005260 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005261 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005262
Patrick McHardybd380812010-02-26 06:34:53 +00005263 if (changes & IFF_UP) {
5264 if (dev->flags & IFF_UP)
5265 call_netdevice_notifiers(NETDEV_UP, dev);
5266 else
5267 call_netdevice_notifiers(NETDEV_DOWN, dev);
5268 }
5269
5270 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00005271 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5272 struct netdev_notifier_change_info change_info;
5273
5274 change_info.flags_changed = changes;
5275 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5276 &change_info.info);
5277 }
Patrick McHardybd380812010-02-26 06:34:53 +00005278}
5279
5280/**
5281 * dev_change_flags - change device settings
5282 * @dev: device
5283 * @flags: device state flags
5284 *
 5285 * Change settings on a device based on the given state flags. The flags are
5286 * in the userspace exported format.
5287 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005288int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005289{
Eric Dumazetb536db92011-11-30 21:42:26 +00005290 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005291 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00005292
5293 ret = __dev_change_flags(dev, flags);
5294 if (ret < 0)
5295 return ret;
5296
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005297 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005298 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299 return ret;
5300}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005301EXPORT_SYMBOL(dev_change_flags);
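/*
 * Example (hypothetical sketch): bringing an interface administratively up
 * the way an ioctl or rtnetlink caller would, by editing the userspace
 * format flags and letting dev_change_flags() send the notifications.
 */
static int example_set_iface_up(struct net_device *dev)
{
	int err;

	rtnl_lock();	/* __dev_change_flags() asserts RTNL */
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}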
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005303/**
5304 * dev_set_mtu - Change maximum transfer unit
5305 * @dev: device
5306 * @new_mtu: new transfer unit
5307 *
5308 * Change the maximum transfer size of the network device.
5309 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310int dev_set_mtu(struct net_device *dev, int new_mtu)
5311{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005312 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313 int err;
5314
5315 if (new_mtu == dev->mtu)
5316 return 0;
5317
5318 /* MTU must be positive. */
5319 if (new_mtu < 0)
5320 return -EINVAL;
5321
5322 if (!netif_device_present(dev))
5323 return -ENODEV;
5324
5325 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005326 if (ops->ndo_change_mtu)
5327 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328 else
5329 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005330
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005331 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005332 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 return err;
5334}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005335EXPORT_SYMBOL(dev_set_mtu);
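/*
 * Example (sketch): a tunnel-style driver reserving headroom in the MTU of
 * an underlying device.  encap_hlen is a hypothetical encapsulation
 * overhead; the caller is assumed to hold RTNL.
 */
static int example_shrink_mtu(struct net_device *lower, unsigned int encap_hlen)
{
	ASSERT_RTNL();
	if (lower->mtu <= encap_hlen)
		return -EINVAL;
	return dev_set_mtu(lower, lower->mtu - encap_hlen);
}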
Linus Torvalds1da177e2005-04-16 15:20:36 -07005336
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005337/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005338 * dev_set_group - Change group this device belongs to
5339 * @dev: device
5340 * @new_group: group this device should belong to
5341 */
5342void dev_set_group(struct net_device *dev, int new_group)
5343{
5344 dev->group = new_group;
5345}
5346EXPORT_SYMBOL(dev_set_group);
5347
5348/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005349 * dev_set_mac_address - Change Media Access Control Address
5350 * @dev: device
5351 * @sa: new address
5352 *
5353 * Change the hardware (MAC) address of the device
5354 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005355int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5356{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005357 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005358 int err;
5359
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005360 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361 return -EOPNOTSUPP;
5362 if (sa->sa_family != dev->type)
5363 return -EINVAL;
5364 if (!netif_device_present(dev))
5365 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005366 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005367 if (err)
5368 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005369 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005370 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005371 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005372 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005374EXPORT_SYMBOL(dev_set_mac_address);
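/*
 * Example (hypothetical sketch): setting a new hardware address on an
 * Ethernet device from a raw 6-byte buffer.  sa_family must match
 * dev->type or -EINVAL is returned; the caller is assumed to hold RTNL.
 */
static int example_set_ether_addr(struct net_device *dev,
				  const u8 addr[ETH_ALEN])
{
	struct sockaddr sa;

	sa.sa_family = dev->type;		/* ARPHRD_ETHER for Ethernet */
	memcpy(sa.sa_data, addr, ETH_ALEN);
	return dev_set_mac_address(dev, &sa);
}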
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005376/**
5377 * dev_change_carrier - Change device carrier
5378 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00005379 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005380 *
5381 * Change device carrier
5382 */
5383int dev_change_carrier(struct net_device *dev, bool new_carrier)
5384{
5385 const struct net_device_ops *ops = dev->netdev_ops;
5386
5387 if (!ops->ndo_change_carrier)
5388 return -EOPNOTSUPP;
5389 if (!netif_device_present(dev))
5390 return -ENODEV;
5391 return ops->ndo_change_carrier(dev, new_carrier);
5392}
5393EXPORT_SYMBOL(dev_change_carrier);
5394
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02005396 * dev_get_phys_port_id - Get device physical port ID
5397 * @dev: device
5398 * @ppid: port ID
5399 *
5400 * Get device physical port ID
5401 */
5402int dev_get_phys_port_id(struct net_device *dev,
5403 struct netdev_phys_port_id *ppid)
5404{
5405 const struct net_device_ops *ops = dev->netdev_ops;
5406
5407 if (!ops->ndo_get_phys_port_id)
5408 return -EOPNOTSUPP;
5409 return ops->ndo_get_phys_port_id(dev, ppid);
5410}
5411EXPORT_SYMBOL(dev_get_phys_port_id);
5412
5413/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005415 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416 *
5417 * Returns a suitable unique value for a new device interface
5418 * number. The caller must hold the rtnl semaphore or the
5419 * dev_base_lock to be sure it remains unique.
5420 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005421static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005423 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 for (;;) {
5425 if (++ifindex <= 0)
5426 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005427 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005428 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005429 }
5430}
5431
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005433static LIST_HEAD(net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005434static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005436static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005437{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005439 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440}
5441
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005442static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005443{
Krishna Kumare93737b2009-12-08 22:26:02 +00005444 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005445 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005446
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005447 BUG_ON(dev_boot_phase);
5448 ASSERT_RTNL();
5449
Krishna Kumare93737b2009-12-08 22:26:02 +00005450 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005451		/* Some devices call this without ever having
Krishna Kumare93737b2009-12-08 22:26:02 +00005452		 * been registered, to unwind a failed initialization.
 5453		 * Remove those devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005454 */
5455 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005456 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5457 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005458
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005459 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005460 list_del(&dev->unreg_list);
5461 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005462 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005463 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005464 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005465 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005466
Octavian Purdila44345722010-12-13 12:44:07 +00005467 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005468 list_for_each_entry(dev, head, unreg_list)
5469 list_add_tail(&dev->close_list, &close_head);
5470 dev_close_many(&close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005471
Octavian Purdila44345722010-12-13 12:44:07 +00005472 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005473 /* And unlink it from device chain. */
5474 unlist_netdevice(dev);
5475
5476 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005477 }
5478
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005479 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005480
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005481 list_for_each_entry(dev, head, unreg_list) {
5482 /* Shutdown queueing discipline. */
5483 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005484
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005485
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005486		/* Notify protocols that we are about to destroy
 5487		   this device. They should clean up all of their state.
5488 */
5489 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5490
Patrick McHardya2835762010-02-26 06:34:51 +00005491 if (!dev->rtnl_link_ops ||
5492 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005493 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Patrick McHardya2835762010-02-26 06:34:51 +00005494
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005495 /*
5496 * Flush the unicast and multicast chains
5497 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005498 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005499 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005500
5501 if (dev->netdev_ops->ndo_uninit)
5502 dev->netdev_ops->ndo_uninit(dev);
5503
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005504		/* The notifier chain MUST detach all upper devices from us. */
5505 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005506
5507 /* Remove entries from kobject tree */
5508 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00005509#ifdef CONFIG_XPS
5510 /* Remove XPS queueing entries */
5511 netif_reset_xps_queues_gt(dev, 0);
5512#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005513 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005514
Eric W. Biederman850a5452011-10-13 22:25:23 +00005515 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005516
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005517 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005518 dev_put(dev);
5519}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005520
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005521static void rollback_registered(struct net_device *dev)
5522{
5523 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005524
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005525 list_add(&dev->unreg_list, &single);
5526 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005527 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005528}
5529
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005530static netdev_features_t netdev_fix_features(struct net_device *dev,
5531 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005532{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005533 /* Fix illegal checksum combinations */
5534 if ((features & NETIF_F_HW_CSUM) &&
5535 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005536 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005537 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5538 }
5539
Herbert Xub63365a2008-10-23 01:11:29 -07005540 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005541 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005542 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005543 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005544 }
5545
Pravin B Shelarec5f0612013-03-07 09:28:01 +00005546 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5547 !(features & NETIF_F_IP_CSUM)) {
5548 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5549 features &= ~NETIF_F_TSO;
5550 features &= ~NETIF_F_TSO_ECN;
5551 }
5552
5553 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5554 !(features & NETIF_F_IPV6_CSUM)) {
5555 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5556 features &= ~NETIF_F_TSO6;
5557 }
5558
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005559 /* TSO ECN requires that TSO is present as well. */
5560 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5561 features &= ~NETIF_F_TSO_ECN;
5562
Michał Mirosław212b5732011-02-15 16:59:16 +00005563 /* Software GSO depends on SG. */
5564 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005565 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005566 features &= ~NETIF_F_GSO;
5567 }
5568
Michał Mirosławacd11302011-01-24 15:45:15 -08005569 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005570 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005571 /* maybe split UFO into V4 and V6? */
5572 if (!((features & NETIF_F_GEN_CSUM) ||
5573 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5574 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005575 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005576 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005577 features &= ~NETIF_F_UFO;
5578 }
5579
5580 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005581 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005582 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005583 features &= ~NETIF_F_UFO;
5584 }
5585 }
5586
5587 return features;
5588}
Herbert Xub63365a2008-10-23 01:11:29 -07005589
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005590int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005591{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005592 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005593 int err = 0;
5594
Michał Mirosław87267482011-04-12 09:56:38 +00005595 ASSERT_RTNL();
5596
Michał Mirosław5455c692011-02-15 16:59:17 +00005597 features = netdev_get_wanted_features(dev);
5598
5599 if (dev->netdev_ops->ndo_fix_features)
5600 features = dev->netdev_ops->ndo_fix_features(dev, features);
5601
5602 /* driver might be less strict about feature dependencies */
5603 features = netdev_fix_features(dev, features);
5604
5605 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005606 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005607
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005608 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5609 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005610
5611 if (dev->netdev_ops->ndo_set_features)
5612 err = dev->netdev_ops->ndo_set_features(dev, features);
5613
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005614 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005615 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005616 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5617 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005618 return -1;
5619 }
5620
5621 if (!err)
5622 dev->features = features;
5623
5624 return 1;
5625}
5626
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005627/**
5628 * netdev_update_features - recalculate device features
5629 * @dev: the device to check
5630 *
5631 * Recalculate dev->features set and send notifications if it
5632 * has changed. Should be called after driver or hardware dependent
5633 * conditions might have changed that influence the features.
5634 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005635void netdev_update_features(struct net_device *dev)
5636{
5637 if (__netdev_update_features(dev))
5638 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005639}
5640EXPORT_SYMBOL(netdev_update_features);
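/*
 * Example (sketch, debatable as real driver practice): a driver that
 * discovers at runtime its checksum engine is unusable clears the
 * corresponding hw_features bits and asks the core to recompute;
 * hw_csum_broken is a hypothetical hardware indication.
 */
static void example_recheck_features(struct net_device *dev,
				     bool hw_csum_broken)
{
	rtnl_lock();
	if (hw_csum_broken)
		dev->hw_features &= ~NETIF_F_ALL_CSUM;
	netdev_update_features(dev);	/* recompute, notify if changed */
	rtnl_unlock();
}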
5641
Linus Torvalds1da177e2005-04-16 15:20:36 -07005642/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005643 * netdev_change_features - recalculate device features
5644 * @dev: the device to check
5645 *
5646 * Recalculate dev->features set and send notifications even
5647 * if they have not changed. Should be called instead of
5648 * netdev_update_features() if also dev->vlan_features might
5649 * have changed to allow the changes to be propagated to stacked
5650 * VLAN devices.
5651 */
5652void netdev_change_features(struct net_device *dev)
5653{
5654 __netdev_update_features(dev);
5655 netdev_features_change(dev);
5656}
5657EXPORT_SYMBOL(netdev_change_features);
5658
5659/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005660 * netif_stacked_transfer_operstate - transfer operstate
5661 * @rootdev: the root or lower level device to transfer state from
5662 * @dev: the device to transfer operstate to
5663 *
5664 * Transfer operational state from root to device. This is normally
5665 * called when a stacking relationship exists between the root
 5666 * device and the device (a leaf device).
5667 */
5668void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5669 struct net_device *dev)
5670{
5671 if (rootdev->operstate == IF_OPER_DORMANT)
5672 netif_dormant_on(dev);
5673 else
5674 netif_dormant_off(dev);
5675
5676 if (netif_carrier_ok(rootdev)) {
5677 if (!netif_carrier_ok(dev))
5678 netif_carrier_on(dev);
5679 } else {
5680 if (netif_carrier_ok(dev))
5681 netif_carrier_off(dev);
5682 }
5683}
5684EXPORT_SYMBOL(netif_stacked_transfer_operstate);
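/*
 * Example (hypothetical sketch): a vlan-like stacked driver propagating
 * link state from its lower device, typically from a NETDEV_CHANGE
 * notifier handler.
 */
static int example_lower_changed(struct net_device *lower,
				 struct net_device *upper)
{
	netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}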
5685
Tom Herbertbf264142010-11-26 08:36:09 +00005686#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005687static int netif_alloc_rx_queues(struct net_device *dev)
5688{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005689 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005690 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005691
Tom Herbertbd25fa72010-10-18 18:00:16 +00005692 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005693
Tom Herbertbd25fa72010-10-18 18:00:16 +00005694 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005695 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005696 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005697
Tom Herbertbd25fa72010-10-18 18:00:16 +00005698 dev->_rx = rx;
5699
Tom Herbertbd25fa72010-10-18 18:00:16 +00005700 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005701 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005702 return 0;
5703}
Tom Herbertbf264142010-11-26 08:36:09 +00005704#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005705
Changli Gaoaa942102010-12-04 02:31:41 +00005706static void netdev_init_one_queue(struct net_device *dev,
5707 struct netdev_queue *queue, void *_unused)
5708{
5709 /* Initialize queue lock */
5710 spin_lock_init(&queue->_xmit_lock);
5711 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5712 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005713 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005714 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005715#ifdef CONFIG_BQL
5716 dql_init(&queue->dql, HZ);
5717#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005718}
5719
Eric Dumazet60877a32013-06-20 01:15:51 -07005720static void netif_free_tx_queues(struct net_device *dev)
5721{
5722 if (is_vmalloc_addr(dev->_tx))
5723 vfree(dev->_tx);
5724 else
5725 kfree(dev->_tx);
5726}
5727
Tom Herberte6484932010-10-18 18:04:39 +00005728static int netif_alloc_netdev_queues(struct net_device *dev)
5729{
5730 unsigned int count = dev->num_tx_queues;
5731 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07005732 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00005733
Eric Dumazet60877a32013-06-20 01:15:51 -07005734 BUG_ON(count < 1 || count > 0xffff);
Tom Herberte6484932010-10-18 18:04:39 +00005735
Eric Dumazet60877a32013-06-20 01:15:51 -07005736 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
5737 if (!tx) {
5738 tx = vzalloc(sz);
5739 if (!tx)
5740 return -ENOMEM;
5741 }
Tom Herberte6484932010-10-18 18:04:39 +00005742 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005743
Tom Herberte6484932010-10-18 18:04:39 +00005744 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5745 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005746
5747 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005748}
5749
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005750/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005751 * register_netdevice - register a network device
5752 * @dev: device to register
5753 *
5754 * Take a completed network device structure and add it to the kernel
5755 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5756 * chain. 0 is returned on success. A negative errno code is returned
5757 * on a failure to set up the device, or if the name is a duplicate.
5758 *
5759 * Callers must hold the rtnl semaphore. You may want
5760 * register_netdev() instead of this.
5761 *
5762 * BUGS:
5763 * The locking appears insufficient to guarantee two parallel registers
5764 * will not get the same name.
5765 */
5766
5767int register_netdevice(struct net_device *dev)
5768{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005769 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005770 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005771
5772 BUG_ON(dev_boot_phase);
5773 ASSERT_RTNL();
5774
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005775 might_sleep();
5776
Linus Torvalds1da177e2005-04-16 15:20:36 -07005777 /* When net_device's are persistent, this will be fatal. */
5778 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005779 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780
David S. Millerf1f28aa2008-07-15 00:08:33 -07005781 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005782 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005783
Linus Torvalds1da177e2005-04-16 15:20:36 -07005784 dev->iflink = -1;
5785
Gao feng828de4f2012-09-13 20:58:27 +00005786 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00005787 if (ret < 0)
5788 goto out;
5789
Linus Torvalds1da177e2005-04-16 15:20:36 -07005790 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005791 if (dev->netdev_ops->ndo_init) {
5792 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005793 if (ret) {
5794 if (ret > 0)
5795 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005796 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797 }
5798 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005799
Patrick McHardyf6469682013-04-19 02:04:27 +00005800 if (((dev->hw_features | dev->features) &
5801 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00005802 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5803 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5804 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5805 ret = -EINVAL;
5806 goto err_uninit;
5807 }
5808
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00005809 ret = -EBUSY;
5810 if (!dev->ifindex)
5811 dev->ifindex = dev_new_index(net);
5812 else if (__dev_get_by_index(net, dev->ifindex))
5813 goto err_uninit;
5814
Linus Torvalds1da177e2005-04-16 15:20:36 -07005815 if (dev->iflink == -1)
5816 dev->iflink = dev->ifindex;
5817
Michał Mirosław5455c692011-02-15 16:59:17 +00005818 /* Transfer changeable features to wanted_features and enable
5819 * software offloads (GSO and GRO).
5820 */
5821 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00005822 dev->features |= NETIF_F_SOFT_FEATURES;
5823 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005824
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005825 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00005826 if (!(dev->flags & IFF_LOOPBACK)) {
5827 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5828 if (dev->features & NETIF_F_ALL_CSUM) {
5829 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5830 dev->features |= NETIF_F_NOCACHE_COPY;
5831 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005832 }
5833
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005834 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00005835 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005836 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00005837
Pravin B Shelaree579672013-03-07 09:28:08 +00005838 /* Make NETIF_F_SG inheritable to tunnel devices.
5839 */
5840 dev->hw_enc_features |= NETIF_F_SG;
5841
Simon Horman0d89d202013-05-23 21:02:52 +00005842 /* Make NETIF_F_SG inheritable to MPLS.
5843 */
5844 dev->mpls_features |= NETIF_F_SG;
5845
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005846 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5847 ret = notifier_to_errno(ret);
5848 if (ret)
5849 goto err_uninit;
5850
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005851 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005852 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005853 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005854 dev->reg_state = NETREG_REGISTERED;
5855
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005856 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00005857
Linus Torvalds1da177e2005-04-16 15:20:36 -07005858 /*
5859 * Default initial state at registry is that the
5860 * device is present.
5861 */
5862
5863 set_bit(__LINK_STATE_PRESENT, &dev->state);
5864
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01005865 linkwatch_init_dev(dev);
5866
Linus Torvalds1da177e2005-04-16 15:20:36 -07005867 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005868 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005869 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005870 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005871
Jiri Pirko948b3372013-01-08 01:38:25 +00005872 /* If the device has permanent device address, driver should
5873 * set dev_addr and also addr_assign_type should be set to
5874 * NET_ADDR_PERM (default value).
5875 */
5876 if (dev->addr_assign_type == NET_ADDR_PERM)
5877 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5878
Linus Torvalds1da177e2005-04-16 15:20:36 -07005879 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005880 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005881 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005882 if (ret) {
5883 rollback_registered(dev);
5884 dev->reg_state = NETREG_UNREGISTERED;
5885 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005886 /*
5887 * Prevent userspace races by waiting until the network
5888 * device is fully setup before sending notifications.
5889 */
Patrick McHardya2835762010-02-26 06:34:51 +00005890 if (!dev->rtnl_link_ops ||
5891 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005892 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005893
5894out:
5895 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005896
5897err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005898 if (dev->netdev_ops->ndo_uninit)
5899 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005900 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005901}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005902EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005903
5904/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005905 * init_dummy_netdev - init a dummy network device for NAPI
5906 * @dev: device to init
5907 *
 5908 * This takes a network device structure and initializes the minimum
 5909 * set of fields so it can be used to schedule NAPI polls without
 5910 * registering a full-blown interface. This is to be used by drivers
5911 * that need to tie several hardware interfaces to a single NAPI
5912 * poll scheduler due to HW limitations.
5913 */
5914int init_dummy_netdev(struct net_device *dev)
5915{
5916 /* Clear everything. Note we don't initialize spinlocks
 5917	 * as they aren't supposed to be taken by any of the
 5918	 * NAPI code and this dummy netdev is supposed to be
 5919	 * used only for NAPI polls
5920 */
5921 memset(dev, 0, sizeof(struct net_device));
5922
5923 /* make sure we BUG if trying to hit standard
5924 * register/unregister code path
5925 */
5926 dev->reg_state = NETREG_DUMMY;
5927
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005928 /* NAPI wants this */
5929 INIT_LIST_HEAD(&dev->napi_list);
5930
5931 /* a dummy interface is started by default */
5932 set_bit(__LINK_STATE_PRESENT, &dev->state);
5933 set_bit(__LINK_STATE_START, &dev->state);
5934
Eric Dumazet29b44332010-10-11 10:22:12 +00005935	/* Note: We don't allocate pcpu_refcnt for dummy devices,
 5936	 * because users of this 'device' don't need to change
5937 * its refcount.
5938 */
5939
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005940 return 0;
5941}
5942EXPORT_SYMBOL_GPL(init_dummy_netdev);
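/*
 * Example (hypothetical sketch): a wireless-style driver funnelling several
 * hardware queues through one NAPI context hung off a dummy netdev that is
 * never registered; it exists only so the NAPI machinery has a home.
 */
struct example_hw {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static void example_hw_init(struct example_hw *hw,
			    int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, poll, 64);
}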
5943
5944
5945/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005946 * register_netdev - register a network device
5947 * @dev: device to register
5948 *
5949 * Take a completed network device structure and add it to the kernel
5950 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5951 * chain. 0 is returned on success. A negative errno code is returned
5952 * on a failure to set up the device, or if the name is a duplicate.
5953 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005954 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005955 * and expands the device name if you passed a format string to
5956 * alloc_netdev.
5957 */
5958int register_netdev(struct net_device *dev)
5959{
5960 int err;
5961
5962 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005963 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005964 rtnl_unlock();
5965 return err;
5966}
5967EXPORT_SYMBOL(register_netdev);
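/*
 * Example (illustrative sketch): the classic probe-time sequence for an
 * Ethernet driver.  struct example_priv and example_netdev_ops are
 * hypothetical placeholders; alloc_etherdev() is the usual Ethernet
 * shorthand around alloc_netdev_mqs().
 */
struct example_priv { int placeholder; };	/* hypothetical private state */
static const struct net_device_ops example_netdev_ops;	/* hypothetical ops */

static int example_probe(struct device *parent)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;
	SET_NETDEV_DEV(dev, parent);	/* sysfs parent linkage */

	err = register_netdev(dev);	/* takes and drops RTNL itself */
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}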
5968
Eric Dumazet29b44332010-10-11 10:22:12 +00005969int netdev_refcnt_read(const struct net_device *dev)
5970{
5971 int i, refcnt = 0;
5972
5973 for_each_possible_cpu(i)
5974 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5975 return refcnt;
5976}
5977EXPORT_SYMBOL(netdev_refcnt_read);
5978
Ben Hutchings2c530402012-07-10 10:55:09 +00005979/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005980 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00005981 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005982 *
5983 * This is called when unregistering network devices.
5984 *
5985 * Any protocol or device that holds a reference should register
 5986 * for netdevice notification, and clean up and put back the
5987 * reference if they receive an UNREGISTER event.
5988 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005989 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005990 */
5991static void netdev_wait_allrefs(struct net_device *dev)
5992{
5993 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00005994 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005995
Eric Dumazete014deb2009-11-17 05:59:21 +00005996 linkwatch_forget_dev(dev);
5997
Linus Torvalds1da177e2005-04-16 15:20:36 -07005998 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00005999 refcnt = netdev_refcnt_read(dev);
6000
6001 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006002 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006003 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006004
6005 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006006 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006007
Eric Dumazet748e2d92012-08-22 21:50:59 +00006008 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006009 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006010 rtnl_lock();
6011
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006012 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006013 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6014 &dev->state)) {
6015 /* We must not have linkwatch events
6016 * pending on unregister. If this
6017 * happens, we simply run the queue
6018 * unscheduled, resulting in a noop
6019 * for this device.
6020 */
6021 linkwatch_run_queue();
6022 }
6023
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006024 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006025
6026 rebroadcast_time = jiffies;
6027 }
6028
6029 msleep(250);
6030
Eric Dumazet29b44332010-10-11 10:22:12 +00006031 refcnt = netdev_refcnt_read(dev);
6032
Linus Torvalds1da177e2005-04-16 15:20:36 -07006033 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006034 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6035 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006036 warning_time = jiffies;
6037 }
6038 }
6039}
6040
6041/* The sequence is:
6042 *
6043 * rtnl_lock();
6044 * ...
6045 * register_netdevice(x1);
6046 * register_netdevice(x2);
6047 * ...
6048 * unregister_netdevice(y1);
6049 * unregister_netdevice(y2);
6050 * ...
6051 * rtnl_unlock();
6052 * free_netdev(y1);
6053 * free_netdev(y2);
6054 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006055 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006056 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006057 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006058 * without deadlocking with linkwatch via keventd.
6059 * 2) Since we run with the RTNL semaphore not held, we can sleep
6060 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006061 *
6062 * We must not return until all unregister events added during
6063 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006064 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006065void netdev_run_todo(void)
6066{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006067 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006068
Linus Torvalds1da177e2005-04-16 15:20:36 -07006069 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006070 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006071
6072 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006073
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006074
6075 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006076 if (!list_empty(&list))
6077 rcu_barrier();
6078
Linus Torvalds1da177e2005-04-16 15:20:36 -07006079 while (!list_empty(&list)) {
6080 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006081 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006082 list_del(&dev->todo_list);
6083
Eric Dumazet748e2d92012-08-22 21:50:59 +00006084 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006085 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006086 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006087
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006088 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006089 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006090 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006091 dump_stack();
6092 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006093 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006094
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006095 dev->reg_state = NETREG_UNREGISTERED;
6096
Changli Gao152102c2010-03-30 20:16:22 +00006097 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006098
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006099 netdev_wait_allrefs(dev);
6100
6101 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006102 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006103 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6104 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006105 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006106
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006107 if (dev->destructor)
6108 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006109
Eric W. Biederman50624c92013-09-23 21:19:49 -07006110 /* Report a network device has been unregistered */
6111 rtnl_lock();
6112 dev_net(dev)->dev_unreg_count--;
6113 __rtnl_unlock();
6114 wake_up(&netdev_unregistering_wq);
6115
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006116 /* Free network device */
6117 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006118 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006119}
6120
Ben Hutchings3cfde792010-07-09 09:11:52 +00006121/* Convert net_device_stats to rtnl_link_stats64. They have the same
6122 * fields in the same order, with only the type differing.
6123 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006124void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6125 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006126{
6127#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006128 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6129 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006130#else
6131 size_t i, n = sizeof(*stats64) / sizeof(u64);
6132 const unsigned long *src = (const unsigned long *)netdev_stats;
6133 u64 *dst = (u64 *)stats64;
6134
6135 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6136 sizeof(*stats64) / sizeof(u64));
6137 for (i = 0; i < n; i++)
6138 dst[i] = src[i];
6139#endif
6140}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006141EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00006142
Eric Dumazetd83345a2009-11-16 03:36:51 +00006143/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006144 * dev_get_stats - get network device statistics
6145 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006146 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006147 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006148 * Get network statistics from device. Return @storage.
6149 * The device driver may provide its own method by setting
6150 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6151 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006152 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006153struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6154 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006155{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006156 const struct net_device_ops *ops = dev->netdev_ops;
6157
Eric Dumazet28172732010-07-07 14:58:56 -07006158 if (ops->ndo_get_stats64) {
6159 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006160 ops->ndo_get_stats64(dev, storage);
6161 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006162 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006163 } else {
6164 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006165 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006166 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006167 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006168}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006169EXPORT_SYMBOL(dev_get_stats);
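/*
 * Example (sketch): snapshotting one counter.  The rtnl_link_stats64
 * storage is caller provided, typically on the stack of a process
 * context reader.
 */
static u64 example_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 storage;

	dev_get_stats(dev, &storage);
	return storage.rx_packets;
}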
Rusty Russellc45d2862007-03-28 14:29:08 -07006170
Eric Dumazet24824a02010-10-02 06:11:55 +00006171struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006172{
Eric Dumazet24824a02010-10-02 06:11:55 +00006173 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006174
Eric Dumazet24824a02010-10-02 06:11:55 +00006175#ifdef CONFIG_NET_CLS_ACT
6176 if (queue)
6177 return queue;
6178 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6179 if (!queue)
6180 return NULL;
6181 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006182 queue->qdisc = &noop_qdisc;
6183 queue->qdisc_sleeping = &noop_qdisc;
6184 rcu_assign_pointer(dev->ingress_queue, queue);
6185#endif
6186 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006187}
6188
Eric Dumazet2c60db02012-09-16 09:17:26 +00006189static const struct ethtool_ops default_ethtool_ops;
6190
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00006191void netdev_set_default_ethtool_ops(struct net_device *dev,
6192 const struct ethtool_ops *ops)
6193{
6194 if (dev->ethtool_ops == &default_ethtool_ops)
6195 dev->ethtool_ops = ops;
6196}
6197EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6198
Eric Dumazet74d332c2013-10-30 13:10:44 -07006199void netdev_freemem(struct net_device *dev)
6200{
6201 char *addr = (char *)dev - dev->padded;
6202
6203 if (is_vmalloc_addr(addr))
6204 vfree(addr);
6205 else
6206 kfree(addr);
6207}
6208
Linus Torvalds1da177e2005-04-16 15:20:36 -07006209/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006210 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006211 * @sizeof_priv: size of private data to allocate space for
6212 * @name: device name format string
6213 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006214 * @txqs: the number of TX subqueues to allocate
6215 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006216 *
6217 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006218 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006219 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006220 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(alloc_size);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->all_adj_list.upper);
	INIT_LIST_HEAD(&dev->all_adj_list.lower);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	netif_free_tx_queues(dev);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
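
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver allocating a 4x4 multiqueue Ethernet device with a private
 * area. struct example_priv, the "ex%d" template and the queue
 * counts are assumptions made for the example.
 */
struct example_priv {
	int link_up;
};

static __maybe_unused struct net_device *example_create(void)
{
	struct net_device *dev;
	struct example_priv *priv;

	dev = alloc_netdev_mqs(sizeof(struct example_priv), "ex%d",
			       ether_setup, 4, 4);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);	/* points into the aligned tail */
	priv->link_up = 0;

	if (register_netdev(dev)) {
		/* never registered, so free_netdev() releases it directly */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}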

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	netif_free_tx_queues(dev);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
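
/*
 * Illustrative sketch, not part of the original file: the usual
 * unload-time pairing for a registered device, unregister first,
 * then release the memory once the refcount has dropped.
 */
static void __maybe_unused example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes rtnl, waits for refs */
	free_netdev(dev);		/* final put of the device */
}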

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
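
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * hook pointer read from the receive path under RCU. Clearing it and
 * then calling synchronize_net() guarantees no CPU is still running
 * the old hook before it is freed. All names here are assumptions.
 */
struct example_hook {
	void (*fn)(struct sk_buff *skb);
};

static struct example_hook __rcu *example_hook_ptr;

static void __maybe_unused example_hook_unregister(struct example_hook *hook)
{
	RCU_INIT_POINTER(example_hook_ptr, NULL);
	synchronize_net();	/* wait out in-flight readers */
	kfree(hook);
}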

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
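
/*
 * Illustrative sketch, not part of the original file: queueing a set
 * of devices and unregistering them in one batch, so the expensive
 * synchronization in rollback_registered_many() is paid only once.
 * example_destroy_group and the use of dev->group are assumptions.
 */
static void __maybe_unused example_destroy_group(struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(kill_list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->group == group)
			unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}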

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and
	 * unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
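
/*
 * Illustrative sketch, not part of the original file: moving a device
 * into another namespace under rtnl with a "dev%d" fallback pattern
 * for name collisions, roughly what the RTM_SETLINK path does.
 * example_move_to_netns is an assumed name.
 */
static int __maybe_unused example_move_to_netns(struct net_device *dev,
						struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();
	return err;
}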

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}


/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
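
/*
 * Illustrative sketch, not part of the original file: how an
 * aggregating driver might fold each slave's feature set into the
 * master's, in the spirit of bonding. The starting set and the mask
 * chosen here are assumptions made for the example.
 */
static __maybe_unused netdev_features_t
example_compute_features(struct net_device *slaves[], int n)
{
	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     NETIF_F_ONE_FOR_ALL |
						     NETIF_F_ALL_CSUM);
	return features;
}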

static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

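/*
 * Illustrative sketch, not part of the original file: logging the
 * owning driver's name, much as the qdisc watchdog does when a
 * transmit queue stalls. example_report_timeout is an assumed name.
 */
static void __maybe_unused example_report_timeout(struct net_device *dev)
{
	netdev_warn(dev, "driver %s: transmit queue timed out\n",
		    netdev_drivername(dev));
}
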
static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent) {
		r = dev_printk_emit(level[1] - '0',
				    dev->dev.parent,
				    "%s %s %s: %pV",
				    dev_driver_string(dev->dev.parent),
				    dev_name(dev->dev.parent),
				    netdev_name(dev), vaf);
	} else if (dev) {
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	} else {
		r = printk("%s(NULL net_device): %pV", level, vaf);
	}

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
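
/*
 * Illustrative sketch, not part of the original file: the generated
 * helpers in use. With a parent device the message comes out prefixed
 * with driver, bus id and netdev name, for example
 * "e1000e 0000:00:19.0 eth0: link up" (illustrative output only).
 */
static void __maybe_unused example_log_link(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link down\n");
}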

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&netdev_unregistering_wq, &wait,
				TASK_UNINTERRUPTIBLE);
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();
		schedule();
	}
	finish_wait(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
		sd->flow_limit = NULL;
#endif
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);