/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain:       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;       /* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
        while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev_base_seq_inc(dev_net(dev));
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if a protocol handler that mangles packets
 *      were first on the list, it could not sense that the packet is
 *      cloned and should be copied-on-write; it would change the packet
 *      and subsequent readers would see it broken.
 *                                                      --ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
        if (pt->type == htons(ETH_P_ALL))
                return &ptype_all;
        else
                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that
 *      all CPUs that are in the middle of receiving packets
 *      will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);

        spin_lock(&ptype_lock);
        list_add_rcu(&pt->list, head);
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;

        spin_lock(&ptype_lock);

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_pack: %p not found\n", pt);
out:
        spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

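/*
 * Editor's example (illustrative sketch, not part of the original file,
 * kept under #if 0 so it does not alter this file): a minimal ETH_P_ALL
 * tap registered with dev_add_pack() and torn down with dev_remove_pack().
 * All example_* names are hypothetical.
 */
#if 0
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
{
        /* The tap owns this clone of the skb and must consume it. */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_tap __read_mostly = {
        .type = cpu_to_be16(ETH_P_ALL), /* hashes onto the ptype_all chain */
        .func = example_tap_rcv,
};

/* register: dev_add_pack(&example_tap);  unregister: dev_remove_pack(&example_tap); */
#endif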

/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &proto_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that
 *      all CPUs that are in the middle of receiving packets
 *      will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;

        spin_lock(&offload_lock);
        list_add_rcu(&po->list, head);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *      __dev_remove_offload - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed &offload_type
 *      is removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);

/**
 *      dev_remove_offload - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &offload_type is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

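/*
 * Editor's example (illustrative sketch, not part of the original file,
 * kept under #if 0): registering a GRO receive hook for a hypothetical
 * protocol.  The struct layout follows packet_offload as used by the
 * in-tree offloads of this era; the example_* identifiers are invented.
 */
#if 0
static struct sk_buff **example_proto_gro_receive(struct sk_buff **head,
                                                  struct sk_buff *skb);

static struct packet_offload example_proto_offload __read_mostly = {
        .type = cpu_to_be16(0x88b5),    /* IEEE 802 local experimental ethertype */
        .callbacks = {
                .gro_receive = example_proto_gro_receive,
        },
};

/* dev_add_offload(&example_proto_offload); */
#endif
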
/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine for
 *      all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found, 1 if they are found.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

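/*
 * Editor's worked example (not part of the original file): booting with
 * "netdev=5,0x320,0xd0000,0xd4000,eth0" makes get_options() parse the four
 * leading integers into irq=5, base_addr=0x320, mem_start=0xd0000 and
 * mem_end=0xd4000, and the remaining string "eth0" becomes the entry name;
 * netdev_boot_setup_check() later applies that map when "eth0" is probed.
 */
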
/*******************************************************************************

                        Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *      dev_get_by_name_rcu - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name.
 *      If the name is found a pointer to the device is returned.
 *      If the name is not found then %NULL is returned.
 *      The reference counters are not incremented so the caller must be
 *      careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

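/*
 * Editor's example (illustrative sketch, not part of the original file,
 * kept under #if 0): the refcounted lookup pattern.  The "eth0" name and
 * the example_* function are hypothetical.
 */
#if 0
static int example_report_ifindex(struct net *net)
{
        struct net_device *dev = dev_get_by_name(net, "eth0");

        if (!dev)
                return -ENODEV;
        pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
        dev_put(dev);   /* drop the reference dev_get_by_name() took */
        return 0;
}
#endif
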
/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns a pointer to the device,
 *      or %NULL if it is not found. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns a pointer to the device,
 *      or %NULL if it is not found. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

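/*
 * Editor's example (illustrative sketch, not part of the original file,
 * kept under #if 0): an RCU-side lookup where no reference is taken; the
 * device pointer is only valid inside the read-side critical section.
 */
#if 0
static int example_mtu_of(struct net *net, int ifindex)
{
        struct net_device *dev;
        int mtu = -ENODEV;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                mtu = dev->mtu; /* must not touch dev after rcu_read_unlock() */
        rcu_read_unlock();
        return mtu;
}
#endif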

/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns a pointer to the device,
 *      or NULL if it is not found. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *      netdev_get_name - get a netdevice name, knowing its ifindex.
 *      @net: network namespace
 *      @name: a pointer to the buffer where the name will be stored.
 *      @ifindex: the ifindex of the interface to get the name from.
 *
 *      The use of raw_seqcount_begin() and cond_resched() before
 *      retrying is required as we want to give the writers a chance
 *      to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
        struct net_device *dev;
        unsigned int seq;

retry:
        seq = raw_seqcount_begin(&devnet_rename_seq);
        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        strcpy(name, dev->name);
        rcu_read_unlock();
        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
                cond_resched();
                goto retry;
        }

        return 0;
}

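/*
 * Editor's example (illustrative sketch, not part of the original file,
 * kept under #if 0): copying a name out race-free against concurrent
 * renames, as an SIOCGIFNAME-style caller would.
 */
#if 0
static int example_name_of(struct net *net, int ifindex)
{
        char name[IFNAMSIZ];
        int err = netdev_get_name(net, name, ifindex);

        if (err)
                return err;     /* -ENODEV if the index vanished */
        pr_info("ifindex %d is %s\n", ifindex, name);
        return 0;
}
#endif
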
/**
 *      dev_getbyhwaddr_rcu - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns a pointer to the
 *      device, or NULL if it is not found.
 *      The caller must hold RCU or RTNL.
 *      The returned device has not had its ref count increased
 *      and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
                                       const char *ha)
{
        struct net_device *dev;

        for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

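/*
 * Editor's example (illustrative sketch, not part of the original file,
 * kept under #if 0): looking up an Ethernet device by MAC and taking a
 * reference before leaving the RCU section.
 */
#if 0
static struct net_device *example_find_by_mac(struct net *net, const char *mac)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
        if (dev)
                dev_hold(dev);  /* caller releases with dev_put() */
        rcu_read_unlock();
        return dev;
}
#endif
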
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev, *ret = NULL;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                if (dev->type == type) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      dev_get_by_flags_rcu - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns a pointer to
 *      the first matching device, or NULL if none is found. Must be called
 *      inside rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
                                        unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        for_each_netdev_rcu(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        ret = dev;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names
 *      to allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
bool dev_valid_name(const char *name)
{
        if (*name == '\0')
                return false;
        if (strlen(name) >= IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return false;
                name++;
        }
        return true;
}
EXPORT_SYMBOL(dev_valid_name);

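/*
 * Editor's worked examples (derived from the checks above, not part of
 * the original file): dev_valid_name("eth0") is true, while "", ".",
 * "..", "eth/0", "eth 0" and any name of IFNAMSIZ characters or more
 * are all rejected.
 */
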
/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - eg "lt%d" - it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" - it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

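/*
 * Editor's example (illustrative sketch, not part of the original file,
 * kept under #if 0): a driver-style use of the "%d" wildcard, called with
 * the rtnl lock held as the kernel-doc above requires.  The "dummy%d"
 * prefix and example_* name are hypothetical.
 */
#if 0
static int example_assign_name(struct net_device *dev)
{
        int unit = dev_alloc_name(dev, "dummy%d");

        if (unit < 0)
                return unit;    /* -EINVAL, -ENOMEM or -ENFILE */
        /* dev->name is now e.g. "dummy0", the first free unit */
        return 0;
}
#endif
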
static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
{
        char buf[IFNAMSIZ];
        int ret;

        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

static int dev_get_valid_name(struct net *net,
                              struct net_device *dev,
                              const char *name)
{
        BUG_ON(!net);

        if (!dev_valid_name(name))
                return -EINVAL;

        if (strchr(name, '%'))
                return dev_alloc_name_ns(net, dev, name);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (dev->name != name)
                strlcpy(dev->name, name, IFNAMSIZ);

        return 0;
}

1075 * dev_change_name - change name of a device
1076 * @dev: device
1077 * @newname: name (or format string) must be at least IFNAMSIZ
1078 *
1079 * Change name of a device, can pass format strings "eth%d".
1080 * for wildcarding.
1081 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07001082int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083{
Herbert Xufcc5a032007-07-30 17:03:38 -07001084 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -07001086 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001087 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088
1089 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001090 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001092 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 if (dev->flags & IFF_UP)
1094 return -EBUSY;
1095
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001096 write_seqcount_begin(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001097
1098 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001099 write_seqcount_end(&devnet_rename_seq);
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001100 return 0;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001101 }
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -07001102
Herbert Xufcc5a032007-07-30 17:03:38 -07001103 memcpy(oldname, dev->name, IFNAMSIZ);
1104
Gao feng828de4f2012-09-13 20:58:27 +00001105 err = dev_get_valid_name(net, dev, newname);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001106 if (err < 0) {
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001107 write_seqcount_end(&devnet_rename_seq);
Octavian Purdilad9031022009-11-18 02:36:59 +00001108 return err;
Brian Haleyc91f6df2012-11-26 05:21:08 +00001109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
Herbert Xufcc5a032007-07-30 17:03:38 -07001111rollback:
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001112 ret = device_rename(&dev->dev, dev->name);
1113 if (ret) {
1114 memcpy(dev->name, oldname, IFNAMSIZ);
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001115 write_seqcount_end(&devnet_rename_seq);
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07001116 return ret;
Stephen Hemmingerdcc99772008-05-14 22:33:38 -07001117 }
Herbert Xu7f988ea2007-07-30 16:35:46 -07001118
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001119 write_seqcount_end(&devnet_rename_seq);
Brian Haleyc91f6df2012-11-26 05:21:08 +00001120
Herbert Xu7f988ea2007-07-30 16:35:46 -07001121 write_lock_bh(&dev_base_lock);
Eric Dumazet372b2312011-05-17 13:56:59 -04001122 hlist_del_rcu(&dev->name_hlist);
Eric Dumazet72c95282009-10-30 07:11:27 +00001123 write_unlock_bh(&dev_base_lock);
1124
1125 synchronize_rcu();
1126
1127 write_lock_bh(&dev_base_lock);
1128 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Herbert Xu7f988ea2007-07-30 16:35:46 -07001129 write_unlock_bh(&dev_base_lock);
1130
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001131 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001132 ret = notifier_to_errno(ret);
1133
1134 if (ret) {
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001135 /* err >= 0 after dev_alloc_name() or stores the first errno */
1136 if (err >= 0) {
Herbert Xufcc5a032007-07-30 17:03:38 -07001137 err = ret;
Eric Dumazet30e6c9f2012-12-20 17:25:08 +00001138 write_seqcount_begin(&devnet_rename_seq);
Herbert Xufcc5a032007-07-30 17:03:38 -07001139 memcpy(dev->name, oldname, IFNAMSIZ);
1140 goto rollback;
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001141 } else {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001142 pr_err("%s: name change rollback failed: %d\n",
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001143 dev->name, ret);
Herbert Xufcc5a032007-07-30 17:03:38 -07001144 }
1145 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
1147 return err;
1148}
1149
/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        char *new_ifalias;

        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                kfree(dev->ifalias);
                dev->ifalias = NULL;
                return 0;
        }

        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!new_ifalias)
                return -ENOMEM;
        dev->ifalias = new_ifalias;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}


/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *      netdev_notify_peers - notify network peers about existence of @dev
 *      @dev: network device
 *
 *      Generate traffic such that interested network peers are aware of
 *      @dev, such as by generating a gratuitous ARP. This may be used when
 *      a device wants to inform the rest of the network about some sort of
 *      reconfiguration such as a failover event or virtual machine
 *      migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

Patrick McHardybd380812010-02-26 06:34:53 +00001230static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001232 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001233 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001235 ASSERT_RTNL();
1236
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 if (!netif_device_present(dev))
1238 return -ENODEV;
1239
Neil Hormanca99ca12013-02-05 08:05:43 +00001240 /* Block netpoll from trying to do any rx path servicing.
1241 * If we don't do this there is a chance ndo_poll_controller
1242 * or ndo_poll may be running while we open the device
1243 */
dingtianhongda6e3782013-05-27 19:53:31 +00001244 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001245
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001246 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1247 ret = notifier_to_errno(ret);
1248 if (ret)
1249 return ret;
1250
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001252
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001253 if (ops->ndo_validate_addr)
1254 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001255
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001256 if (!ret && ops->ndo_open)
1257 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258
Neil Hormanca99ca12013-02-05 08:05:43 +00001259 netpoll_rx_enable(dev);
1260
Jeff Garzikbada3392007-10-23 20:19:37 -07001261 if (ret)
1262 clear_bit(__LINK_STATE_START, &dev->state);
1263 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001265 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001266 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001268 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001270
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 return ret;
1272}
Patrick McHardybd380812010-02-26 06:34:53 +00001273
1274/**
1275 * dev_open - prepare an interface for use.
1276 * @dev: device to open
1277 *
1278 * Takes a device from down to up state. The device's private open
1279 * function is invoked and then the multicast lists are loaded. Finally
1280 * the device is moved into the up state and a %NETDEV_UP message is
1281 * sent to the netdev notifier chain.
1282 *
1283 * Calling this function on an active interface is a nop. On a failure
1284 * a negative errno code is returned.
1285 */
1286int dev_open(struct net_device *dev)
1287{
1288 int ret;
1289
Patrick McHardybd380812010-02-26 06:34:53 +00001290 if (dev->flags & IFF_UP)
1291 return 0;
1292
Patrick McHardybd380812010-02-26 06:34:53 +00001293 ret = __dev_open(dev);
1294 if (ret < 0)
1295 return ret;
1296
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001297 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001298 call_netdevice_notifiers(NETDEV_UP, dev);
1299
1300 return ret;
1301}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001302EXPORT_SYMBOL(dev_open);
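
/*
 * Example (illustrative sketch): bringing an interface up from kernel
 * code. dev_open() must run under RTNL; the lookup-and-open pattern
 * below is a minimal sketch, and my_bring_up() is hypothetical.
 */
static int my_bring_up(struct net *net, const char *name)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);	/* RTNL protects the list */
	if (dev)
		err = dev_open(dev);		/* nop if already IFF_UP */
	rtnl_unlock();
	return err;
}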
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303
Octavian Purdila44345722010-12-13 12:44:07 +00001304static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305{
Octavian Purdila44345722010-12-13 12:44:07 +00001306 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001307
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001308 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001309 might_sleep();
1310
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001311 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001312 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Octavian Purdila44345722010-12-13 12:44:07 +00001314 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
Octavian Purdila44345722010-12-13 12:44:07 +00001316		/* Synchronize to scheduled poll. We cannot touch the poll list;
1317		 * it may even be on a different cpu. So just clear netif_running().
1318		 *
1319		 * dev->stop() will invoke napi_disable() on all of its
1320		 * napi_struct instances on this device.
1321 */
1322 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1323 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324
Octavian Purdila44345722010-12-13 12:44:07 +00001325 dev_deactivate_many(head);
1326
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001327 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001328 const struct net_device_ops *ops = dev->netdev_ops;
1329
1330 /*
1331		 * Call the device-specific close. This cannot fail and is
1332		 * only done if the device is UP.
1333 *
1334 * We allow it to be called even after a DETACH hot-plug
1335 * event.
1336 */
1337 if (ops->ndo_stop)
1338 ops->ndo_stop(dev);
1339
Octavian Purdila44345722010-12-13 12:44:07 +00001340 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001341 net_dmaengine_put();
1342 }
1343
1344 return 0;
1345}
1346
1347static int __dev_close(struct net_device *dev)
1348{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001349 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001350 LIST_HEAD(single);
1351
Neil Hormanca99ca12013-02-05 08:05:43 +00001352 /* Temporarily disable netpoll until the interface is down */
dingtianhongda6e3782013-05-27 19:53:31 +00001353 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001354
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001355 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001356 retval = __dev_close_many(&single);
1357 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001358
1359 netpoll_rx_enable(dev);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001360 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001361}
1362
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001363static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001364{
1365 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001366
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001367 /* Remove the devices that don't need to be closed */
1368 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001369 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001370 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001371
1372 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001373
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001374 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001375 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001376 call_netdevice_notifiers(NETDEV_DOWN, dev);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001377 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001378 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 return 0;
1381}
Patrick McHardybd380812010-02-26 06:34:53 +00001382
1383/**
1384 * dev_close - shutdown an interface.
1385 * @dev: device to shutdown
1386 *
1387 * This function moves an active device into down state. A
1388 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1389 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1390 * chain.
1391 */
1392int dev_close(struct net_device *dev)
1393{
Eric Dumazete14a5992011-05-10 12:26:06 -07001394 if (dev->flags & IFF_UP) {
1395 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001396
Neil Hormanca99ca12013-02-05 08:05:43 +00001397 /* Block netpoll rx while the interface is going down */
dingtianhongda6e3782013-05-27 19:53:31 +00001398 netpoll_rx_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001399
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001400 list_add(&dev->close_list, &single);
Eric Dumazete14a5992011-05-10 12:26:06 -07001401 dev_close_many(&single);
1402 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001403
1404 netpoll_rx_enable(dev);
Eric Dumazete14a5992011-05-10 12:26:06 -07001405 }
dingtianhongda6e3782013-05-27 19:53:31 +00001406 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001407}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001408EXPORT_SYMBOL(dev_close);
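
/*
 * Example (illustrative sketch): administratively downing a device,
 * e.g. from an error handler. Like dev_open(), dev_close() must run
 * under RTNL. my_take_down() is hypothetical.
 */
static void my_take_down(struct net_device *dev)
{
	rtnl_lock();
	dev_close(dev);		/* nop if the device is already down */
	rtnl_unlock();
}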
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409
1410
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001411/**
1412 * dev_disable_lro - disable Large Receive Offload on a device
1413 * @dev: device
1414 *
1415 * Disable Large Receive Offload (LRO) on a net device. Must be
1416 * called under RTNL. This is needed if received packets may be
1417 * forwarded to another interface.
1418 */
1419void dev_disable_lro(struct net_device *dev)
1420{
Neil Hormanf11970e2011-05-24 08:31:09 +00001421 /*
1422	 * If we're trying to disable LRO on a vlan device,
1423	 * use the underlying physical device instead
1424 */
1425 if (is_vlan_dev(dev))
1426 dev = vlan_dev_real_dev(dev);
1427
Michal Kubeček529d0482013-11-15 06:18:50 +01001428 /* the same for macvlan devices */
1429 if (netif_is_macvlan(dev))
1430 dev = macvlan_dev_real_dev(dev);
1431
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001432 dev->wanted_features &= ~NETIF_F_LRO;
1433 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001434
Michał Mirosław22d59692011-04-21 12:42:15 +00001435 if (unlikely(dev->features & NETIF_F_LRO))
1436 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001437}
1438EXPORT_SYMBOL(dev_disable_lro);
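
/*
 * Example (illustrative sketch): a path that enables forwarding out of
 * a device disables LRO first, as required by the comment above, since
 * LRO-merged frames must not be forwarded. my_enable_forwarding() is
 * hypothetical.
 */
static void my_enable_forwarding(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_disable_lro(dev);
	/* ... mark the device as a forwarding port ... */
}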
1439
Jiri Pirko351638e2013-05-28 01:30:21 +00001440static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1441 struct net_device *dev)
1442{
1443 struct netdev_notifier_info info;
1444
1445 netdev_notifier_info_init(&info, dev);
1446 return nb->notifier_call(nb, val, &info);
1447}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001448
Eric W. Biederman881d9662007-09-17 11:56:21 -07001449static int dev_boot_phase = 1;
1450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451/**
1452 * register_netdevice_notifier - register a network notifier block
1453 * @nb: notifier
1454 *
1455 * Register a notifier to be called when network device events occur.
1456 * The notifier passed is linked into the kernel structures and must
1457 * not be reused until it has been unregistered. A negative errno code
1458 * is returned on a failure.
1459 *
1460 * When registered all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001461 * to the new notifier to allow the device to have a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 * view of the network device list.
1463 */
1464
1465int register_netdevice_notifier(struct notifier_block *nb)
1466{
1467 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001468 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001469 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 int err;
1471
1472 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001473 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001474 if (err)
1475 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001476 if (dev_boot_phase)
1477 goto unlock;
1478 for_each_net(net) {
1479 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001480 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001481 err = notifier_to_errno(err);
1482 if (err)
1483 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
Eric W. Biederman881d9662007-09-17 11:56:21 -07001485 if (!(dev->flags & IFF_UP))
1486 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001487
Jiri Pirko351638e2013-05-28 01:30:21 +00001488 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001489 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001491
1492unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 rtnl_unlock();
1494 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001495
1496rollback:
1497 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001498 for_each_net(net) {
1499 for_each_netdev(net, dev) {
1500 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001501 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001502
Eric W. Biederman881d9662007-09-17 11:56:21 -07001503 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001504 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1505 dev);
1506 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001507 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001508 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001509 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001510 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001511
RongQing.Li8f891482011-11-30 23:43:07 -05001512outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001513 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001514 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001516EXPORT_SYMBOL(register_netdevice_notifier);
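
/*
 * Example (illustrative sketch): a module watching device events. The
 * callback receives a struct netdev_notifier_info pointer; recover the
 * net_device with netdev_notifier_info_to_dev(). Existing devices are
 * replayed as register/up events at registration time, as documented
 * above. my_netdev_event() and my_netdev_nb are hypothetical.
 */
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_nb = {
	.notifier_call = my_netdev_event,
};

/* register_netdevice_notifier(&my_netdev_nb) from module init;
 * unregister_netdevice_notifier(&my_netdev_nb), below, from module exit.
 */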
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
1518/**
1519 * unregister_netdevice_notifier - unregister a network notifier block
1520 * @nb: notifier
1521 *
1522 * Unregister a notifier previously registered by
1523 * register_netdevice_notifier(). The notifier is unlinked from the
1524 * kernel structures and may then be reused. A negative errno code
1525 * is returned on a failure.
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001526 *
1527 * After unregistering, unregister and down device events are synthesized
1528 * for all devices on the device list and delivered to the removed
1529 * notifier, removing the need for special-case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 */
1531
1532int unregister_netdevice_notifier(struct notifier_block *nb)
1533{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001534 struct net_device *dev;
1535 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001536 int err;
1537
1538 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001539 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001540 if (err)
1541 goto unlock;
1542
1543 for_each_net(net) {
1544 for_each_netdev(net, dev) {
1545 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001546 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1547 dev);
1548 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001549 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001550 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001551 }
1552 }
1553unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001554 rtnl_unlock();
1555 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001557EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
1559/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001560 * call_netdevice_notifiers_info - call all network notifier blocks
1561 * @val: value passed unmodified to notifier function
1562 * @dev: net_device pointer passed unmodified to notifier function
1563 * @info: notifier information data
1564 *
1565 * Call all network notifier blocks. Parameters and return value
1566 * are as for raw_notifier_call_chain().
1567 */
1568
1569int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1570 struct netdev_notifier_info *info)
1571{
1572 ASSERT_RTNL();
1573 netdev_notifier_info_init(info, dev);
1574 return raw_notifier_call_chain(&netdev_chain, val, info);
1575}
1576EXPORT_SYMBOL(call_netdevice_notifiers_info);
1577
1578/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 * call_netdevice_notifiers - call all network notifier blocks
1580 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001581 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 *
1583 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001584 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 */
1586
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001587int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588{
Jiri Pirko351638e2013-05-28 01:30:21 +00001589 struct netdev_notifier_info info;
1590
1591 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001593EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594
Ingo Molnarc5905af2012-02-24 08:31:31 +01001595static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001596#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01001597/* We are not allowed to call static_key_slow_dec() from irq context
Eric Dumazetb90e5792011-11-28 11:16:50 +00001598 * If net_disable_timestamp() is called from irq context, defer the
Ingo Molnarc5905af2012-02-24 08:31:31 +01001599 * static_key_slow_dec() calls.
Eric Dumazetb90e5792011-11-28 11:16:50 +00001600 */
1601static atomic_t netstamp_needed_deferred;
1602#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603
1604void net_enable_timestamp(void)
1605{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001606#ifdef HAVE_JUMP_LABEL
1607 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1608
1609 if (deferred) {
1610 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001611 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001612 return;
1613 }
1614#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001615 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001617EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618
1619void net_disable_timestamp(void)
1620{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001621#ifdef HAVE_JUMP_LABEL
1622 if (in_interrupt()) {
1623 atomic_inc(&netstamp_needed_deferred);
1624 return;
1625 }
1626#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001627 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001629EXPORT_SYMBOL(net_disable_timestamp);
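
/*
 * Example (illustrative sketch): the enable/disable pair above is
 * reference counted through the static key, so a feature needing
 * packet timestamps simply brackets its lifetime. my_capture_start()
 * and my_capture_stop() are hypothetical.
 */
static void my_capture_start(void)
{
	net_enable_timestamp();		/* turn the static key on */
}

static void my_capture_stop(void)
{
	net_disable_timestamp();	/* deferred if called from irq */
}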
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
Eric Dumazet3b098e22010-05-15 23:57:10 -07001631static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632{
Eric Dumazet588f0332011-11-15 04:12:55 +00001633 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001634 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001635 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636}
1637
Eric Dumazet588f0332011-11-15 04:12:55 +00001638#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001639 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001640 if ((COND) && !(SKB)->tstamp.tv64) \
1641 __net_timestamp(SKB); \
1642 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001643
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001644static inline bool is_skb_forwardable(struct net_device *dev,
1645 struct sk_buff *skb)
1646{
1647 unsigned int len;
1648
1649 if (!(dev->flags & IFF_UP))
1650 return false;
1651
1652 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1653 if (skb->len <= len)
1654 return true;
1655
1656 /* if TSO is enabled, we don't care about the length as the packet
1657	 * could be forwarded without being segmented first
1658 */
1659 if (skb_is_gso(skb))
1660 return true;
1661
1662 return false;
1663}
1664
Arnd Bergmann44540962009-11-26 06:07:08 +00001665/**
1666 * dev_forward_skb - loopback an skb to another netif
1667 *
1668 * @dev: destination network device
1669 * @skb: buffer to forward
1670 *
1671 * return values:
1672 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001673 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001674 *
1675 * dev_forward_skb can be used for injecting an skb from the
1676 * start_xmit function of one device into the receive queue
1677 * of another device.
1678 *
1679 * The receiving device may be in another namespace, so
1680 * we have to clear all information in the skb that could
1681 * impact namespace isolation.
1682 */
1683int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1684{
Michael S. Tsirkin48c83012011-08-31 08:03:29 +00001685 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1686 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1687 atomic_long_inc(&dev->rx_dropped);
1688 kfree_skb(skb);
1689 return NET_RX_DROP;
1690 }
1691 }
1692
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001693 if (unlikely(!is_skb_forwardable(dev, skb))) {
Eric Dumazetcaf586e2010-09-30 21:06:55 +00001694 atomic_long_inc(&dev->rx_dropped);
Eric Dumazet6ec82562010-05-06 00:53:53 -07001695 kfree_skb(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001696 return NET_RX_DROP;
Eric Dumazet6ec82562010-05-06 00:53:53 -07001697 }
Isaku Yamahata06a23fe2013-07-02 20:30:10 +09001698
Nicolas Dichtel8b27f272013-09-02 15:34:56 +02001699 skb_scrub_packet(skb, true);
Alexei Starovoitov81b9eab2013-11-12 14:39:13 -08001700 skb->protocol = eth_type_trans(skb, dev);
Isaku Yamahata06a23fe2013-07-02 20:30:10 +09001701
Arnd Bergmann44540962009-11-26 06:07:08 +00001702 return netif_rx(skb);
1703}
1704EXPORT_SYMBOL_GPL(dev_forward_skb);
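
/*
 * Example (illustrative sketch): a veth-like pair device whose transmit
 * handler loops frames into its peer's receive path via
 * dev_forward_skb(). my_pair_xmit() and the priv layout (peer pointer
 * first) are hypothetical.
 */
static netdev_tx_t my_pair_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = *(struct net_device **)netdev_priv(dev);

	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;	/* skb already freed */
	return NETDEV_TX_OK;
}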
1705
Changli Gao71d9dec2010-12-15 19:57:25 +00001706static inline int deliver_skb(struct sk_buff *skb,
1707 struct packet_type *pt_prev,
1708 struct net_device *orig_dev)
1709{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001710 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1711 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001712 atomic_inc(&skb->users);
1713 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1714}
1715
Eric Leblondc0de08d2012-08-16 22:02:58 +00001716static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1717{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001718 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001719 return false;
1720
1721 if (ptype->id_match)
1722 return ptype->id_match(ptype, skb->sk);
1723 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1724 return true;
1725
1726 return false;
1727}
1728
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729/*
1730 * Support routine. Sends outgoing frames to any network
1731 * taps currently in use.
1732 */
1733
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001734static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735{
1736 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001737 struct sk_buff *skb2 = NULL;
1738 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001739
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 rcu_read_lock();
1741 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1742 /* Never send packets back to the socket
1743 * they originated from - MvS (miquels@drinkel.ow.org)
1744 */
1745 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001746 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001747 if (pt_prev) {
1748 deliver_skb(skb2, pt_prev, skb->dev);
1749 pt_prev = ptype;
1750 continue;
1751 }
1752
1753 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 if (!skb2)
1755 break;
1756
Eric Dumazet70978182010-12-20 21:22:51 +00001757 net_timestamp_set(skb2);
1758
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759			/* skb->nh should be correctly
1760			   set by the sender, so the second statement is
1761			   just protection against buggy protocols.
1762 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001763 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001765 if (skb_network_header(skb2) < skb2->data ||
Simon Hormanced14f62013-05-28 20:34:25 +00001766 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
Joe Perchese87cc472012-05-13 21:56:26 +00001767 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1768 ntohs(skb2->protocol),
1769 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001770 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 }
1772
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001773 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001775 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 }
1777 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001778 if (pt_prev)
1779 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 rcu_read_unlock();
1781}
1782
Ben Hutchings2c530402012-07-10 10:55:09 +00001783/**
1784 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001785 * @dev: Network device
1786 * @txq: number of queues available
1787 *
1788 * If real_num_tx_queues is changed the tc mappings may no longer be
1789 * valid. To resolve this verify the tc mapping remains valid, and if
1790 * not, NULL the mapping. With no priorities mapping to this
1791 * offset/count pair it will no longer be used. In the worst case, if
1792 * TC0 is invalid, nothing can be done, so priority mappings are
1793 * disabled. It is expected that drivers will fix this mapping if they
1794 * can before calling netif_set_real_num_tx_queues.
1795 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001796static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001797{
1798 int i;
1799 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1800
1801 /* If TC0 is invalidated disable TC mapping */
1802 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001803 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001804 dev->num_tc = 0;
1805 return;
1806 }
1807
1808 /* Invalidated prio to tc mappings set to TC0 */
1809 for (i = 1; i < TC_BITMASK + 1; i++) {
1810 int q = netdev_get_prio_tc_map(dev, i);
1811
1812 tc = &dev->tc_to_txq[q];
1813 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001814 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1815 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001816 netdev_set_prio_tc_map(dev, i, 0);
1817 }
1818 }
1819}
1820
Alexander Duyck537c00d2013-01-10 08:57:02 +00001821#ifdef CONFIG_XPS
1822static DEFINE_MUTEX(xps_map_mutex);
1823#define xmap_dereference(P) \
1824 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1825
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001826static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1827 int cpu, u16 index)
1828{
1829 struct xps_map *map = NULL;
1830 int pos;
1831
1832 if (dev_maps)
1833 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1834
1835 for (pos = 0; map && pos < map->len; pos++) {
1836 if (map->queues[pos] == index) {
1837 if (map->len > 1) {
1838 map->queues[pos] = map->queues[--map->len];
1839 } else {
1840 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1841 kfree_rcu(map, rcu);
1842 map = NULL;
1843 }
1844 break;
1845 }
1846 }
1847
1848 return map;
1849}
1850
Alexander Duyck024e9672013-01-10 08:57:46 +00001851static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001852{
1853 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001854 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001855 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001856
1857 mutex_lock(&xps_map_mutex);
1858 dev_maps = xmap_dereference(dev->xps_maps);
1859
1860 if (!dev_maps)
1861 goto out_no_maps;
1862
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001863 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001864 for (i = index; i < dev->num_tx_queues; i++) {
1865 if (!remove_xps_queue(dev_maps, cpu, i))
1866 break;
1867 }
1868 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001869 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001870 }
1871
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001872 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001873 RCU_INIT_POINTER(dev->xps_maps, NULL);
1874 kfree_rcu(dev_maps, rcu);
1875 }
1876
Alexander Duyck024e9672013-01-10 08:57:46 +00001877 for (i = index; i < dev->num_tx_queues; i++)
1878 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1879 NUMA_NO_NODE);
1880
Alexander Duyck537c00d2013-01-10 08:57:02 +00001881out_no_maps:
1882 mutex_unlock(&xps_map_mutex);
1883}
1884
Alexander Duyck01c5f862013-01-10 08:57:35 +00001885static struct xps_map *expand_xps_map(struct xps_map *map,
1886 int cpu, u16 index)
1887{
1888 struct xps_map *new_map;
1889 int alloc_len = XPS_MIN_MAP_ALLOC;
1890 int i, pos;
1891
1892 for (pos = 0; map && pos < map->len; pos++) {
1893 if (map->queues[pos] != index)
1894 continue;
1895 return map;
1896 }
1897
1898 /* Need to add queue to this CPU's existing map */
1899 if (map) {
1900 if (pos < map->alloc_len)
1901 return map;
1902
1903 alloc_len = map->alloc_len * 2;
1904 }
1905
1906 /* Need to allocate new map to store queue on this CPU's map */
1907 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1908 cpu_to_node(cpu));
1909 if (!new_map)
1910 return NULL;
1911
1912 for (i = 0; i < pos; i++)
1913 new_map->queues[i] = map->queues[i];
1914 new_map->alloc_len = alloc_len;
1915 new_map->len = pos;
1916
1917 return new_map;
1918}
1919
Michael S. Tsirkin35735402013-10-02 09:14:06 +03001920int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1921 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001922{
Alexander Duyck01c5f862013-01-10 08:57:35 +00001923 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001924 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001925 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001926 int cpu, numa_node_id = -2;
1927 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001928
1929 mutex_lock(&xps_map_mutex);
1930
1931 dev_maps = xmap_dereference(dev->xps_maps);
1932
Alexander Duyck01c5f862013-01-10 08:57:35 +00001933 /* allocate memory for queue storage */
1934 for_each_online_cpu(cpu) {
1935 if (!cpumask_test_cpu(cpu, mask))
1936 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001937
Alexander Duyck01c5f862013-01-10 08:57:35 +00001938 if (!new_dev_maps)
1939 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001940 if (!new_dev_maps) {
1941 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001942 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001943 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001944
1945 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1946 NULL;
1947
1948 map = expand_xps_map(map, cpu, index);
1949 if (!map)
1950 goto error;
1951
1952 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1953 }
1954
1955 if (!new_dev_maps)
1956 goto out_no_new_maps;
1957
1958 for_each_possible_cpu(cpu) {
1959 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1960 /* add queue to CPU maps */
1961 int pos = 0;
1962
1963 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1964 while ((pos < map->len) && (map->queues[pos] != index))
1965 pos++;
1966
1967 if (pos == map->len)
1968 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001969#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00001970 if (numa_node_id == -2)
1971 numa_node_id = cpu_to_node(cpu);
1972 else if (numa_node_id != cpu_to_node(cpu))
1973 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001974#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00001975 } else if (dev_maps) {
1976 /* fill in the new device map from the old device map */
1977 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1978 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001979 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001980
Alexander Duyck537c00d2013-01-10 08:57:02 +00001981 }
1982
Alexander Duyck01c5f862013-01-10 08:57:35 +00001983 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1984
Alexander Duyck537c00d2013-01-10 08:57:02 +00001985 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00001986 if (dev_maps) {
1987 for_each_possible_cpu(cpu) {
1988 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1989 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1990 if (map && map != new_map)
1991 kfree_rcu(map, rcu);
1992 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001993
Alexander Duyck537c00d2013-01-10 08:57:02 +00001994 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001995 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00001996
Alexander Duyck01c5f862013-01-10 08:57:35 +00001997 dev_maps = new_dev_maps;
1998 active = true;
1999
2000out_no_new_maps:
2001 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002002 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2003 (numa_node_id >= 0) ? numa_node_id :
2004 NUMA_NO_NODE);
2005
Alexander Duyck01c5f862013-01-10 08:57:35 +00002006 if (!dev_maps)
2007 goto out_no_maps;
2008
2009 /* removes queue from unused CPUs */
2010 for_each_possible_cpu(cpu) {
2011 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2012 continue;
2013
2014 if (remove_xps_queue(dev_maps, cpu, index))
2015 active = true;
2016 }
2017
2018 /* free map if not active */
2019 if (!active) {
2020 RCU_INIT_POINTER(dev->xps_maps, NULL);
2021 kfree_rcu(dev_maps, rcu);
2022 }
2023
2024out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002025 mutex_unlock(&xps_map_mutex);
2026
2027 return 0;
2028error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002029 /* remove any maps that we added */
2030 for_each_possible_cpu(cpu) {
2031 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2032 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2033 NULL;
2034 if (new_map && new_map != map)
2035 kfree(new_map);
2036 }
2037
Alexander Duyck537c00d2013-01-10 08:57:02 +00002038 mutex_unlock(&xps_map_mutex);
2039
Alexander Duyck537c00d2013-01-10 08:57:02 +00002040 kfree(new_dev_maps);
2041 return -ENOMEM;
2042}
2043EXPORT_SYMBOL(netif_set_xps_queue);
2044
2045#endif
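
/*
 * Example (illustrative sketch): a multiqueue driver pinning each tx
 * queue to one CPU at setup time. Real drivers usually derive the mask
 * from IRQ affinity; my_setup_xps() is hypothetical and ignores the
 * return value for brevity.
 */
static void my_setup_xps(struct net_device *dev)
{
#ifdef CONFIG_XPS
	int i;

	for (i = 0; i < dev->real_num_tx_queues; i++)
		netif_set_xps_queue(dev, cpumask_of(i % nr_cpu_ids), i);
#endif
}
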
John Fastabendf0796d52010-07-01 13:21:57 +00002046/*
2047 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2048 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2049 */
Tom Herberte6484932010-10-18 18:04:39 +00002050int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002051{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002052 int rc;
2053
Tom Herberte6484932010-10-18 18:04:39 +00002054 if (txq < 1 || txq > dev->num_tx_queues)
2055 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002056
Ben Hutchings5c565802011-02-15 19:39:21 +00002057 if (dev->reg_state == NETREG_REGISTERED ||
2058 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002059 ASSERT_RTNL();
2060
Tom Herbert1d24eb42010-11-21 13:17:27 +00002061 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2062 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002063 if (rc)
2064 return rc;
2065
John Fastabend4f57c082011-01-17 08:06:04 +00002066 if (dev->num_tc)
2067 netif_setup_tc(dev, txq);
2068
Alexander Duyck024e9672013-01-10 08:57:46 +00002069 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002070 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002071#ifdef CONFIG_XPS
2072 netif_reset_xps_queues_gt(dev, txq);
2073#endif
2074 }
John Fastabendf0796d52010-07-01 13:21:57 +00002075 }
Tom Herberte6484932010-10-18 18:04:39 +00002076
2077 dev->real_num_tx_queues = txq;
2078 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002079}
2080EXPORT_SYMBOL(netif_set_real_num_tx_queues);
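
/*
 * Example (illustrative sketch): an ethtool-style channel change,
 * shrinking or growing the active tx queue set within the
 * num_tx_queues bound. my_set_tx_channels() is hypothetical.
 */
static int my_set_tx_channels(struct net_device *dev, unsigned int count)
{
	ASSERT_RTNL();		/* required once the device is registered */
	return netif_set_real_num_tx_queues(dev, count);
}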
Denis Vlasenko56079432006-03-29 15:57:29 -08002081
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002082#ifdef CONFIG_RPS
2083/**
2084 * netif_set_real_num_rx_queues - set actual number of RX queues used
2085 * @dev: Network device
2086 * @rxq: Actual number of RX queues
2087 *
2088 * This must be called either with the rtnl_lock held or before
2089 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002090 * negative error code. If called before registration, it always
2091 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002092 */
2093int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2094{
2095 int rc;
2096
Tom Herbertbd25fa72010-10-18 18:00:16 +00002097 if (rxq < 1 || rxq > dev->num_rx_queues)
2098 return -EINVAL;
2099
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002100 if (dev->reg_state == NETREG_REGISTERED) {
2101 ASSERT_RTNL();
2102
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002103 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2104 rxq);
2105 if (rc)
2106 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002107 }
2108
2109 dev->real_num_rx_queues = rxq;
2110 return 0;
2111}
2112EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2113#endif
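
/*
 * Example (illustrative sketch): the rx-side counterpart of the tx
 * example above, typically called from the same reconfiguration path;
 * this assumes the usual header stub makes the call harmless when
 * CONFIG_RPS is off. my_set_rx_channels() is hypothetical.
 */
static int my_set_rx_channels(struct net_device *dev, unsigned int count)
{
	ASSERT_RTNL();
	return netif_set_real_num_rx_queues(dev, count);
}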
2114
Ben Hutchings2c530402012-07-10 10:55:09 +00002115/**
2116 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002117 *
2118 * This routine gives the upper limit on the number of RSS queues
2119 * to be used by default by multiqueue devices.
2120 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002121int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002122{
2123 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2124}
2125EXPORT_SYMBOL(netif_get_num_default_rss_queues);
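
/*
 * Example (illustrative sketch): a driver capping its requested queue
 * count at probe time with the default RSS bound. my_pick_num_queues()
 * is hypothetical.
 */
static unsigned int my_pick_num_queues(unsigned int hw_max)
{
	return min_t(unsigned int, hw_max,
		     netif_get_num_default_rss_queues());
}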
2126
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002127static inline void __netif_reschedule(struct Qdisc *q)
2128{
2129 struct softnet_data *sd;
2130 unsigned long flags;
2131
2132 local_irq_save(flags);
2133 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002134 q->next_sched = NULL;
2135 *sd->output_queue_tailp = q;
2136 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002137 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2138 local_irq_restore(flags);
2139}
2140
David S. Miller37437bb2008-07-16 02:15:04 -07002141void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002142{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002143 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2144 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002145}
2146EXPORT_SYMBOL(__netif_schedule);
2147
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002148void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002149{
David S. Miller3578b0c2010-08-03 00:24:04 -07002150 if (atomic_dec_and_test(&skb->users)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002151 struct softnet_data *sd;
2152 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08002153
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002154 local_irq_save(flags);
2155 sd = &__get_cpu_var(softnet_data);
2156 skb->next = sd->completion_queue;
2157 sd->completion_queue = skb;
2158 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2159 local_irq_restore(flags);
2160 }
Denis Vlasenko56079432006-03-29 15:57:29 -08002161}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002162EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08002163
2164void dev_kfree_skb_any(struct sk_buff *skb)
2165{
2166 if (in_irq() || irqs_disabled())
2167 dev_kfree_skb_irq(skb);
2168 else
2169 dev_kfree_skb(skb);
2170}
2171EXPORT_SYMBOL(dev_kfree_skb_any);
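
/*
 * Example (illustrative sketch): a tx-completion handler that may run
 * in hardirq context must use the _any variant rather than plain
 * dev_kfree_skb(). my_tx_complete() is hypothetical.
 */
static void my_tx_complete(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);	/* safe in irq, softirq or process context */
}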
2172
2173
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002174/**
2175 * netif_device_detach - mark device as removed
2176 * @dev: network device
2177 *
2178 * Mark device as removed from the system and therefore no longer available.
2179 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002180void netif_device_detach(struct net_device *dev)
2181{
2182 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2183 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002184 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002185 }
2186}
2187EXPORT_SYMBOL(netif_device_detach);
2188
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002189/**
2190 * netif_device_attach - mark device as attached
2191 * @dev: network device
2192 *
2193 * Mark device as attached to the system and restart its queues if needed.
2194 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002195void netif_device_attach(struct net_device *dev)
2196{
2197 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2198 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002199 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002200 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002201 }
2202}
2203EXPORT_SYMBOL(netif_device_attach);
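
/*
 * Example (illustrative sketch): the canonical suspend/resume pairing
 * for the two helpers above. my_suspend() and my_resume() are
 * hypothetical PM callbacks.
 */
static int my_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops tx queues if running */
	/* ... put hardware into a low power state ... */
	return 0;
}

static int my_resume(struct net_device *dev)
{
	/* ... reinitialize hardware ... */
	netif_device_attach(dev);	/* restarts queues and watchdog */
	return 0;
}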
2204
Ben Hutchings36c92472012-01-17 07:57:56 +00002205static void skb_warn_bad_offload(const struct sk_buff *skb)
2206{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002207 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002208 struct net_device *dev = skb->dev;
2209 const char *driver = "";
2210
Ben Greearc846ad92013-04-19 10:45:52 +00002211 if (!net_ratelimit())
2212 return;
2213
Ben Hutchings36c92472012-01-17 07:57:56 +00002214 if (dev && dev->dev.parent)
2215 driver = dev_driver_string(dev->dev.parent);
2216
2217 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2218 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002219 driver, dev ? &dev->features : &null_features,
2220 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002221 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2222 skb_shinfo(skb)->gso_type, skb->ip_summed);
2223}
2224
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225/*
2226 * Invalidate hardware checksum when packet is to be mangled, and
2227 * complete checksum manually on outgoing path.
2228 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002229int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230{
Al Virod3bc23e2006-11-14 21:24:49 -08002231 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002232 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
Patrick McHardy84fa7932006-08-29 16:44:56 -07002234 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002235 goto out_set_summed;
2236
2237 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002238 skb_warn_bad_offload(skb);
2239 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 }
2241
Eric Dumazetcef401d2013-01-25 20:34:37 +00002242 /* Before computing a checksum, we should make sure no frag could
2243 * be modified by an external entity : checksum could be wrong.
2244 */
2245 if (skb_has_shared_frag(skb)) {
2246 ret = __skb_linearize(skb);
2247 if (ret)
2248 goto out;
2249 }
2250
Michał Mirosław55508d62010-12-14 15:24:08 +00002251 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002252 BUG_ON(offset >= skb_headlen(skb));
2253 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2254
2255 offset += skb->csum_offset;
2256 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2257
2258 if (skb_cloned(skb) &&
2259 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2261 if (ret)
2262 goto out;
2263 }
2264
Herbert Xua0308472007-10-15 01:47:15 -07002265 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002266out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002268out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 return ret;
2270}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002271EXPORT_SYMBOL(skb_checksum_help);
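
/*
 * Example (illustrative sketch): a transmit path falling back to
 * software checksumming when the device cannot offload this protocol,
 * mirroring the CHECKSUM_PARTIAL handling in dev_hard_start_xmit()
 * below. my_xmit_prep() is hypothetical.
 */
static int my_xmit_prep(struct sk_buff *skb, netdev_features_t features)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !(features & NETIF_F_ALL_CSUM))
		return skb_checksum_help(skb);	/* 0 on success */
	return 0;
}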
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002273__be16 skb_network_protocol(struct sk_buff *skb)
2274{
2275 __be16 type = skb->protocol;
David S. Miller61816592013-03-20 12:46:26 -04002276 int vlan_depth = ETH_HLEN;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002277
Pravin B Shelar19acc322013-05-07 20:41:07 +00002278 /* Tunnel gso handlers can set protocol to ethernet. */
2279 if (type == htons(ETH_P_TEB)) {
2280 struct ethhdr *eth;
2281
2282 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2283 return 0;
2284
2285 eth = (struct ethhdr *)skb_mac_header(skb);
2286 type = eth->h_proto;
2287 }
2288
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002289 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002290 struct vlan_hdr *vh;
2291
2292 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2293 return 0;
2294
2295 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2296 type = vh->h_vlan_encapsulated_proto;
2297 vlan_depth += VLAN_HLEN;
2298 }
2299
2300 return type;
2301}
2302
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002303/**
2304 * skb_mac_gso_segment - mac layer segmentation handler.
2305 * @skb: buffer to segment
2306 * @features: features for the output path (see dev->features)
2307 */
2308struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2309 netdev_features_t features)
2310{
2311 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2312 struct packet_offload *ptype;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002313 __be16 type = skb_network_protocol(skb);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002314
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002315 if (unlikely(!type))
2316 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002317
2318 __skb_pull(skb, skb->mac_len);
2319
2320 rcu_read_lock();
2321 list_for_each_entry_rcu(ptype, &offload_base, list) {
2322 if (ptype->type == type && ptype->callbacks.gso_segment) {
2323 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2324 int err;
2325
2326 err = ptype->callbacks.gso_send_check(skb);
2327 segs = ERR_PTR(err);
2328 if (err || skb_gso_ok(skb, features))
2329 break;
2330 __skb_push(skb, (skb->data -
2331 skb_network_header(skb)));
2332 }
2333 segs = ptype->callbacks.gso_segment(skb, features);
2334 break;
2335 }
2336 }
2337 rcu_read_unlock();
2338
2339 __skb_push(skb, skb->data - skb_mac_header(skb));
2340
2341 return segs;
2342}
2343EXPORT_SYMBOL(skb_mac_gso_segment);
2344
2345
Cong Wang12b00042013-02-05 16:36:38 +00002346/* openvswitch calls this on rx path, so we need a different check.
2347 */
2348static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2349{
2350 if (tx_path)
2351 return skb->ip_summed != CHECKSUM_PARTIAL;
2352 else
2353 return skb->ip_summed == CHECKSUM_NONE;
2354}
2355
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002356/**
Cong Wang12b00042013-02-05 16:36:38 +00002357 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002358 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002359 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002360 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002361 *
2362 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002363 *
2364 * It may return NULL if the skb requires no segmentation. This is
2365 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002366 */
Cong Wang12b00042013-02-05 16:36:38 +00002367struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2368 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002369{
Cong Wang12b00042013-02-05 16:36:38 +00002370 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002371 int err;
2372
Ben Hutchings36c92472012-01-17 07:57:56 +00002373 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002374
Herbert Xua430a432006-07-08 13:34:56 -07002375 if (skb_header_cloned(skb) &&
2376 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2377 return ERR_PTR(err);
2378 }
2379
Pravin B Shelar68c33162013-02-14 14:02:41 +00002380 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002381 SKB_GSO_CB(skb)->encap_level = 0;
2382
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002383 skb_reset_mac_header(skb);
2384 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002385
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002386 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002387}
Cong Wang12b00042013-02-05 16:36:38 +00002388EXPORT_SYMBOL(__skb_gso_segment);
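
/*
 * Example (illustrative sketch): software GSO as a transmit fallback,
 * walking the ->next list produced by the skb_gso_segment() wrapper.
 * my_queue_one() is a hypothetical helper that hands one skb to the
 * hardware.
 */
static int my_queue_one(struct sk_buff *skb);	/* hypothetical */

static int my_gso_fallback(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs = skb_gso_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header check only, send as-is */
		return my_queue_one(skb);

	consume_skb(skb);		/* original replaced by segments */
	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;
		my_queue_one(segs);
		segs = next;
	}
	return 0;
}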
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002389
Herbert Xufb286bb2005-11-10 13:01:24 -08002390/* Take action when hardware reception checksum errors are detected. */
2391#ifdef CONFIG_BUG
2392void netdev_rx_csum_fault(struct net_device *dev)
2393{
2394 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002395 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002396 dump_stack();
2397 }
2398}
2399EXPORT_SYMBOL(netdev_rx_csum_fault);
2400#endif
2401
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402/* Actually, we should eliminate this check as soon as we know that:
2403 * 1. An IOMMU is present and can map all the memory.
2404 * 2. No high memory really exists on this machine.
2405 */
2406
Eric Dumazet9092c652010-04-02 13:34:49 -07002407static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002409#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002411 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002412 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2413 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2414 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002415 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002416 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002417 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002419 if (PCI_DMA_BUS_IS_PHYS) {
2420 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
Eric Dumazet9092c652010-04-02 13:34:49 -07002422 if (!pdev)
2423 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002424 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002425 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2426 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002427 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2428 return 1;
2429 }
2430 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002431#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 return 0;
2433}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002435struct dev_gso_cb {
2436 void (*destructor)(struct sk_buff *skb);
2437};
2438
2439#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2440
2441static void dev_gso_skb_destructor(struct sk_buff *skb)
2442{
2443 struct dev_gso_cb *cb;
2444
2445 do {
2446 struct sk_buff *nskb = skb->next;
2447
2448 skb->next = nskb->next;
2449 nskb->next = NULL;
2450 kfree_skb(nskb);
2451 } while (skb->next);
2452
2453 cb = DEV_GSO_CB(skb);
2454 if (cb->destructor)
2455 cb->destructor(skb);
2456}
2457
2458/**
2459 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2460 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002461 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002462 *
2463 * This function segments the given skb and stores the list of segments
2464 * in skb->next.
2465 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002466static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002467{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002468 struct sk_buff *segs;
2469
Herbert Xu576a30e2006-06-27 13:22:38 -07002470 segs = skb_gso_segment(skb, features);
2471
2472 /* Verifying header integrity only. */
2473 if (!segs)
2474 return 0;
2475
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002476 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002477 return PTR_ERR(segs);
2478
2479 skb->next = segs;
2480 DEV_GSO_CB(skb)->destructor = skb->destructor;
2481 skb->destructor = dev_gso_skb_destructor;
2482
2483 return 0;
2484}
2485
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002486static netdev_features_t harmonize_features(struct sk_buff *skb,
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002487 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002488{
Ed Cashinc0d680e2012-09-19 15:49:00 +00002489 if (skb->ip_summed != CHECKSUM_NONE &&
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002490 !can_checksum_protocol(features, skb_network_protocol(skb))) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002491 features &= ~NETIF_F_ALL_CSUM;
Jesse Grossf01a5232011-01-09 06:23:31 +00002492 } else if (illegal_highdma(skb->dev, skb)) {
2493 features &= ~NETIF_F_SG;
2494 }
2495
2496 return features;
2497}
2498
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002499netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002500{
2501 __be16 protocol = skb->protocol;
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002502 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002503
Ben Hutchings30b678d2012-07-30 15:57:00 +00002504 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2505 features &= ~NETIF_F_GSO_MASK;
2506
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002507 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
Jesse Gross58e998c2010-10-29 12:14:55 +00002508 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2509 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002510 } else if (!vlan_tx_tag_present(skb)) {
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002511 return harmonize_features(skb, features);
Jesse Grossf01a5232011-01-09 06:23:31 +00002512 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002513
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002514 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2515 NETIF_F_HW_VLAN_STAG_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002516
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002517 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
Jesse Grossf01a5232011-01-09 06:23:31 +00002518 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002519 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2520 NETIF_F_HW_VLAN_STAG_TX;
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002521
2522 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002523}
Jesse Grossf01a5232011-01-09 06:23:31 +00002524EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002525
John Fastabend6afff0c2010-06-16 14:18:12 +00002526/*
2527 * Returns true if either:
2528 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
Rami Rosend1a53df2012-08-27 23:39:24 +00002529 * 2. skb is fragmented and the device does not support SG.
John Fastabend6afff0c2010-06-16 14:18:12 +00002530 */
2531static inline int skb_needs_linearize(struct sk_buff *skb,
Patrick McHardy6708c9e2013-05-01 22:36:49 +00002532 netdev_features_t features)
John Fastabend6afff0c2010-06-16 14:18:12 +00002533{
Jesse Gross02932ce2011-01-09 06:23:34 +00002534 return skb_is_nonlinear(skb) &&
2535 ((skb_has_frag_list(skb) &&
2536 !(features & NETIF_F_FRAGLIST)) ||
Jesse Grosse1e78db2010-10-29 12:14:53 +00002537 (skb_shinfo(skb)->nr_frags &&
Jesse Gross02932ce2011-01-09 06:23:34 +00002538 !(features & NETIF_F_SG)));
John Fastabend6afff0c2010-06-16 14:18:12 +00002539}
2540
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002541int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
Jason Wangf663dd92014-01-10 16:18:26 +08002542 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002543{
Stephen Hemminger00829822008-11-20 20:14:53 -08002544 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002545 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002546 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002547
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002548 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002549 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002550
Eric Dumazet93f154b2009-05-18 22:19:19 -07002551 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002552 * If device doesn't need skb->dst, release it right now while
Eric Dumazet93f154b2009-05-18 22:19:19 -07002553 * it is hot in this CPU's cache
2554 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002555 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2556 skb_dst_drop(skb);
2557
Jesse Grossfc741212011-01-09 06:23:32 +00002558 features = netif_skb_features(skb);
2559
Jesse Gross7b9c6092010-10-20 13:56:04 +00002560 if (vlan_tx_tag_present(skb) &&
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002561 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2562 skb = __vlan_put_tag(skb, skb->vlan_proto,
2563 vlan_tx_tag_get(skb));
Jesse Gross7b9c6092010-10-20 13:56:04 +00002564 if (unlikely(!skb))
2565 goto out;
2566
2567 skb->vlan_tci = 0;
2568 }
2569
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002570 /* If this is an encapsulation offload request, verify that we
2571 * are testing hardware encapsulation features instead of the
2572 * standard features of the netdev.
2573 */
2574 if (skb->encapsulation)
2575 features &= dev->hw_enc_features;
2576
Jesse Grossfc741212011-01-09 06:23:32 +00002577 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002578 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002579 goto out_kfree_skb;
2580 if (skb->next)
2581 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002582 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002583 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002584 __skb_linearize(skb))
2585 goto out_kfree_skb;
2586
2587 /* If packet is not checksummed and device does not
2588 * support checksumming for this protocol, complete
2589 * checksumming here.
2590 */
2591 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002592 if (skb->encapsulation)
2593 skb_set_inner_transport_header(skb,
2594 skb_checksum_start_offset(skb));
2595 else
2596 skb_set_transport_header(skb,
2597 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002598 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002599 skb_checksum_help(skb))
2600 goto out_kfree_skb;
2601 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002602 }
2603
Eric Dumazetb40863c2012-09-18 20:44:49 +00002604 if (!list_empty(&ptype_all))
2605 dev_queue_xmit_nit(skb, dev);
2606
Koki Sanagiec764bf2011-05-30 21:48:34 +00002607 skb_len = skb->len;
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002608 rc = ops->ndo_start_xmit(skb, dev);
2609
Koki Sanagiec764bf2011-05-30 21:48:34 +00002610 trace_net_dev_xmit(skb, rc, dev, skb_len);
Jason Wangf663dd92014-01-10 16:18:26 +08002611 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002612 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002613 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002614 }
2615
Herbert Xu576a30e2006-06-27 13:22:38 -07002616gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002617 do {
2618 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002619
2620 skb->next = nskb->next;
2621 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002622
Eric Dumazetb40863c2012-09-18 20:44:49 +00002623 if (!list_empty(&ptype_all))
2624 dev_queue_xmit_nit(nskb, dev);
2625
Koki Sanagiec764bf2011-05-30 21:48:34 +00002626 skb_len = nskb->len;
Jason Wangf663dd92014-01-10 16:18:26 +08002627 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002628 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002629 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002630 if (rc & ~NETDEV_TX_MASK)
2631 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002632 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002633 skb->next = nskb;
2634 return rc;
2635 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002636 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002637 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002638 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002639 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002640
Patrick McHardy572a9d72009-11-10 06:14:14 +00002641out_kfree_gso_skb:
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002642 if (likely(skb->next == NULL)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002643 skb->destructor = DEV_GSO_CB(skb)->destructor;
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002644 consume_skb(skb);
2645 return rc;
2646 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002647out_kfree_skb:
2648 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002649out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002650 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002651}
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002652EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
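
/* Editorial note: dev_hard_start_xmit() must be called with the transmit
 * queue locked and BHs disabled; see the HARD_TX_LOCK() usage in
 * __dev_queue_xmit() below for the direct path, and sch_direct_xmit() in
 * net/sched/sch_generic.c for the qdisc path.
 */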
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002653
Eric Dumazet1def9232013-01-10 12:36:42 +00002654static void qdisc_pkt_len_init(struct sk_buff *skb)
2655{
2656 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2657
2658 qdisc_skb_cb(skb)->pkt_len = skb->len;
2659
2660 /* To get a more precise estimate of the bytes sent on the wire,
2661 * we add the header size of every segment to pkt_len.
2662 */
2663 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002664 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002665 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002666
Eric Dumazet757b8b12013-01-15 21:14:21 -08002667 /* mac layer + network layer */
2668 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2669
2670 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002671 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2672 hdr_len += tcp_hdrlen(skb);
2673 else
2674 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002675
2676 if (shinfo->gso_type & SKB_GSO_DODGY)
2677 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2678 shinfo->gso_size);
2679
2680 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002681 }
2682}
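
/* Worked example (illustrative numbers, not from any specific driver):
 * a TCP GSO skb with skb->len = 14654, hdr_len = 54 (Ethernet 14 +
 * IPv4 20 + TCP 20) and gso_size = 1460 carries gso_segs = 10
 * segments. qdisc_skb_cb(skb)->pkt_len becomes
 * 14654 + (10 - 1) * 54 = 15140, i.e. exactly ten 1514-byte frames on
 * the wire, which is what a byte-based qdisc wants to account.
 */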
2683
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002684static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2685 struct net_device *dev,
2686 struct netdev_queue *txq)
2687{
2688 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002689 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002690 int rc;
2691
Eric Dumazet1def9232013-01-10 12:36:42 +00002692 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002693 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002694 /*
2695 * Heuristic to force contended enqueues to serialize on a
2696 * separate lock before trying to get qdisc main lock.
2697 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2698 * and dequeue packets faster.
2699 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002700 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002701 if (unlikely(contended))
2702 spin_lock(&q->busylock);
2703
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002704 spin_lock(root_lock);
2705 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2706 kfree_skb(skb);
2707 rc = NET_XMIT_DROP;
2708 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002709 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002710 /*
2711 * This is a work-conserving queue; there are no old skbs
2712 * waiting to be sent out; and the qdisc is not running -
2713 * xmit the skb directly.
2714 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002715 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2716 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002717
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002718 qdisc_bstats_update(q, skb);
2719
Eric Dumazet79640a42010-06-02 05:09:29 -07002720 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2721 if (unlikely(contended)) {
2722 spin_unlock(&q->busylock);
2723 contended = false;
2724 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002725 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002726 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002727 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002728
2729 rc = NET_XMIT_SUCCESS;
2730 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002731 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002732 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002733 if (qdisc_run_begin(q)) {
2734 if (unlikely(contended)) {
2735 spin_unlock(&q->busylock);
2736 contended = false;
2737 }
2738 __qdisc_run(q);
2739 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002740 }
2741 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002742 if (unlikely(contended))
2743 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002744 return rc;
2745}
2746
Neil Horman5bc14212011-11-22 05:10:51 +00002747#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2748static void skb_update_prio(struct sk_buff *skb)
2749{
Igor Maravic6977a792011-11-25 07:44:54 +00002750 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002751
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002752 if (!skb->priority && skb->sk && map) {
2753 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2754
2755 if (prioidx < map->priomap_len)
2756 skb->priority = map->priomap[prioidx];
2757 }
Neil Horman5bc14212011-11-22 05:10:51 +00002758}
2759#else
2760#define skb_update_prio(skb)
2761#endif
2762
Eric Dumazet745e20f2010-09-29 13:23:09 -07002763static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002764#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002765
Dave Jonesd29f7492008-07-22 14:09:06 -07002766/**
Michel Machado95603e22012-06-12 10:16:35 +00002767 * dev_loopback_xmit - loop back @skb
2768 * @skb: buffer to transmit
2769 */
2770int dev_loopback_xmit(struct sk_buff *skb)
2771{
2772 skb_reset_mac_header(skb);
2773 __skb_pull(skb, skb_network_offset(skb));
2774 skb->pkt_type = PACKET_LOOPBACK;
2775 skb->ip_summed = CHECKSUM_UNNECESSARY;
2776 WARN_ON(!skb_dst(skb));
2777 skb_dst_force(skb);
2778 netif_rx_ni(skb);
2779 return 0;
2780}
2781EXPORT_SYMBOL(dev_loopback_xmit);
2782
2783/**
Dave Jonesd29f7492008-07-22 14:09:06 -07002784 * dev_queue_xmit - transmit a buffer
2785 * @skb: buffer to transmit
2786 *
2787 * Queue a buffer for transmission to a network device. The caller must
2788 * have set the device and priority and built the buffer before calling
2789 * this function. The function can be called from an interrupt.
2790 *
2791 * A negative errno code is returned on a failure. A success does not
2792 * guarantee the frame will be transmitted as it may be dropped due
2793 * to congestion or traffic shaping.
2794 *
2795 * Note: this function can also return errors from the queue disciplines,
2796 * including NET_XMIT_DROP, which is a positive value. So errors can also
2797 * be positive.
2799 *
2800 * Regardless of the return value, the skb is consumed, so it is currently
2801 * difficult to retry a send to this method. (You can bump the ref count
2802 * before sending to hold a reference for retry if you are careful.)
2803 *
2804 * When calling this method, interrupts MUST be enabled. This is because
2805 * the BH enable code must have IRQs enabled so that it will not deadlock.
2806 * --BLG
2807 */
Jason Wangf663dd92014-01-10 16:18:26 +08002808int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809{
2810 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002811 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 struct Qdisc *q;
2813 int rc = -ENOMEM;
2814
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002815 skb_reset_mac_header(skb);
2816
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002817 /* Disable soft irqs for various locks below. Also
2818 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002820 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821
Neil Horman5bc14212011-11-22 05:10:51 +00002822 skb_update_prio(skb);
2823
Jason Wangf663dd92014-01-10 16:18:26 +08002824 txq = netdev_pick_tx(dev, skb, accel_priv);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002825 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002826
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002828 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002830 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002832 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002833 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 }
2835
2836 /* The device has no queue. Common case for software devices:
2837 loopback, all sorts of tunnels...
2838
Herbert Xu932ff272006-06-09 12:20:56 -07002839 Really, it is unlikely that netif_tx_lock protection is necessary
2840 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 counters.)
2842 However, it is possible that they rely on the protection
2843 we provide here.
2844
2845 Check this and take the lock. It is not prone to deadlocks.
2846 The noqueue qdisc case is even simpler 8)
2847 */
2848 if (dev->flags & IFF_UP) {
2849 int cpu = smp_processor_id(); /* ok because BHs are off */
2850
David S. Millerc773e842008-07-08 23:13:53 -07002851 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852
Eric Dumazet745e20f2010-09-29 13:23:09 -07002853 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2854 goto recursion_alert;
2855
David S. Millerc773e842008-07-08 23:13:53 -07002856 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
Tom Herbert734664982011-11-28 16:32:44 +00002858 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002859 __this_cpu_inc(xmit_recursion);
Jason Wangf663dd92014-01-10 16:18:26 +08002860 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002861 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002862 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002863 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 goto out;
2865 }
2866 }
David S. Millerc773e842008-07-08 23:13:53 -07002867 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002868 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2869 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 } else {
2871 /* Recursion has been detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002872 * unfortunately.
2873 */
2874recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002875 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2876 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877 }
2878 }
2879
2880 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002881 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 kfree_skb(skb);
2884 return rc;
2885out:
Herbert Xud4828d82006-06-22 02:28:18 -07002886 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 return rc;
2888}
Jason Wangf663dd92014-01-10 16:18:26 +08002889
2890int dev_queue_xmit(struct sk_buff *skb)
2891{
2892 return __dev_queue_xmit(skb, NULL);
2893}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002894EXPORT_SYMBOL(dev_queue_xmit);
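
/* A minimal sketch of a caller of dev_queue_xmit(), for illustration
 * only; the function and its payload handling are editorial
 * assumptions, not code from this file. A real caller must also build
 * the link-layer header (e.g. with dev_hard_header()) before queueing.
 */
static int example_xmit(struct net_device *dev, const void *payload,
			unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev)); /* room for hard header */
	memcpy(skb_put(skb, len), payload, len);  /* copy in the payload */

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);          /* assumed payload type */

	/* The skb is consumed whatever the return value is. */
	return dev_queue_xmit(skb);
}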
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895
Jason Wangf663dd92014-01-10 16:18:26 +08002896int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
2897{
2898 return __dev_queue_xmit(skb, accel_priv);
2899}
2900EXPORT_SYMBOL(dev_queue_xmit_accel);
2901
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902
2903/*=======================================================================
2904 Receiver routines
2905 =======================================================================*/
2906
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002907int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002908EXPORT_SYMBOL(netdev_max_backlog);
2909
Eric Dumazet3b098e22010-05-15 23:57:10 -07002910int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002911int netdev_budget __read_mostly = 300;
2912int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002914/* Called with irqs disabled */
2915static inline void ____napi_schedule(struct softnet_data *sd,
2916 struct napi_struct *napi)
2917{
2918 list_add_tail(&napi->poll_list, &sd->poll_list);
2919 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2920}
2921
Eric Dumazetdf334542010-03-24 19:13:54 +00002922#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002923
2924/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002925struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002926EXPORT_SYMBOL(rps_sock_flow_table);
2927
Ingo Molnarc5905af2012-02-24 08:31:31 +01002928struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002929
Ben Hutchingsc4454772011-01-19 11:03:53 +00002930static struct rps_dev_flow *
2931set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2932 struct rps_dev_flow *rflow, u16 next_cpu)
2933{
Ben Hutchings09994d12011-10-03 04:42:46 +00002934 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002935#ifdef CONFIG_RFS_ACCEL
2936 struct netdev_rx_queue *rxqueue;
2937 struct rps_dev_flow_table *flow_table;
2938 struct rps_dev_flow *old_rflow;
2939 u32 flow_id;
2940 u16 rxq_index;
2941 int rc;
2942
2943 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00002944 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2945 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00002946 goto out;
2947 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2948 if (rxq_index == skb_get_rx_queue(skb))
2949 goto out;
2950
2951 rxqueue = dev->_rx + rxq_index;
2952 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2953 if (!flow_table)
2954 goto out;
2955 flow_id = skb->rxhash & flow_table->mask;
2956 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2957 rxq_index, flow_id);
2958 if (rc < 0)
2959 goto out;
2960 old_rflow = rflow;
2961 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00002962 rflow->filter = rc;
2963 if (old_rflow->filter == rflow->filter)
2964 old_rflow->filter = RPS_NO_FILTER;
2965 out:
2966#endif
2967 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00002968 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002969 }
2970
Ben Hutchings09994d12011-10-03 04:42:46 +00002971 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00002972 return rflow;
2973}
2974
Tom Herbert0a9627f2010-03-16 08:03:29 +00002975/*
2976 * get_rps_cpu is called from netif_receive_skb and returns the target
2977 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002978 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00002979 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002980static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2981 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002982{
Tom Herbert0a9627f2010-03-16 08:03:29 +00002983 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002984 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07002985 struct rps_dev_flow_table *flow_table;
2986 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002987 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07002988 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002989
Tom Herbert0a9627f2010-03-16 08:03:29 +00002990 if (skb_rx_queue_recorded(skb)) {
2991 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002992 if (unlikely(index >= dev->real_num_rx_queues)) {
2993 WARN_ONCE(dev->real_num_rx_queues > 1,
2994 "%s received packet on queue %u, but number "
2995 "of RX queues is %u\n",
2996 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002997 goto done;
2998 }
2999 rxqueue = dev->_rx + index;
3000 } else
3001 rxqueue = dev->_rx;
3002
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003003 map = rcu_dereference(rxqueue->rps_map);
3004 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08003005 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00003006 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00003007 tcpu = map->cpus[0];
3008 if (cpu_online(tcpu))
3009 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003010 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00003011 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00003012 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003013 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003014 }
3015
Changli Gao2d47b452010-08-17 19:00:56 +00003016 skb_reset_network_header(skb);
Krishna Kumarbfb564e2010-08-04 06:15:52 +00003017 if (!skb_get_rxhash(skb))
Tom Herbert0a9627f2010-03-16 08:03:29 +00003018 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003019
Tom Herbertfec5e652010-04-16 16:01:27 -07003020 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3021 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3022 if (flow_table && sock_flow_table) {
3023 u16 next_cpu;
3024 struct rps_dev_flow *rflow;
3025
3026 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3027 tcpu = rflow->cpu;
3028
3029 next_cpu = sock_flow_table->ents[skb->rxhash &
3030 sock_flow_table->mask];
3031
3032 /*
3033 * If the desired CPU (where last recvmsg was done) is
3034 * different from current CPU (one in the rx-queue flow
3035 * table entry), switch if one of the following holds:
3036 * - Current CPU is unset (equal to RPS_NO_CPU).
3037 * - Current CPU is offline.
3038 * - The current CPU's queue tail has advanced beyond the
3039 * last packet that was enqueued using this table entry.
3040 * This guarantees that all previous packets for the flow
3041 * have been dequeued, thus preserving in order delivery.
3042 */
3043 if (unlikely(tcpu != next_cpu) &&
3044 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3045 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003046 rflow->last_qtail)) >= 0)) {
3047 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003048 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003049 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003050
Tom Herbertfec5e652010-04-16 16:01:27 -07003051 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3052 *rflowp = rflow;
3053 cpu = tcpu;
3054 goto done;
3055 }
3056 }
3057
Tom Herbert0a9627f2010-03-16 08:03:29 +00003058 if (map) {
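		/* Editorial note: (hash * len) >> 32 below maps the 32-bit
		 * rxhash uniformly onto [0, map->len) without a modulo,
		 * i.e. it computes floor(hash / 2^32 * len) in fixed point.
		 */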
Tom Herbertfec5e652010-04-16 16:01:27 -07003059 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003060
3061 if (cpu_online(tcpu)) {
3062 cpu = tcpu;
3063 goto done;
3064 }
3065 }
3066
3067done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003068 return cpu;
3069}
3070
Ben Hutchingsc4454772011-01-19 11:03:53 +00003071#ifdef CONFIG_RFS_ACCEL
3072
3073/**
3074 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3075 * @dev: Device on which the filter was set
3076 * @rxq_index: RX queue index
3077 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3078 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3079 *
3080 * Drivers that implement ndo_rx_flow_steer() should periodically call
3081 * this function for each installed filter and remove the filters for
3082 * which it returns %true.
3083 */
3084bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3085 u32 flow_id, u16 filter_id)
3086{
3087 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3088 struct rps_dev_flow_table *flow_table;
3089 struct rps_dev_flow *rflow;
3090 bool expire = true;
3091 int cpu;
3092
3093 rcu_read_lock();
3094 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3095 if (flow_table && flow_id <= flow_table->mask) {
3096 rflow = &flow_table->flows[flow_id];
3097 cpu = ACCESS_ONCE(rflow->cpu);
3098 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3099 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3100 rflow->last_qtail) <
3101 (int)(10 * flow_table->mask)))
3102 expire = false;
3103 }
3104 rcu_read_unlock();
3105 return expire;
3106}
3107EXPORT_SYMBOL(rps_may_expire_flow);
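
/* A sketch of how a driver implementing ndo_rx_flow_steer() might use
 * rps_may_expire_flow() from a periodic work item. Everything prefixed
 * "example_" (the queue type, its fields and helpers) is an editorial
 * assumption; only rps_may_expire_flow() itself is real.
 */
static void example_expire_rfs_filters(struct net_device *dev, u16 rxq_index)
{
	struct example_rx_queue *rxq = example_get_rx_queue(dev, rxq_index);
	u32 flow_id;

	for (flow_id = 0; flow_id < rxq->nr_flow_ids; flow_id++) {
		u16 filter_id = rxq->filter_ids[flow_id];

		if (filter_id == EXAMPLE_NO_FILTER)	/* assumed sentinel */
			continue;
		if (rps_may_expire_flow(dev, rxq_index, flow_id, filter_id)) {
			example_remove_hw_filter(rxq, filter_id);
			rxq->filter_ids[flow_id] = EXAMPLE_NO_FILTER;
		}
	}
}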
3108
3109#endif /* CONFIG_RFS_ACCEL */
3110
Tom Herbert0a9627f2010-03-16 08:03:29 +00003111/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003112static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003113{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003114 struct softnet_data *sd = data;
3115
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003116 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003117 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003118}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003119
Tom Herbertfec5e652010-04-16 16:01:27 -07003120#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003121
3122/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003123 * Check whether this softnet_data structure belongs to another CPU.
3124 * If it does, queue it on our IPI list and return 1;
3125 * otherwise return 0.
3126 */
3127static int rps_ipi_queued(struct softnet_data *sd)
3128{
3129#ifdef CONFIG_RPS
3130 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3131
3132 if (sd != mysd) {
3133 sd->rps_ipi_next = mysd->rps_ipi_list;
3134 mysd->rps_ipi_list = sd;
3135
3136 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3137 return 1;
3138 }
3139#endif /* CONFIG_RPS */
3140 return 0;
3141}
3142
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003143#ifdef CONFIG_NET_FLOW_LIMIT
3144int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3145#endif
3146
3147static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3148{
3149#ifdef CONFIG_NET_FLOW_LIMIT
3150 struct sd_flow_limit *fl;
3151 struct softnet_data *sd;
3152 unsigned int old_flow, new_flow;
3153
3154 if (qlen < (netdev_max_backlog >> 1))
3155 return false;
3156
3157 sd = &__get_cpu_var(softnet_data);
3158
3159 rcu_read_lock();
3160 fl = rcu_dereference(sd->flow_limit);
3161 if (fl) {
3162 new_flow = skb_get_rxhash(skb) & (fl->num_buckets - 1);
3163 old_flow = fl->history[fl->history_head];
3164 fl->history[fl->history_head] = new_flow;
3165
3166 fl->history_head++;
3167 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3168
3169 if (likely(fl->buckets[old_flow]))
3170 fl->buckets[old_flow]--;
3171
3172 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3173 fl->count++;
3174 rcu_read_unlock();
3175 return true;
3176 }
3177 }
3178 rcu_read_unlock();
3179#endif
3180 return false;
3181}
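
/* Editorial reading of the heuristic above: once the backlog is more
 * than half full, the last FLOW_LIMIT_HISTORY enqueued packets are
 * sampled per flow hash; a flow owning more than half of that history
 * gets its new packets dropped, so a single elephant flow cannot crowd
 * every other flow out of the per-CPU backlog during overload.
 */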
3182
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003183/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003184 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
3185 * queue (may be a remote CPU queue).
3186 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003187static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3188 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003189{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003190 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003191 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003192 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003193
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003194 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003195
3196 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003197
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003198 rps_lock(sd);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003199 qlen = skb_queue_len(&sd->input_pkt_queue);
3200 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Changli Gao6e7676c2010-04-27 15:07:33 -07003201 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003202enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003203 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003204 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003205 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003206 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003207 return NET_RX_SUCCESS;
3208 }
3209
Eric Dumazetebda37c22010-05-06 23:51:21 +00003210 /* Schedule NAPI for backlog device
3211 * We can use a non-atomic operation since we own the queue lock.
3212 */
3213 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003214 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003215 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003216 }
3217 goto enqueue;
3218 }
3219
Changli Gaodee42872010-05-02 05:42:16 +00003220 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003221 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003222
Tom Herbert0a9627f2010-03-16 08:03:29 +00003223 local_irq_restore(flags);
3224
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003225 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003226 kfree_skb(skb);
3227 return NET_RX_DROP;
3228}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230/**
3231 * netif_rx - post buffer to the network code
3232 * @skb: buffer to post
3233 *
3234 * This function receives a packet from a device driver and queues it for
3235 * the upper (protocol) levels to process. It always succeeds. The buffer
3236 * may be dropped during processing for congestion control or by the
3237 * protocol layers.
3238 *
3239 * return values:
3240 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 * NET_RX_DROP (packet was dropped)
3242 *
3243 */
3244
3245int netif_rx(struct sk_buff *skb)
3246{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003247 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248
3249 /* if netpoll wants it, pretend we never saw it */
3250 if (netpoll_rx(skb))
3251 return NET_RX_DROP;
3252
Eric Dumazet588f0332011-11-15 04:12:55 +00003253 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254
Koki Sanagicf66ba52010-08-23 18:45:02 +09003255 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003256#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003257 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003258 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003259 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260
Changli Gaocece1942010-08-07 20:35:43 -07003261 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003262 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003263
3264 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003265 if (cpu < 0)
3266 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003267
3268 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3269
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003270 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003271 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003272 } else
3273#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003274 {
3275 unsigned int qtail;
3276 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3277 put_cpu();
3278 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003279 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003281EXPORT_SYMBOL(netif_rx);
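
/* A minimal sketch (assumed driver names) of the classic non-NAPI
 * receive path that feeds netif_rx(): copy the frame out of the
 * hardware, classify it with eth_type_trans(), and hand it over.
 */
static void example_rx_irq(struct net_device *dev, const void *buf,
			   unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);      /* align the IP header */
	memcpy(skb_put(skb, len), buf, len); /* copy frame from hardware */

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);                       /* queue to the backlog */
}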
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282
3283int netif_rx_ni(struct sk_buff *skb)
3284{
3285 int err;
3286
3287 preempt_disable();
3288 err = netif_rx(skb);
3289 if (local_softirq_pending())
3290 do_softirq();
3291 preempt_enable();
3292
3293 return err;
3294}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295EXPORT_SYMBOL(netif_rx_ni);
3296
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297static void net_tx_action(struct softirq_action *h)
3298{
3299 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3300
3301 if (sd->completion_queue) {
3302 struct sk_buff *clist;
3303
3304 local_irq_disable();
3305 clist = sd->completion_queue;
3306 sd->completion_queue = NULL;
3307 local_irq_enable();
3308
3309 while (clist) {
3310 struct sk_buff *skb = clist;
3311 clist = clist->next;
3312
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003313 WARN_ON(atomic_read(&skb->users));
Koki Sanagi07dc22e2010-08-23 18:46:12 +09003314 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 __kfree_skb(skb);
3316 }
3317 }
3318
3319 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003320 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321
3322 local_irq_disable();
3323 head = sd->output_queue;
3324 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003325 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326 local_irq_enable();
3327
3328 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003329 struct Qdisc *q = head;
3330 spinlock_t *root_lock;
3331
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332 head = head->next_sched;
3333
David S. Miller5fb66222008-08-02 20:02:43 -07003334 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003335 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003336 smp_mb__before_clear_bit();
3337 clear_bit(__QDISC_STATE_SCHED,
3338 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003339 qdisc_run(q);
3340 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003342 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003343 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003344 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003345 } else {
3346 smp_mb__before_clear_bit();
3347 clear_bit(__QDISC_STATE_SCHED,
3348 &q->state);
3349 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 }
3351 }
3352 }
3353}
3354
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003355#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3356 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003357/* This hook is defined here for ATM LANE */
3358int (*br_fdb_test_addr_hook)(struct net_device *dev,
3359 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003360EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003361#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363#ifdef CONFIG_NET_CLS_ACT
3364/* TODO: Maybe we should just force sch_ingress to be compiled in
3365 * whenever CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
3366 * instructions (a compare and two extra stores) when we don't have
3367 * it on but do have CONFIG_NET_CLS_ACT.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003368 * NOTE: This doesn't stop any functionality; if you don't have
3369 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 *
3371 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003372static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003375 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003376 int result = TC_ACT_OK;
3377 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003378
Stephen Hemmingerde384832010-08-01 00:33:23 -07003379 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003380 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3381 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003382 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383 }
3384
Herbert Xuf697c3e2007-10-14 00:38:47 -07003385 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3386 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3387
David S. Miller83874002008-07-17 00:53:03 -07003388 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003389 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003390 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003391 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3392 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003393 spin_unlock(qdisc_lock(q));
3394 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003395
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396 return result;
3397}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003398
3399static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3400 struct packet_type **pt_prev,
3401 int *ret, struct net_device *orig_dev)
3402{
Eric Dumazet24824a02010-10-02 06:11:55 +00003403 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3404
3405 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003406 goto out;
3407
3408 if (*pt_prev) {
3409 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3410 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003411 }
3412
Eric Dumazet24824a02010-10-02 06:11:55 +00003413 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003414 case TC_ACT_SHOT:
3415 case TC_ACT_STOLEN:
3416 kfree_skb(skb);
3417 return NULL;
3418 }
3419
3420out:
3421 skb->tc_verd = 0;
3422 return skb;
3423}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424#endif
3425
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003426/**
3427 * netdev_rx_handler_register - register receive handler
3428 * @dev: device to register a handler for
3429 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003430 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003431 *
3432 * Register a receive hander for a device. This handler will then be
3433 * called from __netif_receive_skb. A negative errno code is returned
3434 * on a failure.
3435 *
3436 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003437 *
3438 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003439 */
3440int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003441 rx_handler_func_t *rx_handler,
3442 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003443{
3444 ASSERT_RTNL();
3445
3446 if (dev->rx_handler)
3447 return -EBUSY;
3448
Eric Dumazet00cfec32013-03-29 03:01:22 +00003449 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003450 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003451 rcu_assign_pointer(dev->rx_handler, rx_handler);
3452
3453 return 0;
3454}
3455EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
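
/* A minimal sketch of an rx_handler as a bridge-like module might
 * install one; names prefixed "example_" are editorial assumptions.
 * The handler runs from __netif_receive_skb_core() under
 * rcu_read_lock(); returning RX_HANDLER_PASS lets normal protocol
 * delivery continue.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *priv = rcu_dereference(skb->dev->rx_handler_data);

	if (!priv)
		return RX_HANDLER_PASS;

	if (example_wants_skb(priv, skb)) {	/* assumed helper */
		example_consume(priv, skb);	/* assumed helper */
		return RX_HANDLER_CONSUMED;
	}
	return RX_HANDLER_PASS;
}

/* Registration, done under RTNL:
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, example_rx_handler, priv);
 *	rtnl_unlock();
 */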
3456
3457/**
3458 * netdev_rx_handler_unregister - unregister receive handler
3459 * @dev: device to unregister a handler from
3460 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003461 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003462 *
3463 * The caller must hold the rtnl_mutex.
3464 */
3465void netdev_rx_handler_unregister(struct net_device *dev)
3466{
3467
3468 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003469 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003470 /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3471 * section is guaranteed to see a non-NULL rx_handler_data
3472 * as well.
3473 */
3474 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003475 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003476}
3477EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3478
Mel Gormanb4b9e352012-07-31 16:44:26 -07003479/*
3480 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3481 * the special handling of PFMEMALLOC skbs.
3482 */
3483static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3484{
3485 switch (skb->protocol) {
3486 case __constant_htons(ETH_P_ARP):
3487 case __constant_htons(ETH_P_IP):
3488 case __constant_htons(ETH_P_IPV6):
3489 case __constant_htons(ETH_P_8021Q):
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003490 case __constant_htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003491 return true;
3492 default:
3493 return false;
3494 }
3495}
3496
David S. Miller9754e292013-02-14 15:57:38 -05003497static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498{
3499 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003500 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003501 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003502 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003503 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003505 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506
Eric Dumazet588f0332011-11-15 04:12:55 +00003507 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003508
Koki Sanagicf66ba52010-08-23 18:45:02 +09003509 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003510
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003512 if (netpoll_receive_skb(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003513 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003515 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003516
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003517 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003518 if (!skb_transport_header_was_set(skb))
3519 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003520 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521
3522 pt_prev = NULL;
3523
3524 rcu_read_lock();
3525
David S. Miller63d8ea72011-02-28 10:48:59 -08003526another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003527 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003528
3529 __this_cpu_inc(softnet_data.processed);
3530
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003531 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3532 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003533 skb = vlan_untag(skb);
3534 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003535 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003536 }
3537
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538#ifdef CONFIG_NET_CLS_ACT
3539 if (skb->tc_verd & TC_NCLS) {
3540 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3541 goto ncls;
3542 }
3543#endif
3544
David S. Miller9754e292013-02-14 15:57:38 -05003545 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003546 goto skip_taps;
3547
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003549 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003550 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003551 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552 pt_prev = ptype;
3553 }
3554 }
3555
Mel Gormanb4b9e352012-07-31 16:44:26 -07003556skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003558 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3559 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003560 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561ncls:
3562#endif
3563
David S. Miller9754e292013-02-14 15:57:38 -05003564 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003565 goto drop;
3566
John Fastabend24257172011-10-10 09:16:41 +00003567 if (vlan_tx_tag_present(skb)) {
3568 if (pt_prev) {
3569 ret = deliver_skb(skb, pt_prev, orig_dev);
3570 pt_prev = NULL;
3571 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003572 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003573 goto another_round;
3574 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003575 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003576 }
3577
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003578 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003579 if (rx_handler) {
3580 if (pt_prev) {
3581 ret = deliver_skb(skb, pt_prev, orig_dev);
3582 pt_prev = NULL;
3583 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003584 switch (rx_handler(&skb)) {
3585 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003586 ret = NET_RX_SUCCESS;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003587 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003588 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003589 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003590 case RX_HANDLER_EXACT:
3591 deliver_exact = true;
3592 case RX_HANDLER_PASS:
3593 break;
3594 default:
3595 BUG();
3596 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003597 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598
Eric Dumazetd4b812d2013-07-18 07:19:26 -07003599 if (unlikely(vlan_tx_tag_present(skb))) {
3600 if (vlan_tx_tag_get_id(skb))
3601 skb->pkt_type = PACKET_OTHERHOST;
3602 /* Note: we might in the future use prio bits
3603 * and set skb->priority as in vlan_do_receive().
3604 * For the time being, just ignore the Priority Code Point.
3605 */
3606 skb->vlan_tci = 0;
3607 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003608
David S. Miller63d8ea72011-02-28 10:48:59 -08003609 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003610 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003611
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003613 list_for_each_entry_rcu(ptype,
3614 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003615 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003616 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3617 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003618 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003619 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 pt_prev = ptype;
3621 }
3622 }
3623
3624 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003625 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003626 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003627 else
3628 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003630drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003631 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 kfree_skb(skb);
3633 /* Jamal, now you will not be able to escape explaining
3634 * to me how you were going to use this. :-)
3635 */
3636 ret = NET_RX_DROP;
3637 }
3638
Mel Gormanb4b9e352012-07-31 16:44:26 -07003639unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 rcu_read_unlock();
Mel Gormanb4b9e352012-07-31 16:44:26 -07003641out:
David S. Miller9754e292013-02-14 15:57:38 -05003642 return ret;
3643}
3644
3645static int __netif_receive_skb(struct sk_buff *skb)
3646{
3647 int ret;
3648
3649 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3650 unsigned long pflags = current->flags;
3651
3652 /*
3653 * PFMEMALLOC skbs are special, they should
3654 * - be delivered to SOCK_MEMALLOC sockets only
3655 * - stay away from userspace
3656 * - have bounded memory usage
3657 *
3658 * Use PF_MEMALLOC as this saves us from propagating the allocation
3659 * context down to all allocation sites.
3660 */
3661 current->flags |= PF_MEMALLOC;
3662 ret = __netif_receive_skb_core(skb, true);
3663 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3664 } else
3665 ret = __netif_receive_skb_core(skb, false);
3666
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667 return ret;
3668}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003669
3670/**
3671 * netif_receive_skb - process receive buffer from network
3672 * @skb: buffer to process
3673 *
3674 * netif_receive_skb() is the main receive data processing function.
3675 * It always succeeds. The buffer may be dropped during processing
3676 * for congestion control or by the protocol layers.
3677 *
3678 * This function may only be called from softirq context and interrupts
3679 * should be enabled.
3680 *
3681 * Return values (usually ignored):
3682 * NET_RX_SUCCESS: no congestion
3683 * NET_RX_DROP: packet was dropped
3684 */
3685int netif_receive_skb(struct sk_buff *skb)
3686{
Eric Dumazet588f0332011-11-15 04:12:55 +00003687 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003688
Richard Cochranc1f19b52010-07-17 08:49:36 +00003689 if (skb_defer_rx_timestamp(skb))
3690 return NET_RX_SUCCESS;
3691
Eric Dumazetdf334542010-03-24 19:13:54 +00003692#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003693 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003694 struct rps_dev_flow voidflow, *rflow = &voidflow;
3695 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003696
Eric Dumazet3b098e22010-05-15 23:57:10 -07003697 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003698
Eric Dumazet3b098e22010-05-15 23:57:10 -07003699 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003700
Eric Dumazet3b098e22010-05-15 23:57:10 -07003701 if (cpu >= 0) {
3702 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3703 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003704 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003705 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003706 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003707 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003708#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003709 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003710}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003711EXPORT_SYMBOL(netif_receive_skb);
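
/* A sketch (assumed names) of the NAPI poll loop that feeds
 * netif_receive_skb(), for contrast with the netif_rx() path above.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv =
		container_of(napi, struct example_priv, napi); /* assumed */
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = example_fetch_rx_skb(priv); /* assumed */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->dev);
		netif_receive_skb(skb);	/* softirq context, IRQs on */
		work++;
	}

	if (work < budget)
		napi_complete(napi); /* re-enable device interrupts after this */
	return work;
}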
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712
Eric Dumazet88751272010-04-19 05:07:33 +00003713/* Network device is going away; flush any packets still pending.
3714 * Called with irqs disabled.
3715 */
Changli Gao152102c2010-03-30 20:16:22 +00003716static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003717{
Changli Gao152102c2010-03-30 20:16:22 +00003718 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003719 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003720 struct sk_buff *skb, *tmp;
3721
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003722 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003723 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003724 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003725 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003726 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003727 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003728 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003729 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003730 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003731
3732 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3733 if (skb->dev == dev) {
3734 __skb_unlink(skb, &sd->process_queue);
3735 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003736 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003737 }
3738 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003739}
3740
Herbert Xud565b0a2008-12-15 23:38:52 -08003741static int napi_gro_complete(struct sk_buff *skb)
3742{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003743 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003744 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003745 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003746 int err = -ENOENT;
3747
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003748 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3749
Herbert Xufc59f9a2009-04-14 15:11:06 -07003750 if (NAPI_GRO_CB(skb)->count == 1) {
3751 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003752 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003753 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003754
3755 rcu_read_lock();
3756 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003757 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003758 continue;
3759
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003760 err = ptype->callbacks.gro_complete(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003761 break;
3762 }
3763 rcu_read_unlock();
3764
3765 if (err) {
3766 WARN_ON(&ptype->list == head);
3767 kfree_skb(skb);
3768 return NET_RX_SUCCESS;
3769 }
3770
3771out:
Herbert Xud565b0a2008-12-15 23:38:52 -08003772 return netif_receive_skb(skb);
3773}
3774
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003775/* napi->gro_list contains packets ordered by age.
 3776 * The youngest packets are at its head.
3777 * Complete skbs in reverse order to reduce latencies.
3778 */
3779void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003780{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003781 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003782
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003783 /* scan list and build reverse chain */
3784 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3785 skb->prev = prev;
3786 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003787 }
3788
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003789 for (skb = prev; skb; skb = prev) {
3790 skb->next = NULL;
3791
3792 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3793 return;
3794
3795 prev = skb->prev;
3796 napi_gro_complete(skb);
3797 napi->gro_count--;
3798 }
3799
Herbert Xud565b0a2008-12-15 23:38:52 -08003800 napi->gro_list = NULL;
3801}
Eric Dumazet86cac582010-08-31 18:25:32 +00003802EXPORT_SYMBOL(napi_gro_flush);
Herbert Xud565b0a2008-12-15 23:38:52 -08003803
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003804static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3805{
3806 struct sk_buff *p;
3807 unsigned int maclen = skb->dev->hard_header_len;
3808
3809 for (p = napi->gro_list; p; p = p->next) {
3810 unsigned long diffs;
3811
3812 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3813 diffs |= p->vlan_tci ^ skb->vlan_tci;
3814 if (maclen == ETH_HLEN)
3815 diffs |= compare_ether_header(skb_mac_header(p),
3816 skb_gro_mac_header(skb));
3817 else if (!diffs)
3818 diffs = memcmp(skb_mac_header(p),
3819 skb_gro_mac_header(skb),
3820 maclen);
3821 NAPI_GRO_CB(p)->same_flow = !diffs;
3822 NAPI_GRO_CB(p)->flush = 0;
3823 }
3824}
3825
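/*
 * Editor's sketch (hypothetical, not part of this file): the branch-free
 * comparison idiom gro_list_prepare() uses above. Each field pair is
 * XORed (zero iff equal) and the results are ORed into one word, so a
 * single test at the end says whether every field matched.
 */
struct my_flow_id {			/* hypothetical key */
	unsigned long dev_addr;
	u16 vlan_tci;
};

static bool my_flow_ids_match(const struct my_flow_id *a,
			      const struct my_flow_id *b)
{
	unsigned long diffs;

	diffs  = a->dev_addr ^ b->dev_addr;
	diffs |= a->vlan_tci ^ b->vlan_tci;
	return diffs == 0;
}
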
Rami Rosenbb728822012-11-28 21:55:25 +00003826static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003827{
3828 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003829 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003830 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003831 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003832 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003833 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003834
Jarek Poplawskice9e76c2010-08-05 01:19:11 +00003835 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
Herbert Xud565b0a2008-12-15 23:38:52 -08003836 goto normal;
3837
David S. Miller21dc3302010-08-23 00:13:46 -07003838 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003839 goto normal;
3840
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003841 gro_list_prepare(napi, skb);
3842
Herbert Xud565b0a2008-12-15 23:38:52 -08003843 rcu_read_lock();
3844 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003845 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003846 continue;
3847
Herbert Xu86911732009-01-29 14:19:50 +00003848 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00003849 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003850 NAPI_GRO_CB(skb)->same_flow = 0;
3851 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003852 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003853
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003854 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003855 break;
3856 }
3857 rcu_read_unlock();
3858
3859 if (&ptype->list == head)
3860 goto normal;
3861
Herbert Xu0da2afd52008-12-26 14:57:42 -08003862 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003863 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003864
Herbert Xud565b0a2008-12-15 23:38:52 -08003865 if (pp) {
3866 struct sk_buff *nskb = *pp;
3867
3868 *pp = nskb->next;
3869 nskb->next = NULL;
3870 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003871 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003872 }
3873
Herbert Xu0da2afd52008-12-26 14:57:42 -08003874 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003875 goto ok;
3876
Herbert Xu4ae55442009-02-08 18:00:36 +00003877 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003878 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003879
Herbert Xu4ae55442009-02-08 18:00:36 +00003880 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003881 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003882 NAPI_GRO_CB(skb)->age = jiffies;
Herbert Xu86911732009-01-29 14:19:50 +00003883 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003884 skb->next = napi->gro_list;
3885 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003886 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003887
Herbert Xuad0f9902009-02-01 01:24:55 -08003888pull:
Herbert Xucb189782009-05-26 18:50:31 +00003889 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3890 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3891
3892 BUG_ON(skb->end - skb->tail < grow);
3893
3894 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3895
3896 skb->tail += grow;
3897 skb->data_len -= grow;
3898
3899 skb_shinfo(skb)->frags[0].page_offset += grow;
Eric Dumazet9e903e02011-10-18 21:00:24 +00003900 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
Herbert Xucb189782009-05-26 18:50:31 +00003901
Eric Dumazet9e903e02011-10-18 21:00:24 +00003902 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
Ian Campbellea2ab692011-08-22 23:44:58 +00003903 skb_frag_unref(skb, 0);
Herbert Xucb189782009-05-26 18:50:31 +00003904 memmove(skb_shinfo(skb)->frags,
3905 skb_shinfo(skb)->frags + 1,
Jarek Poplawskie5093ae2010-08-11 02:02:10 +00003906 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
Herbert Xucb189782009-05-26 18:50:31 +00003907 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003908 }
3909
Herbert Xud565b0a2008-12-15 23:38:52 -08003910ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003911 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003912
3913normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003914 ret = GRO_NORMAL;
3915 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003916}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003917
Herbert Xu96e93ea2009-01-06 10:49:34 -08003918
Rami Rosenbb728822012-11-28 21:55:25 +00003919static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003920{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003921 switch (ret) {
3922 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003923 if (netif_receive_skb(skb))
3924 ret = GRO_DROP;
3925 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003926
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003927 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08003928 kfree_skb(skb);
3929 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003930
Eric Dumazetdaa86542012-04-19 07:07:40 +00003931 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00003932 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3933 kmem_cache_free(skbuff_head_cache, skb);
3934 else
3935 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00003936 break;
3937
Ben Hutchings5b252f02009-10-29 07:17:09 +00003938 case GRO_HELD:
3939 case GRO_MERGED:
3940 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003941 }
3942
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003943 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003944}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003945
Eric Dumazetca07e432012-10-06 22:28:06 +00003946static void skb_gro_reset_offset(struct sk_buff *skb)
Herbert Xu78a478d2009-05-26 18:50:21 +00003947{
Eric Dumazetca07e432012-10-06 22:28:06 +00003948 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3949 const skb_frag_t *frag0 = &pinfo->frags[0];
3950
Herbert Xu78a478d2009-05-26 18:50:21 +00003951 NAPI_GRO_CB(skb)->data_offset = 0;
3952 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003953 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003954
Simon Hormanced14f62013-05-28 20:34:25 +00003955 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
Eric Dumazetca07e432012-10-06 22:28:06 +00003956 pinfo->nr_frags &&
3957 !PageHighMem(skb_frag_page(frag0))) {
3958 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3959 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xu74895942009-05-26 18:50:27 +00003960 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003961}
Herbert Xu78a478d2009-05-26 18:50:21 +00003962
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003963gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003964{
Herbert Xu86911732009-01-29 14:19:50 +00003965 skb_gro_reset_offset(skb);
3966
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003967 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003968}
3969EXPORT_SYMBOL(napi_gro_receive);
3970
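/*
 * Editor's sketch of the intended call site for napi_gro_receive(): a
 * driver's ->poll() handler feeds each received frame through GRO after
 * setting skb->protocol. Everything prefixed my_ is hypothetical.
 */
static struct sk_buff *my_hw_fetch_rx_frame(struct net_device *dev); /* hypothetical */

static int my_rx_one(struct napi_struct *napi, struct net_device *dev)
{
	struct sk_buff *skb = my_hw_fetch_rx_frame(dev);

	if (!skb)
		return 0;		/* ring empty */

	skb->protocol = eth_type_trans(skb, dev);
	/* GRO either merges into a held skb or falls back to netif_receive_skb() */
	napi_gro_receive(napi, skb);
	return 1;
}
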
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00003971static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003972{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003973 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00003974 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3975 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00003976 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08003977 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08003978 skb->skb_iif = 0;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003979
3980 napi->skb = skb;
3981}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003982
Herbert Xu76620aa2009-04-16 02:02:07 -07003983struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003984{
Herbert Xu5d38a072009-01-04 16:13:40 -08003985 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003986
3987 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003988 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3989 if (skb)
3990 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003991 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003992 return skb;
3993}
Herbert Xu76620aa2009-04-16 02:02:07 -07003994EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003995
Rami Rosenbb728822012-11-28 21:55:25 +00003996static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003997 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003998{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003999 switch (ret) {
4000 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00004001 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00004002 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00004003
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004004 if (ret == GRO_HELD)
4005 skb_gro_pull(skb, -ETH_HLEN);
4006 else if (netif_receive_skb(skb))
4007 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004008 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004009
4010 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004011 case GRO_MERGED_FREE:
4012 napi_reuse_skb(napi, skb);
4013 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004014
4015 case GRO_MERGED:
4016 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004017 }
4018
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004019 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004020}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004021
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004022static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004023{
Herbert Xu76620aa2009-04-16 02:02:07 -07004024 struct sk_buff *skb = napi->skb;
4025 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00004026 unsigned int hlen;
4027 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07004028
4029 napi->skb = NULL;
4030
4031 skb_reset_mac_header(skb);
4032 skb_gro_reset_offset(skb);
4033
Herbert Xua5b1cf22009-05-26 18:50:28 +00004034 off = skb_gro_offset(skb);
4035 hlen = off + sizeof(*eth);
4036 eth = skb_gro_header_fast(skb, off);
4037 if (skb_gro_header_hard(skb, hlen)) {
4038 eth = skb_gro_header_slow(skb, hlen, off);
4039 if (unlikely(!eth)) {
4040 napi_reuse_skb(napi, skb);
4041 skb = NULL;
4042 goto out;
4043 }
Herbert Xu76620aa2009-04-16 02:02:07 -07004044 }
4045
4046 skb_gro_pull(skb, sizeof(*eth));
4047
4048 /*
4049 * This works because the only protocols we care about don't require
4050 * special handling. We'll fix it up properly at the end.
4051 */
4052 skb->protocol = eth->h_proto;
4053
4054out:
4055 return skb;
4056}
Herbert Xu76620aa2009-04-16 02:02:07 -07004057
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004058gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004059{
4060 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004061
4062 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004063 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004064
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004065 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004066}
4067EXPORT_SYMBOL(napi_gro_frags);
4068
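/*
 * Editor's sketch of the page-based GRO path: borrow the napi-owned skb
 * from napi_get_frags(), attach the received page starting at the MAC
 * header, and hand it back via napi_gro_frags(), which parses the
 * Ethernet header itself. Page/offset/length would come from a real RX
 * descriptor; this sketch, its names, and the truesize accounting are
 * assumptions.
 */
static gro_result_t my_rx_page(struct napi_struct *napi, struct page *page,
			       unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return GRO_DROP;	/* allocation failed; caller recycles the page */

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;	/* assuming a full page per frame */

	return napi_gro_frags(napi);
}
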
Eric Dumazete326bed2010-04-22 00:22:45 -07004069/*
 4070 * net_rps_action sends any pending IPIs for RPS.
4071 * Note: called with local irq disabled, but exits with local irq enabled.
4072 */
4073static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4074{
4075#ifdef CONFIG_RPS
4076 struct softnet_data *remsd = sd->rps_ipi_list;
4077
4078 if (remsd) {
4079 sd->rps_ipi_list = NULL;
4080
4081 local_irq_enable();
4082
 4083		/* Send pending IPIs to kick RPS processing on remote cpus. */
4084 while (remsd) {
4085 struct softnet_data *next = remsd->rps_ipi_next;
4086
4087 if (cpu_online(remsd->cpu))
4088 __smp_call_function_single(remsd->cpu,
4089 &remsd->csd, 0);
4090 remsd = next;
4091 }
4092 } else
4093#endif
4094 local_irq_enable();
4095}
4096
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004097static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098{
4099 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004100 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101
Eric Dumazete326bed2010-04-22 00:22:45 -07004102#ifdef CONFIG_RPS
 4103	/* Check if we have pending IPIs; it's better to send them now
 4104	 * than to wait for net_rx_action() to end.
4105 */
4106 if (sd->rps_ipi_list) {
4107 local_irq_disable();
4108 net_rps_action_and_irq_enable(sd);
4109 }
4110#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004111 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004112 local_irq_disable();
4113 while (work < quota) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114 struct sk_buff *skb;
Changli Gao6e7676c2010-04-27 15:07:33 -07004115 unsigned int qlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116
Changli Gao6e7676c2010-04-27 15:07:33 -07004117 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004118 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004119 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004120 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004121 input_queue_head_incr(sd);
4122 if (++work >= quota) {
4123 local_irq_enable();
4124 return work;
4125 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127
Changli Gao6e7676c2010-04-27 15:07:33 -07004128 rps_lock(sd);
4129 qlen = skb_queue_len(&sd->input_pkt_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004130 if (qlen)
Changli Gao6e7676c2010-04-27 15:07:33 -07004131 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4132 &sd->process_queue);
Tom Herbert76cc8b12010-05-20 18:37:59 +00004133
Changli Gao6e7676c2010-04-27 15:07:33 -07004134 if (qlen < quota - work) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004135 /*
4136 * Inline a custom version of __napi_complete().
 4137			 * Only the current cpu owns and manipulates this napi,
 4138			 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
 4139			 * so we can use a plain write instead of clear_bit()
 4140			 * and we don't need an smp_mb() memory barrier.
4141 */
4142 list_del(&napi->poll_list);
4143 napi->state = 0;
4144
Changli Gao6e7676c2010-04-27 15:07:33 -07004145 quota = work + qlen;
4146 }
4147 rps_unlock(sd);
4148 }
4149 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004151 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152}
4153
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004154/**
4155 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004156 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004157 *
4158 * The entry's receive function will be scheduled to run
4159 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004160void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004161{
4162 unsigned long flags;
4163
4164 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004165 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004166 local_irq_restore(flags);
4167}
4168EXPORT_SYMBOL(__napi_schedule);
4169
Herbert Xud565b0a2008-12-15 23:38:52 -08004170void __napi_complete(struct napi_struct *n)
4171{
4172 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4173 BUG_ON(n->gro_list);
4174
4175 list_del(&n->poll_list);
4176 smp_mb__before_clear_bit();
4177 clear_bit(NAPI_STATE_SCHED, &n->state);
4178}
4179EXPORT_SYMBOL(__napi_complete);
4180
4181void napi_complete(struct napi_struct *n)
4182{
4183 unsigned long flags;
4184
4185 /*
4186 * don't let napi dequeue from the cpu poll list
 4187	 * just in case it's running on a different cpu
4188 */
4189 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4190 return;
4191
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004192 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004193 local_irq_save(flags);
4194 __napi_complete(n);
4195 local_irq_restore(flags);
4196}
4197EXPORT_SYMBOL(napi_complete);
4198
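/*
 * Editor's sketch of the ->poll() contract that net_rx_action() below
 * relies on: do at most @budget units of work, and call napi_complete()
 * only after doing strictly less than @budget (i.e. the ring drained);
 * returning work == budget keeps the instance on the poll list. Reuses
 * the hypothetical my_hw_fetch_rx_frame() from the sketch after
 * napi_gro_receive() above; all other my_ names are made up too.
 */
struct my_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

static void my_hw_enable_rx_irq(struct my_priv *priv);	/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = my_hw_fetch_rx_frame(priv->dev);

		if (!skb)
			break;		/* ring drained before the budget ran out */
		skb->protocol = eth_type_trans(skb, priv->dev);
		napi_gro_receive(napi, skb);
		work++;
	}

	if (work < budget) {
		napi_complete(napi);
		my_hw_enable_rx_irq(priv);	/* re-arm interrupts */
	}
	return work;
}
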
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004199/* must be called under rcu_read_lock(), as we dont take a reference */
4200struct napi_struct *napi_by_id(unsigned int napi_id)
4201{
4202 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4203 struct napi_struct *napi;
4204
4205 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4206 if (napi->napi_id == napi_id)
4207 return napi;
4208
4209 return NULL;
4210}
4211EXPORT_SYMBOL_GPL(napi_by_id);
4212
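/*
 * Editor's sketch of the locking contract stated above: napi_by_id()
 * takes no reference, so the returned pointer may only be used inside
 * the RCU read-side critical section. Hypothetical helper.
 */
static bool my_napi_id_exists(unsigned int napi_id)
{
	bool found;

	rcu_read_lock();
	found = napi_by_id(napi_id) != NULL;
	rcu_read_unlock();

	return found;
}
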
4213void napi_hash_add(struct napi_struct *napi)
4214{
4215 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4216
4217 spin_lock(&napi_hash_lock);
4218
4219 /* 0 is not a valid id, we also skip an id that is taken
4220 * we expect both events to be extremely rare
4221 */
4222 napi->napi_id = 0;
4223 while (!napi->napi_id) {
4224 napi->napi_id = ++napi_gen_id;
4225 if (napi_by_id(napi->napi_id))
4226 napi->napi_id = 0;
4227 }
4228
4229 hlist_add_head_rcu(&napi->napi_hash_node,
4230 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4231
4232 spin_unlock(&napi_hash_lock);
4233 }
4234}
4235EXPORT_SYMBOL_GPL(napi_hash_add);
4236
 4237/* Warning: the caller is responsible for making sure an rcu grace period
 4238 * is respected before freeing the memory containing @napi
4239 */
4240void napi_hash_del(struct napi_struct *napi)
4241{
4242 spin_lock(&napi_hash_lock);
4243
4244 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4245 hlist_del_rcu(&napi->napi_hash_node);
4246
4247 spin_unlock(&napi_hash_lock);
4248}
4249EXPORT_SYMBOL_GPL(napi_hash_del);
4250
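/*
 * Editor's sketch of honouring the warning above, reusing struct my_priv
 * from the poll sketch earlier and assuming it was kmalloc()ed: after
 * napi_hash_del(), readers doing napi_by_id() may still reach the napi,
 * so a grace period must pass before the containing memory is freed.
 * The ordering shown is one reasonable choice, not the only one.
 */
static void my_teardown_napi(struct my_priv *priv)
{
	napi_hash_del(&priv->napi);
	synchronize_rcu();	/* let rcu readers of the hash drain */
	netif_napi_del(&priv->napi);
	kfree(priv);
}
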
Herbert Xud565b0a2008-12-15 23:38:52 -08004251void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4252 int (*poll)(struct napi_struct *, int), int weight)
4253{
4254 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004255 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004256 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004257 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004258 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004259 if (weight > NAPI_POLL_WEIGHT)
4260 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4261 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004262 napi->weight = weight;
4263 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004264 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004265#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004266 spin_lock_init(&napi->poll_lock);
4267 napi->poll_owner = -1;
4268#endif
4269 set_bit(NAPI_STATE_SCHED, &napi->state);
4270}
4271EXPORT_SYMBOL(netif_napi_add);
4272
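/*
 * Editor's sketch of probe/interrupt wiring, reusing struct my_priv and
 * my_poll() from the sketch after napi_complete() above. NAPI_POLL_WEIGHT
 * is the conventional weight; anything larger now triggers the
 * pr_err_once() in netif_napi_add(). The irq helper is hypothetical.
 */
static void my_hw_disable_rx_irq(struct my_priv *priv);	/* hypothetical */

static void my_setup_napi(struct my_priv *priv, struct net_device *dev)
{
	priv->dev = dev;
	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);	/* usually done from ndo_open */
}

static irqreturn_t my_rx_isr(int irq, void *data)
{
	struct my_priv *priv = data;

	my_hw_disable_rx_irq(priv);	/* quiesce until my_poll() re-arms */
	napi_schedule(&priv->napi);	/* no-op if already scheduled */
	return IRQ_HANDLED;
}
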
4273void netif_napi_del(struct napi_struct *napi)
4274{
4275 struct sk_buff *skb, *next;
4276
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004277 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004278 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004279
4280 for (skb = napi->gro_list; skb; skb = next) {
4281 next = skb->next;
4282 skb->next = NULL;
4283 kfree_skb(skb);
4284 }
4285
4286 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004287 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004288}
4289EXPORT_SYMBOL(netif_napi_del);
4290
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291static void net_rx_action(struct softirq_action *h)
4292{
Eric Dumazete326bed2010-04-22 00:22:45 -07004293 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004294 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004295 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004296 void *have;
4297
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 local_irq_disable();
4299
Eric Dumazete326bed2010-04-22 00:22:45 -07004300 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004301 struct napi_struct *n;
4302 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004304		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004305		 * Allow this to run for 2 jiffies, which allows
4306 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004307 */
Eric Dumazetd1f41b62013-03-05 07:15:13 +00004308 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309 goto softnet_break;
4310
4311 local_irq_enable();
4312
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004313 /* Even though interrupts have been re-enabled, this
4314 * access is safe because interrupts can only add new
4315 * entries to the tail of this list, and only ->poll()
4316 * calls can remove this head entry from the list.
4317 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004318 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004320 have = netpoll_poll_lock(n);
4321
4322 weight = n->weight;
4323
David S. Miller0a7606c2007-10-29 21:28:47 -07004324 /* This NAPI_STATE_SCHED test is for avoiding a race
4325 * with netpoll's poll_napi(). Only the entity which
4326 * obtains the lock and sees NAPI_STATE_SCHED set will
4327 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004328 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004329 */
4330 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004331 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004332 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004333 trace_napi_poll(n);
4334 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004335
4336 WARN_ON_ONCE(work > weight);
4337
4338 budget -= work;
4339
4340 local_irq_disable();
4341
4342 /* Drivers must not modify the NAPI state if they
4343 * consume the entire weight. In such cases this code
4344 * still "owns" the NAPI instance and therefore can
4345 * move the instance around on the list at-will.
4346 */
David S. Millerfed17f32008-01-07 21:00:40 -08004347 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004348 if (unlikely(napi_disable_pending(n))) {
4349 local_irq_enable();
4350 napi_complete(n);
4351 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004352 } else {
4353 if (n->gro_list) {
 4354				/* Flush packets that are too old.
4355 * If HZ < 1000, flush all packets.
4356 */
4357 local_irq_enable();
4358 napi_gro_flush(n, HZ >= 1000);
4359 local_irq_disable();
4360 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004361 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004362 }
David S. Millerfed17f32008-01-07 21:00:40 -08004363 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004364
4365 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366 }
4367out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004368 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004369
Chris Leechdb217332006-06-17 21:24:58 -07004370#ifdef CONFIG_NET_DMA
4371 /*
4372 * There may not be any more sk_buffs coming right now, so push
4373 * any pending DMA copies to hardware
4374 */
Dan Williams2ba05622009-01-06 11:38:14 -07004375 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004376#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004377
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378 return;
4379
4380softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004381 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004382 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4383 goto out;
4384}
4385
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004386struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004387 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004388
4389 /* upper master flag, there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004390 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004391
Veaceslav Falico5d261912013-08-28 23:25:05 +02004392 /* counter for the number of times this device was added to us */
4393 u16 ref_nr;
4394
Veaceslav Falico402dae92013-09-25 09:20:09 +02004395 /* private field for the users */
4396 void *private;
4397
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004398 struct list_head list;
4399 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004400};
4401
Veaceslav Falico5249dec2013-09-25 09:20:08 +02004402static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
4403 struct net_device *adj_dev,
4404 struct list_head *adj_list)
4405{
4406 struct netdev_adjacent *adj;
4407
4408 list_for_each_entry_rcu(adj, adj_list, list) {
4409 if (adj->dev == adj_dev)
4410 return adj;
4411 }
4412 return NULL;
4413}
4414
Veaceslav Falico5d261912013-08-28 23:25:05 +02004415static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4416 struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004417 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004418{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004419 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004420
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004421 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004422 if (adj->dev == adj_dev)
4423 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004424 }
4425 return NULL;
4426}
4427
4428/**
4429 * netdev_has_upper_dev - Check if device is linked to an upper device
4430 * @dev: device
4431 * @upper_dev: upper device to check
4432 *
 4433 * Find out if a device is linked to the specified upper device and return true
 4434 * in case it is. Note that this searches the whole graph of upper devices
 4435 * (all_adj_list), not only the immediate ones. The caller must hold the RTNL lock.
4436 */
4437bool netdev_has_upper_dev(struct net_device *dev,
4438 struct net_device *upper_dev)
4439{
4440 ASSERT_RTNL();
4441
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004442 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004443}
4444EXPORT_SYMBOL(netdev_has_upper_dev);
4445
4446/**
4447 * netdev_has_any_upper_dev - Check if device is linked to some device
4448 * @dev: device
4449 *
4450 * Find out if a device is linked to an upper device and return true in case
4451 * it is. The caller must hold the RTNL lock.
4452 */
4453bool netdev_has_any_upper_dev(struct net_device *dev)
4454{
4455 ASSERT_RTNL();
4456
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004457 return !list_empty(&dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004458}
4459EXPORT_SYMBOL(netdev_has_any_upper_dev);
4460
4461/**
4462 * netdev_master_upper_dev_get - Get master upper device
4463 * @dev: device
4464 *
4465 * Find a master upper device and return pointer to it or NULL in case
4466 * it's not there. The caller must hold the RTNL lock.
4467 */
4468struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4469{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004470 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004471
4472 ASSERT_RTNL();
4473
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004474 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004475 return NULL;
4476
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004477 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004478 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004479 if (likely(upper->master))
4480 return upper->dev;
4481 return NULL;
4482}
4483EXPORT_SYMBOL(netdev_master_upper_dev_get);
4484
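/*
 * Editor's sketch of the RTNL-held queries above, e.g. from a netdev
 * notifier (which already runs under RTNL): test whether @dev sits under
 * @candidate and, if so, fetch @dev's master. Hypothetical helper.
 */
static struct net_device *my_master_if_under(struct net_device *dev,
					     struct net_device *candidate)
{
	ASSERT_RTNL();

	if (!netdev_has_upper_dev(dev, candidate))
		return NULL;

	return netdev_master_upper_dev_get(dev);	/* may be NULL */
}
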
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02004485void *netdev_adjacent_get_private(struct list_head *adj_list)
4486{
4487 struct netdev_adjacent *adj;
4488
4489 adj = list_entry(adj_list, struct netdev_adjacent, list);
4490
4491 return adj->private;
4492}
4493EXPORT_SYMBOL(netdev_adjacent_get_private);
4494
Veaceslav Falico31088a12013-09-25 09:20:12 +02004495/**
4496 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
Veaceslav Falico48311f42013-08-28 23:25:07 +02004497 * @dev: device
4498 * @iter: list_head ** of the current position
4499 *
4500 * Gets the next device from the dev's upper list, starting from iter
4501 * position. The caller must hold RCU read lock.
4502 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004503struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4504 struct list_head **iter)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004505{
4506 struct netdev_adjacent *upper;
4507
John Fastabend85328242013-11-26 06:33:52 +00004508 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
Veaceslav Falico48311f42013-08-28 23:25:07 +02004509
4510 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4511
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004512 if (&upper->list == &dev->all_adj_list.upper)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004513 return NULL;
4514
4515 *iter = &upper->list;
4516
4517 return upper->dev;
4518}
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004519EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
Veaceslav Falico48311f42013-08-28 23:25:07 +02004520
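/*
 * Editor's sketch of driving the iterator above: the cursor starts at the
 * list head itself, and each call advances it and returns the next upper
 * device, or NULL at the end. Run under rcu_read_lock() (or RTNL, per the
 * lockdep check). Hypothetical helper.
 */
static void my_log_all_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	iter = &dev->all_adj_list.upper;
	while ((upper = netdev_all_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		pr_debug("%s is below %s\n", dev->name, upper->name);
	rcu_read_unlock();
}
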
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004521/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02004522 * netdev_lower_get_next_private - Get the next ->private from the
4523 * lower neighbour list
4524 * @dev: device
4525 * @iter: list_head ** of the current position
4526 *
4527 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 4528 * list, starting from iter position. The caller must either hold the
 4529 * RTNL lock or its own locking that guarantees that the neighbour lower
 4530 * list will remain unchanged.
4531 */
4532void *netdev_lower_get_next_private(struct net_device *dev,
4533 struct list_head **iter)
4534{
4535 struct netdev_adjacent *lower;
4536
4537 lower = list_entry(*iter, struct netdev_adjacent, list);
4538
4539 if (&lower->list == &dev->adj_list.lower)
4540 return NULL;
4541
4542 if (iter)
4543 *iter = lower->list.next;
4544
4545 return lower->private;
4546}
4547EXPORT_SYMBOL(netdev_lower_get_next_private);
4548
4549/**
4550 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4551 * lower neighbour list, RCU
4552 * variant
4553 * @dev: device
4554 * @iter: list_head ** of the current position
4555 *
4556 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4557 * list, starting from iter position. The caller must hold RCU read lock.
4558 */
4559void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4560 struct list_head **iter)
4561{
4562 struct netdev_adjacent *lower;
4563
4564 WARN_ON_ONCE(!rcu_read_lock_held());
4565
4566 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4567
4568 if (&lower->list == &dev->adj_list.lower)
4569 return NULL;
4570
4571 if (iter)
4572 *iter = &lower->list;
4573
4574 return lower->private;
4575}
4576EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4577
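/*
 * Editor's sketch of walking the lower neighbours' private pointers with
 * the RCU variant above, as a bonding/team-style upper might. It assumes
 * every link was created with a non-NULL private (a NULL private would
 * end the loop early). struct my_slave and my_use_slave() are
 * hypothetical.
 */
struct my_slave;
static void my_use_slave(struct my_slave *slave);	/* hypothetical */

static void my_for_each_slave_rcu(struct net_device *master)
{
	struct my_slave *slave;
	struct list_head *iter;

	rcu_read_lock();
	iter = &master->adj_list.lower;
	while ((slave = netdev_lower_get_next_private_rcu(master, &iter)))
		my_use_slave(slave);
	rcu_read_unlock();
}
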
4578/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004579 * netdev_master_upper_dev_get_rcu - Get master upper device
4580 * @dev: device
4581 *
4582 * Find a master upper device and return pointer to it or NULL in case
4583 * it's not there. The caller must hold the RCU read lock.
4584 */
4585struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4586{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004587 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004588
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004589 upper = list_first_or_null_rcu(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004590 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004591 if (upper && likely(upper->master))
4592 return upper->dev;
4593 return NULL;
4594}
4595EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4596
Veaceslav Falico5d261912013-08-28 23:25:05 +02004597static int __netdev_adjacent_dev_insert(struct net_device *dev,
4598 struct net_device *adj_dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02004599 struct list_head *dev_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004600 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004601{
4602 struct netdev_adjacent *adj;
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004603 char linkname[IFNAMSIZ+7];
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004604 int ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004605
Veaceslav Falico7863c052013-09-25 09:20:06 +02004606 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004607
4608 if (adj) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004609 adj->ref_nr++;
4610 return 0;
4611 }
4612
4613 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4614 if (!adj)
4615 return -ENOMEM;
4616
4617 adj->dev = adj_dev;
4618 adj->master = master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004619 adj->ref_nr = 1;
Veaceslav Falico402dae92013-09-25 09:20:09 +02004620 adj->private = private;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004621 dev_hold(adj_dev);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004622
4623 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4624 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004625
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004626 if (dev_list == &dev->adj_list.lower) {
4627 sprintf(linkname, "lower_%s", adj_dev->name);
4628 ret = sysfs_create_link(&(dev->dev.kobj),
4629 &(adj_dev->dev.kobj), linkname);
4630 if (ret)
4631 goto free_adj;
4632 } else if (dev_list == &dev->adj_list.upper) {
4633 sprintf(linkname, "upper_%s", adj_dev->name);
4634 ret = sysfs_create_link(&(dev->dev.kobj),
4635 &(adj_dev->dev.kobj), linkname);
4636 if (ret)
4637 goto free_adj;
4638 }
4639
Veaceslav Falico7863c052013-09-25 09:20:06 +02004640 /* Ensure that master link is always the first item in list. */
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004641 if (master) {
4642 ret = sysfs_create_link(&(dev->dev.kobj),
4643 &(adj_dev->dev.kobj), "master");
4644 if (ret)
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004645 goto remove_symlinks;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004646
Veaceslav Falico7863c052013-09-25 09:20:06 +02004647 list_add_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004648 } else {
Veaceslav Falico7863c052013-09-25 09:20:06 +02004649 list_add_tail_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004650 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02004651
4652 return 0;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004653
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004654remove_symlinks:
4655 if (dev_list == &dev->adj_list.lower) {
4656 sprintf(linkname, "lower_%s", adj_dev->name);
4657 sysfs_remove_link(&(dev->dev.kobj), linkname);
4658 } else if (dev_list == &dev->adj_list.upper) {
4659 sprintf(linkname, "upper_%s", adj_dev->name);
4660 sysfs_remove_link(&(dev->dev.kobj), linkname);
4661 }
4662
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004663free_adj:
4664 kfree(adj);
Nikolay Aleksandrov974daef2013-10-23 15:28:56 +02004665 dev_put(adj_dev);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004666
4667 return ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004668}
4669
Veaceslav Falico5d261912013-08-28 23:25:05 +02004670void __netdev_adjacent_dev_remove(struct net_device *dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02004671 struct net_device *adj_dev,
4672 struct list_head *dev_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004673{
4674 struct netdev_adjacent *adj;
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004675 char linkname[IFNAMSIZ+7];
Veaceslav Falico5d261912013-08-28 23:25:05 +02004676
Veaceslav Falico7863c052013-09-25 09:20:06 +02004677 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004678
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004679 if (!adj) {
4680 pr_err("tried to remove device %s from %s\n",
4681 dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004682 BUG();
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004683 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02004684
4685 if (adj->ref_nr > 1) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004686 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4687 adj->ref_nr-1);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004688 adj->ref_nr--;
4689 return;
4690 }
4691
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004692 if (adj->master)
4693 sysfs_remove_link(&(dev->dev.kobj), "master");
4694
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004695 if (dev_list == &dev->adj_list.lower) {
4696 sprintf(linkname, "lower_%s", adj_dev->name);
4697 sysfs_remove_link(&(dev->dev.kobj), linkname);
4698 } else if (dev_list == &dev->adj_list.upper) {
4699 sprintf(linkname, "upper_%s", adj_dev->name);
4700 sysfs_remove_link(&(dev->dev.kobj), linkname);
4701 }
4702
Veaceslav Falico5d261912013-08-28 23:25:05 +02004703 list_del_rcu(&adj->list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004704 pr_debug("dev_put for %s, because link removed from %s to %s\n",
4705 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004706 dev_put(adj_dev);
4707 kfree_rcu(adj, rcu);
4708}
4709
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004710int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4711 struct net_device *upper_dev,
4712 struct list_head *up_list,
4713 struct list_head *down_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004714 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004715{
4716 int ret;
4717
Veaceslav Falico402dae92013-09-25 09:20:09 +02004718 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4719 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004720 if (ret)
4721 return ret;
4722
Veaceslav Falico402dae92013-09-25 09:20:09 +02004723 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4724 false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004725 if (ret) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004726 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004727 return ret;
4728 }
4729
4730 return 0;
4731}
4732
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004733int __netdev_adjacent_dev_link(struct net_device *dev,
4734 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004735{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004736 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
4737 &dev->all_adj_list.upper,
4738 &upper_dev->all_adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004739 NULL, false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004740}
4741
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004742void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
4743 struct net_device *upper_dev,
4744 struct list_head *up_list,
4745 struct list_head *down_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004746{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004747 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4748 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004749}
4750
4751void __netdev_adjacent_dev_unlink(struct net_device *dev,
4752 struct net_device *upper_dev)
4753{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004754 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4755 &dev->all_adj_list.upper,
4756 &upper_dev->all_adj_list.lower);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004757}
4758
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004759int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
4760 struct net_device *upper_dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004761 void *private, bool master)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004762{
4763 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
4764
4765 if (ret)
4766 return ret;
4767
4768 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
4769 &dev->adj_list.upper,
4770 &upper_dev->adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004771 private, master);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004772 if (ret) {
4773 __netdev_adjacent_dev_unlink(dev, upper_dev);
4774 return ret;
4775 }
4776
4777 return 0;
4778}
4779
4780void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
4781 struct net_device *upper_dev)
4782{
4783 __netdev_adjacent_dev_unlink(dev, upper_dev);
4784 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4785 &dev->adj_list.upper,
4786 &upper_dev->adj_list.lower);
4787}
Veaceslav Falico5d261912013-08-28 23:25:05 +02004788
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004789static int __netdev_upper_dev_link(struct net_device *dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004790 struct net_device *upper_dev, bool master,
4791 void *private)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004792{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004793 struct netdev_adjacent *i, *j, *to_i, *to_j;
4794 int ret = 0;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004795
4796 ASSERT_RTNL();
4797
4798 if (dev == upper_dev)
4799 return -EBUSY;
4800
4801 /* To prevent loops, check if dev is not upper device to upper_dev. */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004802 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004803 return -EBUSY;
4804
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004805 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004806 return -EEXIST;
4807
4808 if (master && netdev_master_upper_dev_get(dev))
4809 return -EBUSY;
4810
Veaceslav Falico402dae92013-09-25 09:20:09 +02004811 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
4812 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004813 if (ret)
4814 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004815
Veaceslav Falico5d261912013-08-28 23:25:05 +02004816 /* Now that we linked these devs, make all the upper_dev's
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004817	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
Veaceslav Falico5d261912013-08-28 23:25:05 +02004818	 * vice versa, and don't forget the devices themselves. All of these
4819 * links are non-neighbours.
4820 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004821 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4822 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
4823 pr_debug("Interlinking %s with %s, non-neighbour\n",
4824 i->dev->name, j->dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004825 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
4826 if (ret)
4827 goto rollback_mesh;
4828 }
4829 }
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004830
Veaceslav Falico5d261912013-08-28 23:25:05 +02004831 /* add dev to every upper_dev's upper device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004832 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
4833 pr_debug("linking %s's upper device %s with %s\n",
4834 upper_dev->name, i->dev->name, dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004835 ret = __netdev_adjacent_dev_link(dev, i->dev);
4836 if (ret)
4837 goto rollback_upper_mesh;
4838 }
4839
4840 /* add upper_dev to every dev's lower device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004841 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4842 pr_debug("linking %s's lower device %s with %s\n", dev->name,
4843 i->dev->name, upper_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004844 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
4845 if (ret)
4846 goto rollback_lower_mesh;
4847 }
4848
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004849 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004850 return 0;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004851
4852rollback_lower_mesh:
4853 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004854 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004855 if (i == to_i)
4856 break;
4857 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4858 }
4859
4860 i = NULL;
4861
4862rollback_upper_mesh:
4863 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004864 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004865 if (i == to_i)
4866 break;
4867 __netdev_adjacent_dev_unlink(dev, i->dev);
4868 }
4869
4870 i = j = NULL;
4871
4872rollback_mesh:
4873 to_i = i;
4874 to_j = j;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004875 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4876 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004877 if (i == to_i && j == to_j)
4878 break;
4879 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4880 }
4881 if (i == to_i)
4882 break;
4883 }
4884
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004885 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004886
4887 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004888}
4889
4890/**
4891 * netdev_upper_dev_link - Add a link to the upper device
4892 * @dev: device
4893 * @upper_dev: new upper device
4894 *
4895 * Adds a link to device which is upper to this one. The caller must hold
4896 * the RTNL lock. On a failure a negative errno code is returned.
4897 * On success the reference counts are adjusted and the function
4898 * returns zero.
4899 */
4900int netdev_upper_dev_link(struct net_device *dev,
4901 struct net_device *upper_dev)
4902{
Veaceslav Falico402dae92013-09-25 09:20:09 +02004903 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004904}
4905EXPORT_SYMBOL(netdev_upper_dev_link);
4906
4907/**
4908 * netdev_master_upper_dev_link - Add a master link to the upper device
4909 * @dev: device
4910 * @upper_dev: new upper device
4911 *
4912 * Adds a link to device which is upper to this one. In this case, only
4913 * one master upper device can be linked, although other non-master devices
4914 * might be linked as well. The caller must hold the RTNL lock.
4915 * On a failure a negative errno code is returned. On success the reference
4916 * counts are adjusted and the function returns zero.
4917 */
4918int netdev_master_upper_dev_link(struct net_device *dev,
4919 struct net_device *upper_dev)
4920{
Veaceslav Falico402dae92013-09-25 09:20:09 +02004921 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004922}
4923EXPORT_SYMBOL(netdev_master_upper_dev_link);
4924
Veaceslav Falico402dae92013-09-25 09:20:09 +02004925int netdev_master_upper_dev_link_private(struct net_device *dev,
4926 struct net_device *upper_dev,
4927 void *private)
4928{
4929 return __netdev_upper_dev_link(dev, upper_dev, true, private);
4930}
4931EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
4932
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004933/**
4934 * netdev_upper_dev_unlink - Removes a link to upper device
4935 * @dev: device
4936 * @upper_dev: new upper device
4937 *
4938 * Removes a link to device which is upper to this one. The caller must hold
4939 * the RTNL lock.
4940 */
4941void netdev_upper_dev_unlink(struct net_device *dev,
4942 struct net_device *upper_dev)
4943{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004944 struct netdev_adjacent *i, *j;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004945 ASSERT_RTNL();
4946
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004947 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004948
4949 /* Here is the tricky part. We must remove all dev's lower
4950 * devices from all upper_dev's upper devices and vice
4951 * versa, to maintain the graph relationship.
4952 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004953 list_for_each_entry(i, &dev->all_adj_list.lower, list)
4954 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004955 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4956
4957 /* remove also the devices itself from lower/upper device
4958 * list
4959 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004960 list_for_each_entry(i, &dev->all_adj_list.lower, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004961 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4962
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004963 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004964 __netdev_adjacent_dev_unlink(dev, i->dev);
4965
Jiri Pirko42e52bf2013-05-25 04:12:10 +00004966 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004967}
4968EXPORT_SYMBOL(netdev_upper_dev_unlink);
4969
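/*
 * Editor's sketch of the enslave/release pairing these helpers exist for
 * (bonding/team-style), under RTNL. The private pointer stored at link
 * time comes back later via netdev_lower_dev_get_private(master, slave).
 * Names are hypothetical.
 */
static int my_enslave(struct net_device *master, struct net_device *slave,
		      void *slave_state)
{
	ASSERT_RTNL();
	/* fails with -EBUSY on loops or a second master, -EEXIST if linked */
	return netdev_master_upper_dev_link_private(slave, master, slave_state);
}

static void my_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave, master);
}
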
Veaceslav Falico402dae92013-09-25 09:20:09 +02004970void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
4971 struct net_device *lower_dev)
4972{
4973 struct netdev_adjacent *lower;
4974
4975 if (!lower_dev)
4976 return NULL;
4977 lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
4978 if (!lower)
4979 return NULL;
4980
4981 return lower->private;
4982}
4983EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
4984
4985void *netdev_lower_dev_get_private(struct net_device *dev,
4986 struct net_device *lower_dev)
4987{
4988 struct netdev_adjacent *lower;
4989
4990 if (!lower_dev)
4991 return NULL;
4992 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
4993 if (!lower)
4994 return NULL;
4995
4996 return lower->private;
4997}
4998EXPORT_SYMBOL(netdev_lower_dev_get_private);
4999
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005000static void dev_change_rx_flags(struct net_device *dev, int flags)
5001{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005002 const struct net_device_ops *ops = dev->netdev_ops;
5003
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05005004 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005005 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005006}
5007
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005008static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07005009{
Eric Dumazetb536db92011-11-30 21:42:26 +00005010 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005011 kuid_t uid;
5012 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07005013
Patrick McHardy24023452007-07-14 18:51:31 -07005014 ASSERT_RTNL();
5015
Wang Chendad9b332008-06-18 01:48:28 -07005016 dev->flags |= IFF_PROMISC;
5017 dev->promiscuity += inc;
5018 if (dev->promiscuity == 0) {
5019 /*
5020 * Avoid overflow.
5021 * If inc causes overflow, untouch promisc and return error.
5022 */
5023 if (inc < 0)
5024 dev->flags &= ~IFF_PROMISC;
5025 else {
5026 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005027 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5028 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005029 return -EOVERFLOW;
5030 }
5031 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005032 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005033 pr_info("device %s %s promiscuous mode\n",
5034 dev->name,
5035 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005036 if (audit_enabled) {
5037 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005038 audit_log(current->audit_context, GFP_ATOMIC,
5039 AUDIT_ANOM_PROMISCUOUS,
5040 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5041 dev->name, (dev->flags & IFF_PROMISC),
5042 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005043 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005044 from_kuid(&init_user_ns, uid),
5045 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005046 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005047 }
Patrick McHardy24023452007-07-14 18:51:31 -07005048
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005049 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005050 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005051 if (notify)
5052 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07005053 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005054}
5055
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056/**
5057 * dev_set_promiscuity - update promiscuity count on a device
5058 * @dev: device
5059 * @inc: modifier
5060 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005061 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062 * remains above zero the interface remains promiscuous. Once it hits zero
5063 * the device reverts back to normal filtering operation. A negative inc
5064 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005065 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 */
Wang Chendad9b332008-06-18 01:48:28 -07005067int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068{
Eric Dumazetb536db92011-11-30 21:42:26 +00005069 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005070 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005072 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005073 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005074 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005075 if (dev->flags != old_flags)
5076 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005077 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005079EXPORT_SYMBOL(dev_set_promiscuity);
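/*
 * Example (editor's illustrative sketch, not part of this file): a
 * capture-style user takes one promiscuity reference for the lifetime of
 * its session and drops it afterwards. "dev" is a net_device the caller
 * already holds; the #if 0 keeps the sketch out of the build.
 */
#if 0
static int capture_session_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one reference */
	rtnl_unlock();
	return err;
}

static void capture_session_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}
#endif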
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005081static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005082{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005083 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005084
Patrick McHardy24023452007-07-14 18:51:31 -07005085 ASSERT_RTNL();
5086
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005088 dev->allmulti += inc;
5089 if (dev->allmulti == 0) {
5090 /*
5091 * Avoid overflow.
 5092		 * If inc causes overflow, leave allmulti untouched and return an error.
5093 */
5094 if (inc < 0)
5095 dev->flags &= ~IFF_ALLMULTI;
5096 else {
5097 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005098			pr_warn("%s: allmulti counter overflowed, allmulti unchanged; allmulti on this device may be unreliable\n",
5099 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005100 return -EOVERFLOW;
5101 }
5102 }
Patrick McHardy24023452007-07-14 18:51:31 -07005103 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005104 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005105 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005106 if (notify)
5107 __dev_notify_flags(dev, old_flags,
5108 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07005109 }
Wang Chendad9b332008-06-18 01:48:28 -07005110 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005111}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005112
5113/**
5114 * dev_set_allmulti - update allmulti count on a device
5115 * @dev: device
5116 * @inc: modifier
5117 *
 5118 *	Add or remove reception of all multicast frames on a device. While the
 5119 *	count in the device remains above zero the interface remains listening
 5120 *	to all multicast frames. Once it hits zero the device reverts to normal
5121 * filtering operation. A negative @inc value is used to drop the counter
5122 * when releasing a resource needing all multicasts.
5123 * Return 0 if successful or a negative errno code on error.
5124 */
5125
5126int dev_set_allmulti(struct net_device *dev, int inc)
5127{
5128 return __dev_set_allmulti(dev, inc, true);
5129}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005130EXPORT_SYMBOL(dev_set_allmulti);
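/*
 * Example (editor's sketch, not from this file): a stacked device that
 * must see every multicast frame from its lower device, bonding-style,
 * holds one allmulti reference while attached. The "foo_*" names and
 * "lower" are hypothetical.
 */
#if 0
static int foo_attach(struct net_device *lower)
{
	ASSERT_RTNL();
	return dev_set_allmulti(lower, 1);	/* take a reference */
}

static void foo_detach(struct net_device *lower)
{
	ASSERT_RTNL();
	dev_set_allmulti(lower, -1);		/* drop it again */
}
#endif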
Patrick McHardy4417da62007-06-27 01:28:10 -07005131
5132/*
5133 * Upload unicast and multicast address lists to device and
5134 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005135 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005136 * are present.
5137 */
5138void __dev_set_rx_mode(struct net_device *dev)
5139{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005140 const struct net_device_ops *ops = dev->netdev_ops;
5141
Patrick McHardy4417da62007-06-27 01:28:10 -07005142 /* dev_open will call this function so the list will stay sane. */
5143 if (!(dev->flags&IFF_UP))
5144 return;
5145
5146 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005147 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005148
Jiri Pirko01789342011-08-16 06:29:00 +00005149 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005150 /* Unicast addresses changes may only happen under the rtnl,
5151 * therefore calling __dev_set_promiscuity here is safe.
5152 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005153 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005154 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005155 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005156 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005157 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005158 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005159 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005160 }
Jiri Pirko01789342011-08-16 06:29:00 +00005161
5162 if (ops->ndo_set_rx_mode)
5163 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005164}
5165
5166void dev_set_rx_mode(struct net_device *dev)
5167{
David S. Millerb9e40852008-07-15 00:15:08 -07005168 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005169 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005170 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171}
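/*
 * Example (editor's sketch): a driver whose hardware has a real unicast
 * filter advertises IFF_UNICAST_FLT, so the core above does not fall
 * back to promiscuous mode while secondary unicast addresses are
 * present. The "foo_*" names are hypothetical.
 */
#if 0
static void foo_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_uc_addr(ha, dev)
		foo_hw_add_uc_filter(dev, ha->addr);	/* hypothetical hw op */
}

static void foo_setup(struct net_device *dev)
{
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->netdev_ops = &foo_netdev_ops;	/* wires up .ndo_set_rx_mode */
}
#endif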
5172
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005173/**
5174 * dev_get_flags - get flags reported to userspace
5175 * @dev: device
5176 *
5177 * Get the combination of flag bits exported through APIs to userspace.
5178 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005179unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180{
Eric Dumazet95c96172012-04-15 05:58:06 +00005181 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182
5183 flags = (dev->flags & ~(IFF_PROMISC |
5184 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005185 IFF_RUNNING |
5186 IFF_LOWER_UP |
5187 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188 (dev->gflags & (IFF_PROMISC |
5189 IFF_ALLMULTI));
5190
Stefan Rompfb00055a2006-03-20 17:09:11 -08005191 if (netif_running(dev)) {
5192 if (netif_oper_up(dev))
5193 flags |= IFF_RUNNING;
5194 if (netif_carrier_ok(dev))
5195 flags |= IFF_LOWER_UP;
5196 if (netif_dormant(dev))
5197 flags |= IFF_DORMANT;
5198 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199
5200 return flags;
5201}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005202EXPORT_SYMBOL(dev_get_flags);
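/*
 * Example (editor's sketch): reading the userspace view of the flags the
 * way a SIOCGIFFLAGS-style caller would. "foo_report_state" is
 * hypothetical.
 */
#if 0
static void foo_report_state(struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	if ((flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
		pr_info("%s is administratively and operationally up\n",
			dev->name);
}
#endif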
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203
Patrick McHardybd380812010-02-26 06:34:53 +00005204int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205{
Eric Dumazetb536db92011-11-30 21:42:26 +00005206 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005207 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208
Patrick McHardy24023452007-07-14 18:51:31 -07005209 ASSERT_RTNL();
5210
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 /*
5212 * Set the flags on our device.
5213 */
5214
5215 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5216 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5217 IFF_AUTOMEDIA)) |
5218 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5219 IFF_ALLMULTI));
5220
5221 /*
5222 * Load in the correct multicast list now the flags have changed.
5223 */
5224
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005225 if ((old_flags ^ flags) & IFF_MULTICAST)
5226 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005227
Patrick McHardy4417da62007-06-27 01:28:10 -07005228 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229
5230 /*
 5231	 *	Have we downed the interface? We handle IFF_UP ourselves
5232 * according to user attempts to set it, rather than blindly
5233 * setting it.
5234 */
5235
5236 ret = 0;
5237 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00005238 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239
5240 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07005241 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 }
5243
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005245 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005246 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005247
Linus Torvalds1da177e2005-04-16 15:20:36 -07005248 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005249
5250 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5251 if (dev->flags != old_flags)
5252 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253 }
5254
5255 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 5256	   is important. Some (broken) drivers set IFF_PROMISC when
 5257	   IFF_ALLMULTI is requested, without asking us and without reporting it.
5258 */
5259 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005260 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5261
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005263 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264 }
5265
Patrick McHardybd380812010-02-26 06:34:53 +00005266 return ret;
5267}
5268
Nicolas Dichtela528c212013-09-25 12:02:44 +02005269void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5270 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00005271{
5272 unsigned int changes = dev->flags ^ old_flags;
5273
Nicolas Dichtela528c212013-09-25 12:02:44 +02005274 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005275 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005276
Patrick McHardybd380812010-02-26 06:34:53 +00005277 if (changes & IFF_UP) {
5278 if (dev->flags & IFF_UP)
5279 call_netdevice_notifiers(NETDEV_UP, dev);
5280 else
5281 call_netdevice_notifiers(NETDEV_DOWN, dev);
5282 }
5283
5284 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00005285 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5286 struct netdev_notifier_change_info change_info;
5287
5288 change_info.flags_changed = changes;
5289 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5290 &change_info.info);
5291 }
Patrick McHardybd380812010-02-26 06:34:53 +00005292}
5293
5294/**
5295 * dev_change_flags - change device settings
5296 * @dev: device
5297 * @flags: device state flags
5298 *
5299 * Change settings on device based state flags. The flags are
5300 * in the userspace exported format.
5301 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005302int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005303{
Eric Dumazetb536db92011-11-30 21:42:26 +00005304 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005305 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00005306
5307 ret = __dev_change_flags(dev, flags);
5308 if (ret < 0)
5309 return ret;
5310
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005311 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005312 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313 return ret;
5314}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005315EXPORT_SYMBOL(dev_change_flags);
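/*
 * Example (editor's sketch): bringing an interface administratively up
 * the way an ioctl-style caller would, preserving the other flag bits.
 * "foo_bring_up" is hypothetical.
 */
#if 0
static int foo_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif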
Linus Torvalds1da177e2005-04-16 15:20:36 -07005316
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005317/**
5318 * dev_set_mtu - Change maximum transfer unit
5319 * @dev: device
5320 * @new_mtu: new transfer unit
5321 *
5322 * Change the maximum transfer size of the network device.
5323 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324int dev_set_mtu(struct net_device *dev, int new_mtu)
5325{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005326 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 int err;
5328
5329 if (new_mtu == dev->mtu)
5330 return 0;
5331
5332 /* MTU must be positive. */
5333 if (new_mtu < 0)
5334 return -EINVAL;
5335
5336 if (!netif_device_present(dev))
5337 return -ENODEV;
5338
5339 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005340 if (ops->ndo_change_mtu)
5341 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342 else
5343 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005344
Jiri Pirkoe3d8fab2012-12-03 01:16:32 +00005345 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005346 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347 return err;
5348}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005349EXPORT_SYMBOL(dev_set_mtu);
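/*
 * Example (editor's sketch): switching to a jumbo MTU and reporting
 * failure; callers are expected to hold RTNL around the call.
 * "foo_set_jumbo_mtu" is hypothetical.
 */
#if 0
static int foo_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	if (err)
		pr_warn("%s: cannot set jumbo MTU: %d\n", dev->name, err);
	return err;
}
#endif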
Linus Torvalds1da177e2005-04-16 15:20:36 -07005350
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005351/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005352 * dev_set_group - Change group this device belongs to
5353 * @dev: device
5354 * @new_group: group this device should belong to
5355 */
5356void dev_set_group(struct net_device *dev, int new_group)
5357{
5358 dev->group = new_group;
5359}
5360EXPORT_SYMBOL(dev_set_group);
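/*
 * Example (editor's sketch): grouping related interfaces so they can be
 * operated on together; netlink's IFLA_GROUP handling sets this the same
 * way. The group number 42 is arbitrary and "foo_assign_group" is
 * hypothetical.
 */
#if 0
static void foo_assign_group(struct net_device *dev)
{
	dev_set_group(dev, 42);
}
#endif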
5361
5362/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005363 * dev_set_mac_address - Change Media Access Control Address
5364 * @dev: device
5365 * @sa: new address
5366 *
5367 * Change the hardware (MAC) address of the device
5368 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005369int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5370{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005371 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005372 int err;
5373
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005374 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375 return -EOPNOTSUPP;
5376 if (sa->sa_family != dev->type)
5377 return -EINVAL;
5378 if (!netif_device_present(dev))
5379 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005380 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005381 if (err)
5382 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005383 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005384 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005385 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005386 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005388EXPORT_SYMBOL(dev_set_mac_address);
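/*
 * Example (editor's sketch): programming a new (locally administered)
 * MAC address the way SIOCSIFHWADDR does; sa_family must match dev->type
 * and RTNL is held by the caller. "foo_set_mac" is hypothetical.
 */
#if 0
static int foo_set_mac(struct net_device *dev)
{
	static const u8 new_mac[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x12, 0x34, 0x56
	};
	struct sockaddr sa;

	sa.sa_family = dev->type;		/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, new_mac, ETH_ALEN);
	return dev_set_mac_address(dev, &sa);
}
#endif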
Linus Torvalds1da177e2005-04-16 15:20:36 -07005389
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005390/**
5391 * dev_change_carrier - Change device carrier
5392 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00005393 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005394 *
5395 * Change device carrier
5396 */
5397int dev_change_carrier(struct net_device *dev, bool new_carrier)
5398{
5399 const struct net_device_ops *ops = dev->netdev_ops;
5400
5401 if (!ops->ndo_change_carrier)
5402 return -EOPNOTSUPP;
5403 if (!netif_device_present(dev))
5404 return -ENODEV;
5405 return ops->ndo_change_carrier(dev, new_carrier);
5406}
5407EXPORT_SYMBOL(dev_change_carrier);
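/*
 * Example (editor's sketch): a software device can expose carrier
 * control by implementing .ndo_change_carrier, much as the dummy driver
 * does; "foo" is hypothetical.
 */
#if 0
static int foo_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
#endif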
5408
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02005410 * dev_get_phys_port_id - Get device physical port ID
5411 * @dev: device
5412 * @ppid: port ID
5413 *
5414 * Get device physical port ID
5415 */
5416int dev_get_phys_port_id(struct net_device *dev,
5417 struct netdev_phys_port_id *ppid)
5418{
5419 const struct net_device_ops *ops = dev->netdev_ops;
5420
5421 if (!ops->ndo_get_phys_port_id)
5422 return -EOPNOTSUPP;
5423 return ops->ndo_get_phys_port_id(dev, ppid);
5424}
5425EXPORT_SYMBOL(dev_get_phys_port_id);
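/*
 * Example (editor's sketch): querying the physical port ID, as the
 * rtnetlink code does when filling IFLA_PHYS_PORT_ID.
 * "foo_show_port_id" is hypothetical.
 */
#if 0
static void foo_show_port_id(struct net_device *dev)
{
	struct netdev_phys_port_id ppid;

	if (!dev_get_phys_port_id(dev, &ppid))
		pr_info("%s: phys port id is %u bytes long\n",
			dev->name, ppid.id_len);
}
#endif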
5426
5427/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005429 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430 *
5431 * Returns a suitable unique value for a new device interface
5432 * number. The caller must hold the rtnl semaphore or the
5433 * dev_base_lock to be sure it remains unique.
5434 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005435static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005437 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438 for (;;) {
5439 if (++ifindex <= 0)
5440 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005441 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005442 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443 }
5444}
5445
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005447static LIST_HEAD(net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005448static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005449
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005450static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005451{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005452 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005453 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005454}
5455
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005456static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005457{
Krishna Kumare93737b2009-12-08 22:26:02 +00005458 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005459 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005460
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005461 BUG_ON(dev_boot_phase);
5462 ASSERT_RTNL();
5463
Krishna Kumare93737b2009-12-08 22:26:02 +00005464 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005465		/* Some devices reach this point without ever having been
Krishna Kumare93737b2009-12-08 22:26:02 +00005466		 * registered, as error-path unwind of their initialization.
 5467		 * Remove those devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005468 */
5469 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005470 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5471 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005472
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005473 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005474 list_del(&dev->unreg_list);
5475 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005476 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005477 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005478 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005479 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005480
Octavian Purdila44345722010-12-13 12:44:07 +00005481 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005482 list_for_each_entry(dev, head, unreg_list)
5483 list_add_tail(&dev->close_list, &close_head);
5484 dev_close_many(&close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005485
Octavian Purdila44345722010-12-13 12:44:07 +00005486 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005487 /* And unlink it from device chain. */
5488 unlist_netdevice(dev);
5489
5490 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005491 }
5492
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005493 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005494
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005495 list_for_each_entry(dev, head, unreg_list) {
5496 /* Shutdown queueing discipline. */
5497 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005498
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005499
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005500		/* Notify protocols that we are about to destroy
 5501		   this device. They should clean up all of their state.
5502 */
5503 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5504
Patrick McHardya2835762010-02-26 06:34:51 +00005505 if (!dev->rtnl_link_ops ||
5506 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005507 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Patrick McHardya2835762010-02-26 06:34:51 +00005508
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005509 /*
5510 * Flush the unicast and multicast chains
5511 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005512 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005513 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005514
5515 if (dev->netdev_ops->ndo_uninit)
5516 dev->netdev_ops->ndo_uninit(dev);
5517
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005518 /* Notifier chain MUST detach us all upper devices. */
5519 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005520
5521 /* Remove entries from kobject tree */
5522 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00005523#ifdef CONFIG_XPS
5524 /* Remove XPS queueing entries */
5525 netif_reset_xps_queues_gt(dev, 0);
5526#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005527 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005528
Eric W. Biederman850a5452011-10-13 22:25:23 +00005529 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005530
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005531 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005532 dev_put(dev);
5533}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005534
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005535static void rollback_registered(struct net_device *dev)
5536{
5537 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005538
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005539 list_add(&dev->unreg_list, &single);
5540 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005541 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005542}
5543
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005544static netdev_features_t netdev_fix_features(struct net_device *dev,
5545 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005546{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005547 /* Fix illegal checksum combinations */
5548 if ((features & NETIF_F_HW_CSUM) &&
5549 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005550 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005551 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5552 }
5553
Herbert Xub63365a2008-10-23 01:11:29 -07005554 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005555 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005556 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005557 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005558 }
5559
Pravin B Shelarec5f0612013-03-07 09:28:01 +00005560 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5561 !(features & NETIF_F_IP_CSUM)) {
5562 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5563 features &= ~NETIF_F_TSO;
5564 features &= ~NETIF_F_TSO_ECN;
5565 }
5566
5567 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5568 !(features & NETIF_F_IPV6_CSUM)) {
5569 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5570 features &= ~NETIF_F_TSO6;
5571 }
5572
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005573 /* TSO ECN requires that TSO is present as well. */
5574 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5575 features &= ~NETIF_F_TSO_ECN;
5576
Michał Mirosław212b5732011-02-15 16:59:16 +00005577 /* Software GSO depends on SG. */
5578 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005579 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005580 features &= ~NETIF_F_GSO;
5581 }
5582
Michał Mirosławacd11302011-01-24 15:45:15 -08005583 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005584 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005585 /* maybe split UFO into V4 and V6? */
5586 if (!((features & NETIF_F_GEN_CSUM) ||
5587 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5588 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005589 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005590 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005591 features &= ~NETIF_F_UFO;
5592 }
5593
5594 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005595 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005596 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005597 features &= ~NETIF_F_UFO;
5598 }
5599 }
5600
5601 return features;
5602}
Herbert Xub63365a2008-10-23 01:11:29 -07005603
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005604int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005605{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005606 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005607 int err = 0;
5608
Michał Mirosław87267482011-04-12 09:56:38 +00005609 ASSERT_RTNL();
5610
Michał Mirosław5455c692011-02-15 16:59:17 +00005611 features = netdev_get_wanted_features(dev);
5612
5613 if (dev->netdev_ops->ndo_fix_features)
5614 features = dev->netdev_ops->ndo_fix_features(dev, features);
5615
5616 /* driver might be less strict about feature dependencies */
5617 features = netdev_fix_features(dev, features);
5618
5619 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005620 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005621
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005622 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5623 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005624
5625 if (dev->netdev_ops->ndo_set_features)
5626 err = dev->netdev_ops->ndo_set_features(dev, features);
5627
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005628 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005629 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005630 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5631 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005632 return -1;
5633 }
5634
5635 if (!err)
5636 dev->features = features;
5637
5638 return 1;
5639}
5640
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005641/**
5642 * netdev_update_features - recalculate device features
5643 * @dev: the device to check
5644 *
5645 * Recalculate dev->features set and send notifications if it
5646 * has changed. Should be called after driver or hardware dependent
5647 * conditions might have changed that influence the features.
5648 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005649void netdev_update_features(struct net_device *dev)
5650{
5651 if (__netdev_update_features(dev))
5652 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005653}
5654EXPORT_SYMBOL(netdev_update_features);
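/*
 * Example (editor's sketch): after a configuration change that can
 * invalidate offloads, a driver re-runs feature negotiation under RTNL;
 * its ndo_fix_features callback (not shown) masks the affected bits.
 * "foo_priv" and its "loopback" member are hypothetical.
 */
#if 0
static int foo_set_loopback(struct net_device *dev, bool enable)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->loopback = enable;
	netdev_update_features(dev);	/* re-evaluates wanted features */
	return 0;
}
#endif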
5655
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005657 * netdev_change_features - recalculate device features
5658 * @dev: the device to check
5659 *
5660 * Recalculate dev->features set and send notifications even
5661 * if they have not changed. Should be called instead of
5662 * netdev_update_features() if also dev->vlan_features might
5663 * have changed to allow the changes to be propagated to stacked
5664 * VLAN devices.
5665 */
5666void netdev_change_features(struct net_device *dev)
5667{
5668 __netdev_update_features(dev);
5669 netdev_features_change(dev);
5670}
5671EXPORT_SYMBOL(netdev_change_features);
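/*
 * Example (editor's sketch): after shrinking dev->vlan_features (while
 * dev->features itself may be unchanged), force the notification so
 * stacked VLAN devices re-evaluate their own feature sets. RTNL is held;
 * "foo_drop_vlan_tso" is hypothetical.
 */
#if 0
static void foo_drop_vlan_tso(struct net_device *dev)
{
	dev->vlan_features &= ~NETIF_F_ALL_TSO;
	netdev_change_features(dev);	/* notifies even without a change */
}
#endif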
5672
5673/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005674 * netif_stacked_transfer_operstate - transfer operstate
5675 * @rootdev: the root or lower level device to transfer state from
5676 * @dev: the device to transfer operstate to
5677 *
5678 * Transfer operational state from root to device. This is normally
5679 * called when a stacking relationship exists between the root
 5680 *	device and the device (a leaf device).
5681 */
5682void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5683 struct net_device *dev)
5684{
5685 if (rootdev->operstate == IF_OPER_DORMANT)
5686 netif_dormant_on(dev);
5687 else
5688 netif_dormant_off(dev);
5689
5690 if (netif_carrier_ok(rootdev)) {
5691 if (!netif_carrier_ok(dev))
5692 netif_carrier_on(dev);
5693 } else {
5694 if (netif_carrier_ok(dev))
5695 netif_carrier_off(dev);
5696 }
5697}
5698EXPORT_SYMBOL(netif_stacked_transfer_operstate);
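/*
 * Example (editor's sketch): a stacking driver mirrors the lower
 * device's operational state from its NETDEV_CHANGE notifier, much as
 * 802.1q does for its VLANs. "foo_get_upper" is hypothetical.
 */
#if 0
static int foo_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower,
						 foo_get_upper(lower));
	return NOTIFY_DONE;
}
#endif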
5699
Tom Herbertbf264142010-11-26 08:36:09 +00005700#ifdef CONFIG_RPS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005701static int netif_alloc_rx_queues(struct net_device *dev)
5702{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005703 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005704 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005705
Tom Herbertbd25fa72010-10-18 18:00:16 +00005706 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005707
Tom Herbertbd25fa72010-10-18 18:00:16 +00005708 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005709 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005710 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005711
Tom Herbertbd25fa72010-10-18 18:00:16 +00005712 dev->_rx = rx;
5713
Tom Herbertbd25fa72010-10-18 18:00:16 +00005714 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005715 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005716 return 0;
5717}
Tom Herbertbf264142010-11-26 08:36:09 +00005718#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005719
Changli Gaoaa942102010-12-04 02:31:41 +00005720static void netdev_init_one_queue(struct net_device *dev,
5721 struct netdev_queue *queue, void *_unused)
5722{
5723 /* Initialize queue lock */
5724 spin_lock_init(&queue->_xmit_lock);
5725 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5726 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005727 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005728 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005729#ifdef CONFIG_BQL
5730 dql_init(&queue->dql, HZ);
5731#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005732}
5733
Eric Dumazet60877a32013-06-20 01:15:51 -07005734static void netif_free_tx_queues(struct net_device *dev)
5735{
5736 if (is_vmalloc_addr(dev->_tx))
5737 vfree(dev->_tx);
5738 else
5739 kfree(dev->_tx);
5740}
5741
Tom Herberte6484932010-10-18 18:04:39 +00005742static int netif_alloc_netdev_queues(struct net_device *dev)
5743{
5744 unsigned int count = dev->num_tx_queues;
5745 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07005746 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00005747
Eric Dumazet60877a32013-06-20 01:15:51 -07005748 BUG_ON(count < 1 || count > 0xffff);
Tom Herberte6484932010-10-18 18:04:39 +00005749
Eric Dumazet60877a32013-06-20 01:15:51 -07005750 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
5751 if (!tx) {
5752 tx = vzalloc(sz);
5753 if (!tx)
5754 return -ENOMEM;
5755 }
Tom Herberte6484932010-10-18 18:04:39 +00005756 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005757
Tom Herberte6484932010-10-18 18:04:39 +00005758 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5759 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005760
5761 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005762}
5763
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005764/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005765 * register_netdevice - register a network device
5766 * @dev: device to register
5767 *
5768 * Take a completed network device structure and add it to the kernel
5769 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5770 * chain. 0 is returned on success. A negative errno code is returned
5771 * on a failure to set up the device, or if the name is a duplicate.
5772 *
5773 * Callers must hold the rtnl semaphore. You may want
5774 * register_netdev() instead of this.
5775 *
5776 * BUGS:
5777 * The locking appears insufficient to guarantee two parallel registers
5778 * will not get the same name.
5779 */
5780
5781int register_netdevice(struct net_device *dev)
5782{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005783 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005784 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005785
5786 BUG_ON(dev_boot_phase);
5787 ASSERT_RTNL();
5788
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005789 might_sleep();
5790
Linus Torvalds1da177e2005-04-16 15:20:36 -07005791 /* When net_device's are persistent, this will be fatal. */
5792 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005793 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005794
David S. Millerf1f28aa2008-07-15 00:08:33 -07005795 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005796 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798 dev->iflink = -1;
5799
Gao feng828de4f2012-09-13 20:58:27 +00005800 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00005801 if (ret < 0)
5802 goto out;
5803
Linus Torvalds1da177e2005-04-16 15:20:36 -07005804 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005805 if (dev->netdev_ops->ndo_init) {
5806 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005807 if (ret) {
5808 if (ret > 0)
5809 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005810 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811 }
5812 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005813
Patrick McHardyf6469682013-04-19 02:04:27 +00005814 if (((dev->hw_features | dev->features) &
5815 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00005816 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5817 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5818 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5819 ret = -EINVAL;
5820 goto err_uninit;
5821 }
5822
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00005823 ret = -EBUSY;
5824 if (!dev->ifindex)
5825 dev->ifindex = dev_new_index(net);
5826 else if (__dev_get_by_index(net, dev->ifindex))
5827 goto err_uninit;
5828
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 if (dev->iflink == -1)
5830 dev->iflink = dev->ifindex;
5831
Michał Mirosław5455c692011-02-15 16:59:17 +00005832 /* Transfer changeable features to wanted_features and enable
5833 * software offloads (GSO and GRO).
5834 */
5835 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00005836 dev->features |= NETIF_F_SOFT_FEATURES;
5837 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005838
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005839 /* Turn on no cache copy if HW is doing checksum */
Michał Mirosław34324dc2011-11-15 15:29:55 +00005840 if (!(dev->flags & IFF_LOOPBACK)) {
5841 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5842 if (dev->features & NETIF_F_ALL_CSUM) {
5843 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5844 dev->features |= NETIF_F_NOCACHE_COPY;
5845 }
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07005846 }
5847
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005848 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00005849 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07005850 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00005851
Pravin B Shelaree579672013-03-07 09:28:08 +00005852 /* Make NETIF_F_SG inheritable to tunnel devices.
5853 */
5854 dev->hw_enc_features |= NETIF_F_SG;
5855
Simon Horman0d89d202013-05-23 21:02:52 +00005856 /* Make NETIF_F_SG inheritable to MPLS.
5857 */
5858 dev->mpls_features |= NETIF_F_SG;
5859
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005860 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5861 ret = notifier_to_errno(ret);
5862 if (ret)
5863 goto err_uninit;
5864
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005865 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005866 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005867 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005868 dev->reg_state = NETREG_REGISTERED;
5869
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005870 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00005871
Linus Torvalds1da177e2005-04-16 15:20:36 -07005872 /*
5873 * Default initial state at registry is that the
5874 * device is present.
5875 */
5876
5877 set_bit(__LINK_STATE_PRESENT, &dev->state);
5878
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01005879 linkwatch_init_dev(dev);
5880
Linus Torvalds1da177e2005-04-16 15:20:36 -07005881 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005882 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005883 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005884 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005885
Jiri Pirko948b3372013-01-08 01:38:25 +00005886	/* If the device has a permanent device address, the driver should
 5887	 * set dev_addr and leave addr_assign_type at NET_ADDR_PERM
 5888	 * (the default value).
5889 */
5890 if (dev->addr_assign_type == NET_ADDR_PERM)
5891 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5892
Linus Torvalds1da177e2005-04-16 15:20:36 -07005893 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005894 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005895 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005896 if (ret) {
5897 rollback_registered(dev);
5898 dev->reg_state = NETREG_UNREGISTERED;
5899 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005900 /*
5901 * Prevent userspace races by waiting until the network
5902 * device is fully setup before sending notifications.
5903 */
Patrick McHardya2835762010-02-26 06:34:51 +00005904 if (!dev->rtnl_link_ops ||
5905 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005906 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005907
5908out:
5909 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005910
5911err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005912 if (dev->netdev_ops->ndo_uninit)
5913 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005914 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005915}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005916EXPORT_SYMBOL(register_netdevice);
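/*
 * Example (editor's sketch): paths that already run under RTNL (link
 * creation via rtnl_newlink, for instance) call register_netdevice()
 * directly rather than register_netdev(). "foo_newlink" is hypothetical.
 */
#if 0
static int foo_newlink(struct net_device *dev)
{
	ASSERT_RTNL();
	return register_netdevice(dev);
}
#endif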
Linus Torvalds1da177e2005-04-16 15:20:36 -07005917
5918/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005919 * init_dummy_netdev - init a dummy network device for NAPI
5920 * @dev: device to init
5921 *
 5922 * This takes a network device structure and initializes the minimum
 5923 * number of fields so it can be used to schedule NAPI polls without
 5924 * registering a full-blown interface. This is to be used by drivers
5925 * that need to tie several hardware interfaces to a single NAPI
5926 * poll scheduler due to HW limitations.
5927 */
5928int init_dummy_netdev(struct net_device *dev)
5929{
5930 /* Clear everything. Note we don't initialize spinlocks
 5931	 * as they aren't supposed to be taken by any of the
 5932	 * NAPI code and this dummy netdev is supposed to be
 5933	 * used only for NAPI polls.
5934 */
5935 memset(dev, 0, sizeof(struct net_device));
5936
5937 /* make sure we BUG if trying to hit standard
5938 * register/unregister code path
5939 */
5940 dev->reg_state = NETREG_DUMMY;
5941
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005942 /* NAPI wants this */
5943 INIT_LIST_HEAD(&dev->napi_list);
5944
5945 /* a dummy interface is started by default */
5946 set_bit(__LINK_STATE_PRESENT, &dev->state);
5947 set_bit(__LINK_STATE_START, &dev->state);
5948
Eric Dumazet29b44332010-10-11 10:22:12 +00005949	/* Note: we don't allocate pcpu_refcnt for dummy devices,
 5950	 * because users of this 'device' don't need to change
5951 * its refcount.
5952 */
5953
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005954 return 0;
5955}
5956EXPORT_SYMBOL_GPL(init_dummy_netdev);
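/*
 * Example (editor's sketch): a driver with one interrupt serving several
 * hardware queues ties its NAPI context to a dummy netdev, as some
 * wireless drivers do. "foo_priv" (assumed to embed a net_device
 * napi_dev and a napi_struct napi) and "foo_poll" are hypothetical.
 */
#if 0
static void foo_setup_napi(struct foo_priv *priv)
{
	init_dummy_netdev(&priv->napi_dev);
	netif_napi_add(&priv->napi_dev, &priv->napi, foo_poll, 64);
	napi_enable(&priv->napi);
}
#endif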
5957
5958
5959/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005960 * register_netdev - register a network device
5961 * @dev: device to register
5962 *
5963 * Take a completed network device structure and add it to the kernel
5964 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5965 * chain. 0 is returned on success. A negative errno code is returned
5966 * on a failure to set up the device, or if the name is a duplicate.
5967 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005968 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005969 * and expands the device name if you passed a format string to
5970 * alloc_netdev.
5971 */
5972int register_netdev(struct net_device *dev)
5973{
5974 int err;
5975
5976 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005977 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005978 rtnl_unlock();
5979 return err;
5980}
5981EXPORT_SYMBOL(register_netdev);
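/*
 * Example (editor's sketch): a typical probe path around
 * register_netdev(), with the matching unwind; the "foo_*" names are
 * hypothetical.
 */
#if 0
static int foo_probe(struct device *parent)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, parent);
	dev->netdev_ops = &foo_netdev_ops;

	err = register_netdev(dev);	/* takes the rtnl lock internally */
	if (err)
		free_netdev(dev);
	return err;
}
#endif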
5982
Eric Dumazet29b44332010-10-11 10:22:12 +00005983int netdev_refcnt_read(const struct net_device *dev)
5984{
5985 int i, refcnt = 0;
5986
5987 for_each_possible_cpu(i)
5988 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5989 return refcnt;
5990}
5991EXPORT_SYMBOL(netdev_refcnt_read);
5992
Ben Hutchings2c530402012-07-10 10:55:09 +00005993/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005994 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00005995 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005996 *
5997 * This is called when unregistering network devices.
5998 *
5999 * Any protocol or device that holds a reference should register
 6000 * for netdevice notification, and clean up and put back the
6001 * reference if they receive an UNREGISTER event.
6002 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006003 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006004 */
6005static void netdev_wait_allrefs(struct net_device *dev)
6006{
6007 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006008 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006009
Eric Dumazete014deb2009-11-17 05:59:21 +00006010 linkwatch_forget_dev(dev);
6011
Linus Torvalds1da177e2005-04-16 15:20:36 -07006012 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006013 refcnt = netdev_refcnt_read(dev);
6014
6015 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006016 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006017 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006018
6019 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006020 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006021
Eric Dumazet748e2d92012-08-22 21:50:59 +00006022 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006023 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006024 rtnl_lock();
6025
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006026 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006027 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6028 &dev->state)) {
6029 /* We must not have linkwatch events
6030 * pending on unregister. If this
6031 * happens, we simply run the queue
6032 * unscheduled, resulting in a noop
6033 * for this device.
6034 */
6035 linkwatch_run_queue();
6036 }
6037
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006038 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006039
6040 rebroadcast_time = jiffies;
6041 }
6042
6043 msleep(250);
6044
Eric Dumazet29b44332010-10-11 10:22:12 +00006045 refcnt = netdev_refcnt_read(dev);
6046
Linus Torvalds1da177e2005-04-16 15:20:36 -07006047 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006048 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6049 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006050 warning_time = jiffies;
6051 }
6052 }
6053}
6054
6055/* The sequence is:
6056 *
6057 * rtnl_lock();
6058 * ...
6059 * register_netdevice(x1);
6060 * register_netdevice(x2);
6061 * ...
6062 * unregister_netdevice(y1);
6063 * unregister_netdevice(y2);
6064 * ...
6065 * rtnl_unlock();
6066 * free_netdev(y1);
6067 * free_netdev(y2);
6068 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006069 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006070 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006071 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072 * without deadlocking with linkwatch via keventd.
6073 * 2) Since we run with the RTNL semaphore not held, we can sleep
6074 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006075 *
6076 * We must not return until all unregister events added during
6077 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006078 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006079void netdev_run_todo(void)
6080{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006081 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006082
Linus Torvalds1da177e2005-04-16 15:20:36 -07006083 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006084 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006085
6086 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006087
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006088
6089 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006090 if (!list_empty(&list))
6091 rcu_barrier();
6092
Linus Torvalds1da177e2005-04-16 15:20:36 -07006093 while (!list_empty(&list)) {
6094 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006095 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006096 list_del(&dev->todo_list);
6097
Eric Dumazet748e2d92012-08-22 21:50:59 +00006098 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006099 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006100 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006101
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006102 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006103 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006104 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006105 dump_stack();
6106 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006107 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006108
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006109 dev->reg_state = NETREG_UNREGISTERED;
6110
Changli Gao152102c2010-03-30 20:16:22 +00006111 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006112
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006113 netdev_wait_allrefs(dev);
6114
6115 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006116 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006117 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6118 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006119 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006120
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006121 if (dev->destructor)
6122 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006123
Eric W. Biederman50624c92013-09-23 21:19:49 -07006124 /* Report a network device has been unregistered */
6125 rtnl_lock();
6126 dev_net(dev)->dev_unreg_count--;
6127 __rtnl_unlock();
6128 wake_up(&netdev_unregistering_wq);
6129
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006130 /* Free network device */
6131 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006132 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006133}
6134
Ben Hutchings3cfde792010-07-09 09:11:52 +00006135/* Convert net_device_stats to rtnl_link_stats64. They have the same
6136 * fields in the same order, with only the type differing.
6137 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006138void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6139 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006140{
6141#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006142 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6143 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006144#else
6145 size_t i, n = sizeof(*stats64) / sizeof(u64);
6146 const unsigned long *src = (const unsigned long *)netdev_stats;
6147 u64 *dst = (u64 *)stats64;
6148
6149 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6150 sizeof(*stats64) / sizeof(u64));
6151 for (i = 0; i < n; i++)
6152 dst[i] = src[i];
6153#endif
6154}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006155EXPORT_SYMBOL(netdev_stats_to_stats64);
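/*
 * Example (editor's sketch): a driver that only maintains the legacy
 * dev->stats counters can still provide 64-bit statistics by widening
 * them in its .ndo_get_stats64 callback. "foo_get_stats64" is
 * hypothetical.
 */
#if 0
static struct rtnl_link_stats64 *foo_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);
	return s;
}
#endif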
Ben Hutchings3cfde792010-07-09 09:11:52 +00006156
Eric Dumazetd83345a2009-11-16 03:36:51 +00006157/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006158 * dev_get_stats - get network device statistics
6159 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006160 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006161 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006162 * Get network statistics from device. Return @storage.
6163 * The device driver may provide its own method by setting
6164 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6165 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006166 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006167struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6168 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006169{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006170 const struct net_device_ops *ops = dev->netdev_ops;
6171
Eric Dumazet28172732010-07-07 14:58:56 -07006172 if (ops->ndo_get_stats64) {
6173 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006174 ops->ndo_get_stats64(dev, storage);
6175 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006176 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006177 } else {
6178 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006179 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006180 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006181 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006182}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006183EXPORT_SYMBOL(dev_get_stats);
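/*
 * Example (editor's sketch): snapshotting a device's counters into
 * on-stack storage, as the procfs and sysfs statistics code does.
 * "foo_dump_rx" is hypothetical.
 */
#if 0
static void foo_dump_rx(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	pr_info("%s: %llu packets received\n", dev->name,
		(unsigned long long)stats->rx_packets);
}
#endif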
Rusty Russellc45d2862007-03-28 14:29:08 -07006184
Eric Dumazet24824a02010-10-02 06:11:55 +00006185struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006186{
Eric Dumazet24824a02010-10-02 06:11:55 +00006187 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006188
Eric Dumazet24824a02010-10-02 06:11:55 +00006189#ifdef CONFIG_NET_CLS_ACT
6190 if (queue)
6191 return queue;
6192 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6193 if (!queue)
6194 return NULL;
6195 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006196 queue->qdisc = &noop_qdisc;
6197 queue->qdisc_sleeping = &noop_qdisc;
6198 rcu_assign_pointer(dev->ingress_queue, queue);
6199#endif
6200 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006201}
6202
Eric Dumazet2c60db02012-09-16 09:17:26 +00006203static const struct ethtool_ops default_ethtool_ops;
6204
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00006205void netdev_set_default_ethtool_ops(struct net_device *dev,
6206 const struct ethtool_ops *ops)
6207{
6208 if (dev->ethtool_ops == &default_ethtool_ops)
6209 dev->ethtool_ops = ops;
6210}
6211EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6212
Eric Dumazet74d332c2013-10-30 13:10:44 -07006213void netdev_freemem(struct net_device *dev)
6214{
6215 char *addr = (char *)dev - dev->padded;
6216
6217 if (is_vmalloc_addr(addr))
6218 vfree(addr);
6219 else
6220 kfree(addr);
6221}
6222
Linus Torvalds1da177e2005-04-16 15:20:36 -07006223/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006224 * alloc_netdev_mqs - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006225 * @sizeof_priv: size of private data to allocate space for
6226 * @name: device name format string
6227 * @setup: callback to initialize device
Tom Herbert36909ea2011-01-09 19:36:31 +00006228 * @txqs: the number of TX subqueues to allocate
6229 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006230 *
6231 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07006232 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006233 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006234 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006235struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6236 void (*setup)(struct net_device *),
6237 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006238{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006240 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006241 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006242
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006243 BUG_ON(strlen(name) >= sizeof(dev->name));
6244
Tom Herbert36909ea2011-01-09 19:36:31 +00006245 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006246 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006247 return NULL;
6248 }
6249
Tom Herbert36909ea2011-01-09 19:36:31 +00006250#ifdef CONFIG_RPS
6251 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006252 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006253 return NULL;
6254 }
6255#endif
6256
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006257 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006258 if (sizeof_priv) {
6259 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006260 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006261 alloc_size += sizeof_priv;
6262 }
6263 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006264 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006265
Eric Dumazet74d332c2013-10-30 13:10:44 -07006266 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6267 if (!p)
6268 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00006269 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006270 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006271
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006272 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006273 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006274
Eric Dumazet29b44332010-10-11 10:22:12 +00006275 dev->pcpu_refcnt = alloc_percpu(int);
6276 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07006277 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006278
Linus Torvalds1da177e2005-04-16 15:20:36 -07006279 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006280 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006281
Jiri Pirko22bedad32010-04-01 21:22:57 +00006282 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006283 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006284
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006285 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006286
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006287 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006288 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006289
Herbert Xud565b0a2008-12-15 23:38:52 -08006290 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006291 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006292 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006293 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006294 INIT_LIST_HEAD(&dev->adj_list.upper);
6295 INIT_LIST_HEAD(&dev->adj_list.lower);
6296 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6297 INIT_LIST_HEAD(&dev->all_adj_list.lower);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006298 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006299 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006300
6301 dev->num_tx_queues = txqs;
6302 dev->real_num_tx_queues = txqs;
6303 if (netif_alloc_netdev_queues(dev))
6304 goto free_all;
6305
6306#ifdef CONFIG_RPS
6307 dev->num_rx_queues = rxqs;
6308 dev->real_num_rx_queues = rxqs;
6309 if (netif_alloc_rx_queues(dev))
6310 goto free_all;
6311#endif
6312
Linus Torvalds1da177e2005-04-16 15:20:36 -07006313 strcpy(dev->name, name);
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006314 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006315 if (!dev->ethtool_ops)
6316 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006317 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006318
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006319free_all:
6320 free_netdev(dev);
6321 return NULL;
6322
Eric Dumazet29b44332010-10-11 10:22:12 +00006323free_pcpu:
6324 free_percpu(dev->pcpu_refcnt);
Eric Dumazet60877a32013-06-20 01:15:51 -07006325 netif_free_tx_queues(dev);
Tom Herbertfe822242010-11-09 10:47:38 +00006326#ifdef CONFIG_RPS
6327 kfree(dev->_rx);
6328#endif
6329
Eric Dumazet74d332c2013-10-30 13:10:44 -07006330free_dev:
6331 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006332 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006333}
Tom Herbert36909ea2011-01-09 19:36:31 +00006334EXPORT_SYMBOL(alloc_netdev_mqs);
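/*
 * Illustrative sketch, not part of dev.c: how a hypothetical driver
 * ("foo" is an assumption) might allocate a 4x4 multiqueue device.
 * Ethernet drivers normally reach this function through the
 * alloc_etherdev_mq()/alloc_etherdev() wrappers, which pass
 * ether_setup as the @setup callback.
 */
struct foo_priv {
	spinlock_t	lock;		/* driver-private state */
};

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);		/* sane Ethernet defaults */
	dev->flags |= IFF_NOARP;	/* example of a setup() tweak */
}

static struct net_device *foo_alloc(void)
{
	/* "foo%d": the unit number is filled in at register time */
	return alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
				foo_setup, 4, 4);
}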
Linus Torvalds1da177e2005-04-16 15:20:36 -07006335
6336/**
6337 * free_netdev - free network device
6338 * @dev: device
6339 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006340 * This function does the last stage of destroying an allocated device
6341 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006342 * If this is the last reference then it will be freed.
6343 */
6344void free_netdev(struct net_device *dev)
6345{
Herbert Xud565b0a2008-12-15 23:38:52 -08006346 struct napi_struct *p, *n;
6347
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006348 release_net(dev_net(dev));
6349
Eric Dumazet60877a32013-06-20 01:15:51 -07006350 netif_free_tx_queues(dev);
Tom Herbertfe822242010-11-09 10:47:38 +00006351#ifdef CONFIG_RPS
6352 kfree(dev->_rx);
6353#endif
David S. Millere8a04642008-07-17 00:34:19 -07006354
Eric Dumazet33d480c2011-08-11 19:30:52 +00006355 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006356
Jiri Pirkof001fde2009-05-05 02:48:28 +00006357 /* Flush device addresses */
6358 dev_addr_flush(dev);
6359
Herbert Xud565b0a2008-12-15 23:38:52 -08006360 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6361 netif_napi_del(p);
6362
Eric Dumazet29b44332010-10-11 10:22:12 +00006363 free_percpu(dev->pcpu_refcnt);
6364 dev->pcpu_refcnt = NULL;
6365
Stephen Hemminger3041a062006-05-26 13:25:24 -07006366 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006367 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07006368 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006369 return;
6370 }
6371
6372 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6373 dev->reg_state = NETREG_RELEASED;
6374
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006375 /* will free via device release */
6376 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006377}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006378EXPORT_SYMBOL(free_netdev);
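/*
 * Illustrative sketch, not part of dev.c: the canonical driver
 * lifecycle around free_netdev().  The "foo" names, including the
 * foo_setup callback, are assumptions.  Before register_netdev()
 * succeeds, reg_state is still NETREG_UNINITIALIZED and free_netdev()
 * releases the memory directly; after unregister_netdev() the final
 * put_device() takes care of it.
 */
static int foo_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(0, "foo%d", foo_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);	/* never registered: direct free */
		return err;
	}
	return 0;
}

static void foo_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes and drops the rtnl lock */
	free_netdev(dev);		/* drops the last reference */
}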
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006379
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006380/**
6381 * synchronize_net - Synchronize with packet receive processing
6382 *
6383 * Wait for packets currently being received to be done.
6384 * Does not block later packets from starting.
6385 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006386void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006387{
6388 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006389 if (rtnl_is_locked())
6390 synchronize_rcu_expedited();
6391 else
6392 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006393}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006394EXPORT_SYMBOL(synchronize_net);
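/*
 * Illustrative sketch, not part of dev.c: the usual pattern around
 * synchronize_net().  "struct foo" and its RCU-traversed list are
 * assumptions: unpublish the object so the receive path can no longer
 * find it, wait out packets already being processed, then free it.
 */
struct foo {
	struct list_head	list;	/* on an RCU-traversed list */
};

static void foo_unhook(struct foo *f)
{
	ASSERT_RTNL();
	list_del_rcu(&f->list);		/* no new readers can see it */
	synchronize_net();		/* packets already in flight drain */
	kfree(f);			/* now safe to free */
}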
Linus Torvalds1da177e2005-04-16 15:20:36 -07006395
6396/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006397 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006398 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006399 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006400 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006401 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006402 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006403 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006404 *
6405 * Callers must hold the rtnl semaphore. You may want
6406 * unregister_netdev() instead of this.
6407 */
6408
Eric Dumazet44a08732009-10-27 07:03:04 +00006409void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006410{
Herbert Xua6620712007-12-12 19:21:56 -08006411 ASSERT_RTNL();
6412
Eric Dumazet44a08732009-10-27 07:03:04 +00006413 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006414 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006415 } else {
6416 rollback_registered(dev);
6417 /* Finish processing unregister after unlock */
6418 net_set_todo(dev);
6419 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006420}
Eric Dumazet44a08732009-10-27 07:03:04 +00006421EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006422
6423/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006424 * unregister_netdevice_many - unregister many devices
6425 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006426 */
6427void unregister_netdevice_many(struct list_head *head)
6428{
6429 struct net_device *dev;
6430
6431 if (!list_empty(head)) {
6432 rollback_registered_many(head);
6433 list_for_each_entry(dev, head, unreg_list)
6434 net_set_todo(dev);
6435 }
6436}
Eric Dumazet63c80992009-10-27 07:06:49 +00006437EXPORT_SYMBOL(unregister_netdevice_many);
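/*
 * Illustrative sketch, not part of dev.c: batching teardown so that
 * the expensive synchronization inside rollback_registered_many() is
 * paid once per batch rather than once per device ("foo_link_ops" is
 * an assumption; its definition is elided).  This mirrors what
 * default_device_exit_batch() below does through ->dellink.
 */
static struct rtnl_link_ops foo_link_ops;	/* assumed; elided */

static void foo_destroy_all(struct net *net)
{
	struct net_device *dev;
	LIST_HEAD(kill_list);

	rtnl_lock();
	for_each_netdev(net, dev)
		if (dev->rtnl_link_ops == &foo_link_ops)
			unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}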
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006438
6439/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006440 * unregister_netdev - remove device from the kernel
6441 * @dev: device
6442 *
6443 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006444 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006445 *
6446 * This is just a wrapper for unregister_netdevice that takes
6447 * the rtnl semaphore. In general you want to use this and not
6448 * unregister_netdevice.
6449 */
6450void unregister_netdev(struct net_device *dev)
6451{
6452 rtnl_lock();
6453 unregister_netdevice(dev);
6454 rtnl_unlock();
6455}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006456EXPORT_SYMBOL(unregister_netdev);
6457
Eric W. Biedermance286d32007-09-12 13:53:49 +02006458/**
6459 * dev_change_net_namespace - move device to a different network namespace
6460 * @dev: device
6461 * @net: network namespace
6462 * @pat: If not NULL name pattern to try if the current device name
6463 * is already taken in the destination network namespace.
6464 *
6465 * This function shuts down a device interface and moves it
6466 * to a new network namespace. On success 0 is returned, on
6467 * a failure a negative errno code is returned.
6468 *
6469 * Callers must hold the rtnl semaphore.
6470 */
6471
6472int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6473{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006474 int err;
6475
6476 ASSERT_RTNL();
6477
6478 /* Don't allow namespace local devices to be moved. */
6479 err = -EINVAL;
6480 if (dev->features & NETIF_F_NETNS_LOCAL)
6481 goto out;
6482
6483	/* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006484 if (dev->reg_state != NETREG_REGISTERED)
6485 goto out;
6486
6487	/* Get out if there is nothing to do */
6488 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006489 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006490 goto out;
6491
6492 /* Pick the destination device name, and ensure
6493 * we can use it in the destination network namespace.
6494 */
6495 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006496 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006497 /* We get here if we can't use the current device name */
6498 if (!pat)
6499 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006500 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006501 goto out;
6502 }
6503
6504 /*
6505	 * And now a mini version of register_netdevice and unregister_netdevice.
6506 */
6507
6508 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006509 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006510
6511	/* And unlink it from the device chain */
6512 err = -ENODEV;
6513 unlist_netdevice(dev);
6514
6515 synchronize_net();
6516
6517 /* Shutdown queueing discipline. */
6518 dev_shutdown(dev);
6519
6520	/* Notify protocols that we are about to destroy
6521	   this device. They should clean up all their state.
David Lamparter3b27e102010-09-17 03:22:19 +00006522
6523 Note that dev->reg_state stays at NETREG_REGISTERED.
6524	   This is desired so that 8021q and macvlan know
6525 the device is just moving and can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006526 */
6527 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006528 rcu_barrier();
6529 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006530 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006531
6532 /*
6533 * Flush the unicast and multicast chains
6534 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006535 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006536 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006537
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006538 /* Send a netdev-removed uevent to the old namespace */
6539 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6540
Eric W. Biedermance286d32007-09-12 13:53:49 +02006541 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006542 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006543
Eric W. Biedermance286d32007-09-12 13:53:49 +02006544 /* If there is an ifindex conflict assign a new one */
6545 if (__dev_get_by_index(net, dev->ifindex)) {
6546 int iflink = (dev->iflink == dev->ifindex);
6547 dev->ifindex = dev_new_index(net);
6548 if (iflink)
6549 dev->iflink = dev->ifindex;
6550 }
6551
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006552 /* Send a netdev-add uevent to the new namespace */
6553 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6554
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006555 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006556 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006557 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006558
6559 /* Add the device back in the hashes */
6560 list_netdevice(dev);
6561
6562 /* Notify protocols, that a new device appeared. */
6563 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6564
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006565 /*
6566 * Prevent userspace races by waiting until the network
6567 * device is fully setup before sending notifications.
6568 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006569 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006570
Eric W. Biedermance286d32007-09-12 13:53:49 +02006571 synchronize_net();
6572 err = 0;
6573out:
6574 return err;
6575}
Johannes Berg463d0182009-07-14 00:33:35 +02006576EXPORT_SYMBOL_GPL(dev_change_net_namespace);
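/*
 * Illustrative sketch, not part of dev.c: moving a device into
 * another namespace under the rtnl lock ("foo_move" is an
 * assumption).  The "dev%d" pattern is only consulted when the
 * device's current name is already taken in @target.
 */
static int foo_move(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target, "dev%d");
	rtnl_unlock();
	return err;
}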
Eric W. Biedermance286d32007-09-12 13:53:49 +02006577
Linus Torvalds1da177e2005-04-16 15:20:36 -07006578static int dev_cpu_callback(struct notifier_block *nfb,
6579 unsigned long action,
6580 void *ocpu)
6581{
6582 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006583 struct sk_buff *skb;
6584 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6585 struct softnet_data *sd, *oldsd;
6586
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006587 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006588 return NOTIFY_OK;
6589
6590 local_irq_disable();
6591 cpu = smp_processor_id();
6592 sd = &per_cpu(softnet_data, cpu);
6593 oldsd = &per_cpu(softnet_data, oldcpu);
6594
6595 /* Find end of our completion_queue. */
6596 list_skb = &sd->completion_queue;
6597 while (*list_skb)
6598 list_skb = &(*list_skb)->next;
6599 /* Append completion queue from offline CPU. */
6600 *list_skb = oldsd->completion_queue;
6601 oldsd->completion_queue = NULL;
6602
Linus Torvalds1da177e2005-04-16 15:20:36 -07006603 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006604 if (oldsd->output_queue) {
6605 *sd->output_queue_tailp = oldsd->output_queue;
6606 sd->output_queue_tailp = oldsd->output_queue_tailp;
6607 oldsd->output_queue = NULL;
6608 oldsd->output_queue_tailp = &oldsd->output_queue;
6609 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006610 /* Append NAPI poll list from offline CPU. */
6611 if (!list_empty(&oldsd->poll_list)) {
6612 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6613 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6614 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006615
6616 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6617 local_irq_enable();
6618
6619 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006620 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6621 netif_rx(skb);
6622 input_queue_head_incr(oldsd);
6623 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006624 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625 netif_rx(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006626 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006627 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006628
6629 return NOTIFY_OK;
6630}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006631
6632
Herbert Xu7f353bf2007-08-10 15:47:58 -07006633/**
Herbert Xub63365a2008-10-23 01:11:29 -07006634 * netdev_increment_features - increment feature set by one
6635 * @all: current feature set
6636 * @one: new feature set
6637 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006638 *
6639 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006640 * @one to the master device with current feature set @all. Will not
6641 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006642 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006643netdev_features_t netdev_increment_features(netdev_features_t all,
6644 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006645{
Michał Mirosław1742f182011-04-22 06:31:16 +00006646 if (mask & NETIF_F_GEN_CSUM)
6647 mask |= NETIF_F_ALL_CSUM;
6648 mask |= NETIF_F_VLAN_CHALLENGED;
6649
6650 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6651 all &= one | ~NETIF_F_ALL_FOR_ALL;
6652
Michał Mirosław1742f182011-04-22 06:31:16 +00006653 /* If one device supports hw checksumming, set for all. */
6654 if (all & NETIF_F_GEN_CSUM)
6655 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006656
6657 return all;
6658}
Herbert Xub63365a2008-10-23 01:11:29 -07006659EXPORT_SYMBOL(netdev_increment_features);
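/*
 * Illustrative sketch, not part of dev.c: how a master driver
 * (bonding/team style; all "foo" names and the feature mask are
 * assumptions) typically folds slave feature sets together.  Starting
 * from the full mask, each slave can only take capabilities away,
 * never add anything outside the mask.
 */
#define FOO_VLAN_FEATURES	(NETIF_F_ALL_CSUM | NETIF_F_SG | \
				 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO)

struct foo_slave {
	struct list_head	list;
	struct net_device	*dev;
};

struct foo_master {
	struct list_head	slaves;
};

static netdev_features_t foo_compute_features(struct foo_master *m)
{
	struct foo_slave *slave;
	netdev_features_t features = FOO_VLAN_FEATURES;

	list_for_each_entry(slave, &m->slaves, list)
		features = netdev_increment_features(features,
						     slave->dev->features,
						     FOO_VLAN_FEATURES);
	return features;
}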
Herbert Xu7f353bf2007-08-10 15:47:58 -07006660
Baruch Siach430f03c2013-06-02 20:43:55 +00006661static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006662{
6663 int i;
6664 struct hlist_head *hash;
6665
6666 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6667 if (hash != NULL)
6668 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6669 INIT_HLIST_HEAD(&hash[i]);
6670
6671 return hash;
6672}
6673
Eric W. Biederman881d9662007-09-17 11:56:21 -07006674/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006675static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006676{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006677 if (net != &init_net)
6678 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006679
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006680 net->dev_name_head = netdev_create_hash();
6681 if (net->dev_name_head == NULL)
6682 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006683
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006684 net->dev_index_head = netdev_create_hash();
6685 if (net->dev_index_head == NULL)
6686 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006687
6688 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006689
6690err_idx:
6691 kfree(net->dev_name_head);
6692err_name:
6693 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006694}
6695
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006696/**
6697 * netdev_drivername - network driver for the device
6698 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006699 *
6700 * Determine network driver for device.
6701 */
David S. Miller3019de12011-06-06 16:41:33 -07006702const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006703{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006704 const struct device_driver *driver;
6705 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006706 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006707
6708 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006709 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006710 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006711
6712 driver = parent->driver;
6713 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006714 return driver->name;
6715 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006716}
6717
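/*
 * Illustrative sketch, not part of dev.c: netdev_drivername() is
 * handy in diagnostics; the transmit watchdog in sch_generic.c
 * prints it much like this ("foo_report_timeout" is an assumption).
 */
static void foo_report_timeout(struct net_device *dev, unsigned int txq)
{
	netdev_err(dev, "driver %s: TX queue %u timed out\n",
		   netdev_drivername(dev), txq);
}
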
Joe Perchesb004ff42012-09-12 20:12:19 -07006718static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006719 struct va_format *vaf)
6720{
6721 int r;
6722
Joe Perchesb004ff42012-09-12 20:12:19 -07006723 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006724 r = dev_printk_emit(level[1] - '0',
6725 dev->dev.parent,
6726 "%s %s %s: %pV",
6727 dev_driver_string(dev->dev.parent),
6728 dev_name(dev->dev.parent),
6729 netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006730 } else if (dev) {
Joe Perches256df2f2010-06-27 01:02:35 +00006731 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006732 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00006733 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006734 }
Joe Perches256df2f2010-06-27 01:02:35 +00006735
6736 return r;
6737}
6738
6739int netdev_printk(const char *level, const struct net_device *dev,
6740 const char *format, ...)
6741{
6742 struct va_format vaf;
6743 va_list args;
6744 int r;
6745
6746 va_start(args, format);
6747
6748 vaf.fmt = format;
6749 vaf.va = &args;
6750
6751 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006752
Joe Perches256df2f2010-06-27 01:02:35 +00006753 va_end(args);
6754
6755 return r;
6756}
6757EXPORT_SYMBOL(netdev_printk);
6758
6759#define define_netdev_printk_level(func, level) \
6760int func(const struct net_device *dev, const char *fmt, ...) \
6761{ \
6762 int r; \
6763 struct va_format vaf; \
6764 va_list args; \
6765 \
6766 va_start(args, fmt); \
6767 \
6768 vaf.fmt = fmt; \
6769 vaf.va = &args; \
6770 \
6771 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07006772 \
Joe Perches256df2f2010-06-27 01:02:35 +00006773 va_end(args); \
6774 \
6775 return r; \
6776} \
6777EXPORT_SYMBOL(func);
6778
6779define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6780define_netdev_printk_level(netdev_alert, KERN_ALERT);
6781define_netdev_printk_level(netdev_crit, KERN_CRIT);
6782define_netdev_printk_level(netdev_err, KERN_ERR);
6783define_netdev_printk_level(netdev_warn, KERN_WARNING);
6784define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6785define_netdev_printk_level(netdev_info, KERN_INFO);
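/*
 * Illustrative sketch, not part of dev.c: drivers use the helpers
 * defined above to get consistently prefixed messages such as
 * "foodrv 0000:01:00.0 eth0: link up" ("foo_link_change" is an
 * assumption).
 */
static void foo_link_change(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link is down\n");
}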
6786
Pavel Emelyanov46650792007-10-08 20:38:39 -07006787static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006788{
6789 kfree(net->dev_name_head);
6790 kfree(net->dev_index_head);
6791}
6792
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006793static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07006794 .init = netdev_init,
6795 .exit = netdev_exit,
6796};
6797
Pavel Emelyanov46650792007-10-08 20:38:39 -07006798static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006799{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006800 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02006801 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006802 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02006803 * initial network namespace
6804 */
6805 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006806 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006807 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006808 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02006809
6810		/* Ignore unmovable devices (i.e. loopback) */
6811 if (dev->features & NETIF_F_NETNS_LOCAL)
6812 continue;
6813
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006814 /* Leave virtual devices for the generic cleanup */
6815 if (dev->rtnl_link_ops)
6816 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08006817
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006818 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006819 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6820 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006821 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006822 pr_emerg("%s: failed to move %s to init_net: %d\n",
6823 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006824 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02006825 }
6826 }
6827 rtnl_unlock();
6828}
6829
Eric W. Biederman50624c92013-09-23 21:19:49 -07006830static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6831{
6832 /* Return with the rtnl_lock held when there are no network
6833 * devices unregistering in any network namespace in net_list.
6834 */
6835 struct net *net;
6836 bool unregistering;
6837 DEFINE_WAIT(wait);
6838
6839 for (;;) {
6840 prepare_to_wait(&netdev_unregistering_wq, &wait,
6841 TASK_UNINTERRUPTIBLE);
6842 unregistering = false;
6843 rtnl_lock();
6844 list_for_each_entry(net, net_list, exit_list) {
6845 if (net->dev_unreg_count > 0) {
6846 unregistering = true;
6847 break;
6848 }
6849 }
6850 if (!unregistering)
6851 break;
6852 __rtnl_unlock();
6853 schedule();
6854 }
6855 finish_wait(&netdev_unregistering_wq, &wait);
6856}
6857
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006858static void __net_exit default_device_exit_batch(struct list_head *net_list)
6859{
6860 /* At exit all network devices most be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04006861 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006862 * Do this across as many network namespaces as possible to
6863 * improve batching efficiency.
6864 */
6865 struct net_device *dev;
6866 struct net *net;
6867 LIST_HEAD(dev_kill_list);
6868
Eric W. Biederman50624c92013-09-23 21:19:49 -07006869 /* To prevent network device cleanup code from dereferencing
6870 * loopback devices or network devices that have been freed
6871 * wait here for all pending unregistrations to complete,
6872 * before unregistring the loopback device and allowing the
6873 * network namespace be freed.
6874 *
6875 * The netdev todo list containing all network devices
6876 * unregistrations that happen in default_device_exit_batch
6877 * will run in the rtnl_unlock() at the end of
6878 * default_device_exit_batch.
6879 */
6880 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006881 list_for_each_entry(net, net_list, exit_list) {
6882 for_each_netdev_reverse(net, dev) {
6883 if (dev->rtnl_link_ops)
6884 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6885 else
6886 unregister_netdevice_queue(dev, &dev_kill_list);
6887 }
6888 }
6889 unregister_netdevice_many(&dev_kill_list);
Eric Dumazetceaaec92011-02-17 22:59:19 +00006890 list_del(&dev_kill_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006891 rtnl_unlock();
6892}
6893
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006894static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006895 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006896 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02006897};
6898
Linus Torvalds1da177e2005-04-16 15:20:36 -07006899/*
6900 * Initialize the DEV module. At boot time this walks the device list and
6901 * unhooks any devices that fail to initialise (normally hardware not
6902 * present) and leaves us with a valid list of present and active devices.
6903 *
6904 */
6905
6906/*
6907 * This is called single threaded during boot, so no need
6908 * to take the rtnl semaphore.
6909 */
6910static int __init net_dev_init(void)
6911{
6912 int i, rc = -ENOMEM;
6913
6914 BUG_ON(!dev_boot_phase);
6915
Linus Torvalds1da177e2005-04-16 15:20:36 -07006916 if (dev_proc_init())
6917 goto out;
6918
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006919 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07006920 goto out;
6921
6922 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08006923 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006924 INIT_LIST_HEAD(&ptype_base[i]);
6925
Vlad Yasevich62532da2012-11-15 08:49:10 +00006926 INIT_LIST_HEAD(&offload_base);
6927
Eric W. Biederman881d9662007-09-17 11:56:21 -07006928 if (register_pernet_subsys(&netdev_net_ops))
6929 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006930
6931 /*
6932 * Initialise the packet receive queues.
6933 */
6934
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07006935 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006936 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006937
Changli Gaodee42872010-05-02 05:42:16 +00006938 memset(sd, 0, sizeof(*sd));
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006939 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07006940 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006941 sd->completion_queue = NULL;
6942 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00006943 sd->output_queue = NULL;
6944 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00006945#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006946 sd->csd.func = rps_trigger_softirq;
6947 sd->csd.info = sd;
6948 sd->csd.flags = 0;
6949 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07006950#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00006951
Eric Dumazete36fa2f2010-04-19 21:17:14 +00006952 sd->backlog.poll = process_backlog;
6953 sd->backlog.weight = weight_p;
6954 sd->backlog.gro_list = NULL;
6955 sd->backlog.gro_count = 0;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00006956
6957#ifdef CONFIG_NET_FLOW_LIMIT
6958 sd->flow_limit = NULL;
6959#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07006960 }
6961
Linus Torvalds1da177e2005-04-16 15:20:36 -07006962 dev_boot_phase = 0;
6963
Eric W. Biederman505d4f72008-11-07 22:54:20 -08006964	/* The loopback device is special: if any other network device
6965	 * is present in a network namespace, the loopback device must
6966	 * be present too. Since we now dynamically allocate and free the
6967	 * loopback device, ensure this invariant is maintained by
6968	 * keeping the loopback device as the first device on the
6969	 * list of network devices, so that the loopback device
6970	 * is the first device that appears and the last network device
6971	 * that disappears.
6972 */
6973 if (register_pernet_device(&loopback_net_ops))
6974 goto out;
6975
6976 if (register_pernet_device(&default_device_ops))
6977 goto out;
6978
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006979 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6980 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006981
6982 hotcpu_notifier(dev_cpu_callback, 0);
6983 dst_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006984 rc = 0;
6985out:
6986 return rc;
6987}
6988
6989subsys_initcall(net_dev_init);