/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
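
/*
 * Usage sketch (illustrative only, not part of this file): registering a
 * tap that sees every inbound packet. The names "my_tap" and "my_tap_rcv"
 * are hypothetical; the handler must free or otherwise consume the skb.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);		(typically from module init)
 *	dev_remove_pack(&my_tap);	(typically from module exit)
 */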

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
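
/*
 * Usage sketch (illustrative only): a protocol registers its GRO/GSO
 * callbacks much like net/ipv4/af_inet.c does for IP. The callback
 * names below are hypothetical placeholders.
 *
 *	static struct packet_offload my_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_packet_offload);
 */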

/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list. The function
 *	returns 0 on error and 1 on success. This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
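
/*
 * Example (derived from the parsing above): up to four integers are taken
 * as irq, base_addr, mem_start and mem_end, and the remaining string is
 * the device name, e.g. on the kernel command line:
 *
 *	netdev=5,0x340,0xd0000,0xd4000,eth0
 */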

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
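
/*
 * Usage sketch (illustrative only): an RCU lookup takes no reference, so
 * the device may only be used inside the read-side critical section.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		ifindex = dev->ifindex;	(use dev only under rcu_read_lock)
 *	rcu_read_unlock();
 */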

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
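
/*
 * Usage sketch (illustrative only): the refcounted variant may be used
 * from any context, but the reference must be dropped with dev_put().
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */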

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
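
/*
 * Usage sketch (illustrative only, address made up): looking up an
 * Ethernet device by MAC address under RCU.
 *
 *	static const char addr[ETH_ALEN] = {0x00, 0x16, 0x3e, 0x00, 0x00, 0x01};
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, addr);
 *	...
 *	rcu_read_unlock();
 */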

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work. We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
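
/*
 * For illustration, given the checks above: "eth0", "wlan-5" and even
 * "eth%d" are accepted, while "", ".", "..", "a/b", "a b" and any name
 * of IFNAMSIZ or more characters are rejected.
 */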

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
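
/*
 * Usage sketch (illustrative only): drivers commonly pass a "%d" pattern
 * and treat a negative return as failure.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;	(on success err is the unit number chosen)
 */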

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; format strings such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
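
/*
 * Usage sketch (illustrative only): dev_change_name() asserts the RTNL,
 * so callers must take it, and the device must be down (not IFF_UP).
 *
 *	rtnl_lock();
 *	err = dev_change_name(dev, "lan0");
 *	rtnl_unlock();
 */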

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
1171 */
1172int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1173{
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001174 char *new_ifalias;
1175
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001176 ASSERT_RTNL();
1177
1178 if (len >= IFALIASZ)
1179 return -EINVAL;
1180
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001181 if (!len) {
Sachin Kamat388dfc22012-11-20 00:57:04 +00001182 kfree(dev->ifalias);
1183 dev->ifalias = NULL;
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001184 return 0;
1185 }
1186
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001187 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1188 if (!new_ifalias)
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001189 return -ENOMEM;
Alexey Khoroshilov7364e442012-08-08 00:33:25 +00001190 dev->ifalias = new_ifalias;
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001191
1192 strlcpy(dev->ifalias, alias, len+1);
1193 return len;
1194}
1195
1196
1197/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001198 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001199 * @dev: device to cause notification
1200 *
1201 * Called to indicate a device has changed features.
1202 */
1203void netdev_features_change(struct net_device *dev)
1204{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001205 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001206}
1207EXPORT_SYMBOL(netdev_features_change);
1208
1209/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 * netdev_state_change - device changes state
1211 * @dev: device to cause notification
1212 *
1213 * Called to indicate a device has changed state. This function calls
1214 * the notifier chains for netdev_chain and sends a NEWLINK message
1215 * to the routing socket.
1216 */
1217void netdev_state_change(struct net_device *dev)
1218{
1219 if (dev->flags & IFF_UP) {
Loic Prylli5495119462014-07-01 21:39:43 -07001220 struct netdev_notifier_change_info change_info;
1221
1222 change_info.flags_changed = 0;
1223 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1224 &change_info.info);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001225 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 }
1227}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001228EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229
Amerigo Wangee89bab2012-08-09 22:14:56 +00001230/**
1231 * netdev_notify_peers - notify network peers about existence of @dev
1232 * @dev: network device
1233 *
1234 * Generate traffic such that interested network peers are aware of
1235 * @dev, such as by generating a gratuitous ARP. This may be used when
1236 * a device wants to inform the rest of the network about some sort of
1237 * reconfiguration such as a failover event or virtual machine
1238 * migration.
1239 */
1240void netdev_notify_peers(struct net_device *dev)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001241{
Amerigo Wangee89bab2012-08-09 22:14:56 +00001242 rtnl_lock();
1243 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1244 rtnl_unlock();
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001245}
Amerigo Wangee89bab2012-08-09 22:14:56 +00001246EXPORT_SYMBOL(netdev_notify_peers);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001247
Patrick McHardybd380812010-02-26 06:34:53 +00001248static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001250 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001251 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001253 ASSERT_RTNL();
1254
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 if (!netif_device_present(dev))
1256 return -ENODEV;
1257
	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this, there is a chance ndo_poll_controller
	 * or the NAPI poll routine may be running while we open the
	 * device.
	 */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001262 netpoll_poll_disable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001263
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001264 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1265 ret = notifier_to_errno(ret);
1266 if (ret)
1267 return ret;
1268
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001270
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001271 if (ops->ndo_validate_addr)
1272 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001273
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001274 if (!ret && ops->ndo_open)
1275 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276
Eric W. Biederman66b55522014-03-27 15:39:03 -07001277 netpoll_poll_enable(dev);
Neil Hormanca99ca12013-02-05 08:05:43 +00001278
Jeff Garzikbada3392007-10-23 20:19:37 -07001279 if (ret)
1280 clear_bit(__LINK_STATE_START, &dev->state);
1281 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 dev->flags |= IFF_UP;
David S. Millerb4bd07c2009-02-06 22:06:43 -08001283 net_dmaengine_get();
Patrick McHardy4417da62007-06-27 01:28:10 -07001284 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 dev_activate(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04001286 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001288
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 return ret;
1290}
Patrick McHardybd380812010-02-26 06:34:53 +00001291
1292/**
1293 * dev_open - prepare an interface for use.
1294 * @dev: device to open
1295 *
1296 * Takes a device from down to up state. The device's private open
1297 * function is invoked and then the multicast lists are loaded. Finally
1298 * the device is moved into the up state and a %NETDEV_UP message is
1299 * sent to the netdev notifier chain.
1300 *
1301 * Calling this function on an active interface is a nop. On a failure
1302 * a negative errno code is returned.
1303 */
1304int dev_open(struct net_device *dev)
1305{
1306 int ret;
1307
Patrick McHardybd380812010-02-26 06:34:53 +00001308 if (dev->flags & IFF_UP)
1309 return 0;
1310
Patrick McHardybd380812010-02-26 06:34:53 +00001311 ret = __dev_open(dev);
1312 if (ret < 0)
1313 return ret;
1314
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001315 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Patrick McHardybd380812010-02-26 06:34:53 +00001316 call_netdevice_notifiers(NETDEV_UP, dev);
1317
1318 return ret;
1319}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001320EXPORT_SYMBOL(dev_open);
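
/* A usage sketch (the helper name and lookup-by-name flow are
 * assumptions): dev_open() must be called under the RTNL lock, so a
 * caller typically takes it around the lookup and the open.
 */
static int __maybe_unused example_bring_up(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev)
		ret = dev_open(dev);
	rtnl_unlock();
	return ret;
}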
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321
Octavian Purdila44345722010-12-13 12:44:07 +00001322static int __dev_close_many(struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323{
Octavian Purdila44345722010-12-13 12:44:07 +00001324 struct net_device *dev;
Patrick McHardybd380812010-02-26 06:34:53 +00001325
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001326 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001327 might_sleep();
1328
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001329 list_for_each_entry(dev, head, close_list) {
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001330 /* Temporarily disable netpoll until the interface is down */
Eric W. Biederman66b55522014-03-27 15:39:03 -07001331 netpoll_poll_disable(dev);
Eric W. Biederman3f4df202014-03-27 15:38:17 -07001332
Octavian Purdila44345722010-12-13 12:44:07 +00001333 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334
Octavian Purdila44345722010-12-13 12:44:07 +00001335 clear_bit(__LINK_STATE_START, &dev->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336
		/* Synchronize to scheduled poll. We cannot touch the poll
		 * list; it can even be on a different cpu. So just clear
		 * netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +01001343 smp_mb__after_atomic(); /* Commit netif_running(). */
Octavian Purdila44345722010-12-13 12:44:07 +00001344 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345
Octavian Purdila44345722010-12-13 12:44:07 +00001346 dev_deactivate_many(head);
1347
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001348 list_for_each_entry(dev, head, close_list) {
Octavian Purdila44345722010-12-13 12:44:07 +00001349 const struct net_device_ops *ops = dev->netdev_ops;
1350
1351 /*
1352 * Call the device specific close. This cannot fail.
1353 * Only if device is UP
1354 *
1355 * We allow it to be called even after a DETACH hot-plug
1356 * event.
1357 */
1358 if (ops->ndo_stop)
1359 ops->ndo_stop(dev);
1360
Octavian Purdila44345722010-12-13 12:44:07 +00001361 dev->flags &= ~IFF_UP;
Octavian Purdila44345722010-12-13 12:44:07 +00001362 net_dmaengine_put();
Eric W. Biederman66b55522014-03-27 15:39:03 -07001363 netpoll_poll_enable(dev);
Octavian Purdila44345722010-12-13 12:44:07 +00001364 }
1365
1366 return 0;
1367}
1368
1369static int __dev_close(struct net_device *dev)
1370{
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001371 int retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001372 LIST_HEAD(single);
1373
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001374 list_add(&dev->close_list, &single);
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001375 retval = __dev_close_many(&single);
1376 list_del(&single);
Neil Hormanca99ca12013-02-05 08:05:43 +00001377
Linus Torvaldsf87e6f42011-02-17 22:54:38 +00001378 return retval;
Octavian Purdila44345722010-12-13 12:44:07 +00001379}
1380
Eric Dumazet3fbd8752011-01-19 21:23:22 +00001381static int dev_close_many(struct list_head *head)
Octavian Purdila44345722010-12-13 12:44:07 +00001382{
1383 struct net_device *dev, *tmp;
Octavian Purdila44345722010-12-13 12:44:07 +00001384
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001385 /* Remove the devices that don't need to be closed */
1386 list_for_each_entry_safe(dev, tmp, head, close_list)
Octavian Purdila44345722010-12-13 12:44:07 +00001387 if (!(dev->flags & IFF_UP))
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001388 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001389
1390 __dev_close_many(head);
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001391
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001392 list_for_each_entry_safe(dev, tmp, head, close_list) {
Alexei Starovoitov7f294052013-10-23 16:02:42 -07001393 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
Octavian Purdila44345722010-12-13 12:44:07 +00001394 call_netdevice_notifiers(NETDEV_DOWN, dev);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001395 list_del_init(&dev->close_list);
Octavian Purdila44345722010-12-13 12:44:07 +00001396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 return 0;
1399}
Patrick McHardybd380812010-02-26 06:34:53 +00001400
1401/**
1402 * dev_close - shutdown an interface.
1403 * @dev: device to shutdown
1404 *
1405 * This function moves an active device into down state. A
1406 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1407 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1408 * chain.
1409 */
1410int dev_close(struct net_device *dev)
1411{
Eric Dumazete14a5992011-05-10 12:26:06 -07001412 if (dev->flags & IFF_UP) {
1413 LIST_HEAD(single);
Patrick McHardybd380812010-02-26 06:34:53 +00001414
Eric W. Biederman5cde2822013-10-05 19:26:05 -07001415 list_add(&dev->close_list, &single);
Eric Dumazete14a5992011-05-10 12:26:06 -07001416 dev_close_many(&single);
1417 list_del(&single);
1418 }
dingtianhongda6e3782013-05-27 19:53:31 +00001419 return 0;
Patrick McHardybd380812010-02-26 06:34:53 +00001420}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001421EXPORT_SYMBOL(dev_close);
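
/* A sketch of the close_list batching used above (names are
 * assumptions, RTNL must be held): shutting two devices down with one
 * dev_close_many() call pays the synchronization cost in
 * __dev_close_many() only once.
 */
static void __maybe_unused example_close_pair(struct net_device *a,
					      struct net_device *b)
{
	LIST_HEAD(head);

	ASSERT_RTNL();
	list_add(&a->close_list, &head);
	list_add(&b->close_list, &head);
	dev_close_many(&head);	/* unlinks each close_list entry itself */
}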
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
1423
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001424/**
1425 * dev_disable_lro - disable Large Receive Offload on a device
1426 * @dev: device
1427 *
1428 * Disable Large Receive Offload (LRO) on a net device. Must be
1429 * called under RTNL. This is needed if received packets may be
1430 * forwarded to another interface.
1431 */
1432void dev_disable_lro(struct net_device *dev)
1433{
	/*
	 * If we're trying to disable LRO on a vlan device,
	 * use the underlying physical device instead.
	 */
1438 if (is_vlan_dev(dev))
1439 dev = vlan_dev_real_dev(dev);
1440
Michal Kubeček529d0482013-11-15 06:18:50 +01001441 /* the same for macvlan devices */
1442 if (netif_is_macvlan(dev))
1443 dev = macvlan_dev_real_dev(dev);
1444
Michał Mirosławbc5787c62011-11-15 15:29:55 +00001445 dev->wanted_features &= ~NETIF_F_LRO;
1446 netdev_update_features(dev);
Michał Mirosław27660512011-03-18 16:56:34 +00001447
Michał Mirosław22d59692011-04-21 12:42:15 +00001448 if (unlikely(dev->features & NETIF_F_LRO))
1449 netdev_WARN(dev, "failed to disable LRO!\n");
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001450}
1451EXPORT_SYMBOL(dev_disable_lro);
1452
Jiri Pirko351638e2013-05-28 01:30:21 +00001453static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1454 struct net_device *dev)
1455{
1456 struct netdev_notifier_info info;
1457
1458 netdev_notifier_info_init(&info, dev);
1459 return nb->notifier_call(nb, val, &info);
1460}
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001461
Eric W. Biederman881d9662007-09-17 11:56:21 -07001462static int dev_boot_phase = 1;
1463
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464/**
1465 * register_netdevice_notifier - register a network notifier block
1466 * @nb: notifier
1467 *
1468 * Register a notifier to be called when network device events occur.
1469 * The notifier passed is linked into the kernel structures and must
1470 * not be reused until it has been unregistered. A negative errno code
1471 * is returned on a failure.
1472 *
 * When registered, all registration and up events are replayed
 * to the new notifier to give it a race-free view of the network
 * device list.
1476 */
1477
1478int register_netdevice_notifier(struct notifier_block *nb)
1479{
1480 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001481 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001482 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 int err;
1484
1485 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001486 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001487 if (err)
1488 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001489 if (dev_boot_phase)
1490 goto unlock;
1491 for_each_net(net) {
1492 for_each_netdev(net, dev) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001493 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001494 err = notifier_to_errno(err);
1495 if (err)
1496 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
Eric W. Biederman881d9662007-09-17 11:56:21 -07001498 if (!(dev->flags & IFF_UP))
1499 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001500
Jiri Pirko351638e2013-05-28 01:30:21 +00001501 call_netdevice_notifier(nb, NETDEV_UP, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001504
1505unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 rtnl_unlock();
1507 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001508
1509rollback:
1510 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001511 for_each_net(net) {
1512 for_each_netdev(net, dev) {
1513 if (dev == last)
RongQing.Li8f891482011-11-30 23:43:07 -05001514 goto outroll;
Herbert Xufcc5a032007-07-30 17:03:38 -07001515
Eric W. Biederman881d9662007-09-17 11:56:21 -07001516 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001517 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1518 dev);
1519 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001520 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001521 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001522 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001523 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001524
RongQing.Li8f891482011-11-30 23:43:07 -05001525outroll:
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001526 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001527 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001529EXPORT_SYMBOL(register_netdevice_notifier);
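
/* A minimal sketch of a register_netdevice_notifier() client; the
 * callback and notifier_block names are assumptions. Because UP events
 * are replayed at registration time, the callback also sees devices
 * that were already up when it was registered.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("%s: up\n", dev->name);
		break;
	case NETDEV_DOWN:
		pr_info("%s: down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb __maybe_unused = {
	.notifier_call = example_netdev_event,
};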
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530
1531/**
1532 * unregister_netdevice_notifier - unregister a network notifier block
1533 * @nb: notifier
1534 *
1535 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering, unregister and down device events are synthesized
 * for all devices on the device list and delivered to the removed
 * notifier, removing the need for special-case cleanup code.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 */
1544
1545int unregister_netdevice_notifier(struct notifier_block *nb)
1546{
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001547 struct net_device *dev;
1548 struct net *net;
Herbert Xu9f514952006-03-25 01:24:25 -08001549 int err;
1550
1551 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001552 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001553 if (err)
1554 goto unlock;
1555
1556 for_each_net(net) {
1557 for_each_netdev(net, dev) {
1558 if (dev->flags & IFF_UP) {
Jiri Pirko351638e2013-05-28 01:30:21 +00001559 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1560 dev);
1561 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001562 }
Jiri Pirko351638e2013-05-28 01:30:21 +00001563 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
Eric W. Biederman7d3d43d2012-04-06 15:33:35 +00001564 }
1565 }
1566unlock:
Herbert Xu9f514952006-03-25 01:24:25 -08001567 rtnl_unlock();
1568 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001570EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571
1572/**
Jiri Pirko351638e2013-05-28 01:30:21 +00001573 * call_netdevice_notifiers_info - call all network notifier blocks
1574 * @val: value passed unmodified to notifier function
1575 * @dev: net_device pointer passed unmodified to notifier function
1576 * @info: notifier information data
1577 *
1578 * Call all network notifier blocks. Parameters and return value
1579 * are as for raw_notifier_call_chain().
1580 */
1581
stephen hemminger1d143d92013-12-29 14:01:29 -08001582static int call_netdevice_notifiers_info(unsigned long val,
1583 struct net_device *dev,
1584 struct netdev_notifier_info *info)
Jiri Pirko351638e2013-05-28 01:30:21 +00001585{
1586 ASSERT_RTNL();
1587 netdev_notifier_info_init(info, dev);
1588 return raw_notifier_call_chain(&netdev_chain, val, info);
1589}
Jiri Pirko351638e2013-05-28 01:30:21 +00001590
1591/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 * call_netdevice_notifiers - call all network notifier blocks
1593 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001594 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 *
1596 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001597 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 */
1599
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001600int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601{
Jiri Pirko351638e2013-05-28 01:30:21 +00001602 struct netdev_notifier_info info;
1603
1604 return call_netdevice_notifiers_info(val, dev, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605}
stephen hemmingeredf947f2011-03-24 13:24:01 +00001606EXPORT_SYMBOL(call_netdevice_notifiers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Ingo Molnarc5905af2012-02-24 08:31:31 +01001608static struct static_key netstamp_needed __read_mostly;
Eric Dumazetb90e5792011-11-28 11:16:50 +00001609#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
1614static atomic_t netstamp_needed_deferred;
1615#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616
1617void net_enable_timestamp(void)
1618{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001619#ifdef HAVE_JUMP_LABEL
1620 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1621
1622 if (deferred) {
1623 while (--deferred)
Ingo Molnarc5905af2012-02-24 08:31:31 +01001624 static_key_slow_dec(&netstamp_needed);
Eric Dumazetb90e5792011-11-28 11:16:50 +00001625 return;
1626 }
1627#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001628 static_key_slow_inc(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001630EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631
1632void net_disable_timestamp(void)
1633{
Eric Dumazetb90e5792011-11-28 11:16:50 +00001634#ifdef HAVE_JUMP_LABEL
1635 if (in_interrupt()) {
1636 atomic_inc(&netstamp_needed_deferred);
1637 return;
1638 }
1639#endif
Ingo Molnarc5905af2012-02-24 08:31:31 +01001640 static_key_slow_dec(&netstamp_needed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001642EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643
Eric Dumazet3b098e22010-05-15 23:57:10 -07001644static inline void net_timestamp_set(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645{
Eric Dumazet588f0332011-11-15 04:12:55 +00001646 skb->tstamp.tv64 = 0;
Ingo Molnarc5905af2012-02-24 08:31:31 +01001647 if (static_key_false(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001648 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649}
1650
Eric Dumazet588f0332011-11-15 04:12:55 +00001651#define net_timestamp_check(COND, SKB) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001652 if (static_key_false(&netstamp_needed)) { \
Eric Dumazet588f0332011-11-15 04:12:55 +00001653 if ((COND) && !(SKB)->tstamp.tv64) \
1654 __net_timestamp(SKB); \
1655 } \
Eric Dumazet3b098e22010-05-15 23:57:10 -07001656
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001657bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001658{
1659 unsigned int len;
1660
1661 if (!(dev->flags & IFF_UP))
1662 return false;
1663
1664 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1665 if (skb->len <= len)
1666 return true;
1667
	/* if TSO is enabled, we don't care about the length, as the packet
	 * could be forwarded without being segmented beforehand
	 */
1671 if (skb_is_gso(skb))
1672 return true;
1673
1674 return false;
1675}
Vlad Yasevich1ee481f2014-03-27 17:32:29 -04001676EXPORT_SYMBOL_GPL(is_skb_forwardable);
Daniel Lezcano79b569f2011-03-30 02:42:17 -07001677
Herbert Xua0265d22014-04-17 13:45:03 +08001678int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1679{
1680 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1681 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1682 atomic_long_inc(&dev->rx_dropped);
1683 kfree_skb(skb);
1684 return NET_RX_DROP;
1685 }
1686 }
1687
1688 if (unlikely(!is_skb_forwardable(dev, skb))) {
1689 atomic_long_inc(&dev->rx_dropped);
1690 kfree_skb(skb);
1691 return NET_RX_DROP;
1692 }
1693
1694 skb_scrub_packet(skb, true);
1695 skb->protocol = eth_type_trans(skb, dev);
1696
1697 return 0;
1698}
1699EXPORT_SYMBOL_GPL(__dev_forward_skb);
1700
Arnd Bergmann44540962009-11-26 06:07:08 +00001701/**
1702 * dev_forward_skb - loopback an skb to another netif
1703 *
1704 * @dev: destination network device
1705 * @skb: buffer to forward
1706 *
1707 * return values:
1708 * NET_RX_SUCCESS (no congestion)
Eric Dumazet6ec82562010-05-06 00:53:53 -07001709 * NET_RX_DROP (packet was dropped, but freed)
Arnd Bergmann44540962009-11-26 06:07:08 +00001710 *
1711 * dev_forward_skb can be used for injecting an skb from the
1712 * start_xmit function of one device into the receive queue
1713 * of another device.
1714 *
1715 * The receiving device may be in another namespace, so
1716 * we have to clear all information in the skb that could
1717 * impact namespace isolation.
1718 */
1719int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1720{
Herbert Xua0265d22014-04-17 13:45:03 +08001721 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
Arnd Bergmann44540962009-11-26 06:07:08 +00001722}
1723EXPORT_SYMBOL_GPL(dev_forward_skb);
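
/* A sketch of the injection pattern described above, modelled on what
 * veth-style drivers do; the priv layout and names are assumptions. A
 * hypothetical pair device hands each transmitted skb to its peer's
 * receive path via dev_forward_skb().
 */
struct example_pair_priv {
	struct net_device *peer;
};

static netdev_tx_t __maybe_unused example_pair_xmit(struct sk_buff *skb,
						    struct net_device *dev)
{
	struct example_pair_priv *priv = netdev_priv(dev);

	if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	/* the skb is consumed by dev_forward_skb() in both cases */
	return NETDEV_TX_OK;
}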
1724
Changli Gao71d9dec2010-12-15 19:57:25 +00001725static inline int deliver_skb(struct sk_buff *skb,
1726 struct packet_type *pt_prev,
1727 struct net_device *orig_dev)
1728{
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00001729 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1730 return -ENOMEM;
Changli Gao71d9dec2010-12-15 19:57:25 +00001731 atomic_inc(&skb->users);
1732 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1733}
1734
Eric Leblondc0de08d2012-08-16 22:02:58 +00001735static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1736{
Eric Leblonda3d744e2012-11-06 02:10:10 +00001737 if (!ptype->af_packet_priv || !skb->sk)
Eric Leblondc0de08d2012-08-16 22:02:58 +00001738 return false;
1739
1740 if (ptype->id_match)
1741 return ptype->id_match(ptype, skb->sk);
1742 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1743 return true;
1744
1745 return false;
1746}
1747
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748/*
1749 * Support routine. Sends outgoing frames to any network
1750 * taps currently in use.
1751 */
1752
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001753static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754{
1755 struct packet_type *ptype;
Changli Gao71d9dec2010-12-15 19:57:25 +00001756 struct sk_buff *skb2 = NULL;
1757 struct packet_type *pt_prev = NULL;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001758
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 rcu_read_lock();
1760 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1761 /* Never send packets back to the socket
1762 * they originated from - MvS (miquels@drinkel.ow.org)
1763 */
1764 if ((ptype->dev == dev || !ptype->dev) &&
Eric Leblondc0de08d2012-08-16 22:02:58 +00001765 (!skb_loop_sk(ptype, skb))) {
Changli Gao71d9dec2010-12-15 19:57:25 +00001766 if (pt_prev) {
1767 deliver_skb(skb2, pt_prev, skb->dev);
1768 pt_prev = ptype;
1769 continue;
1770 }
1771
1772 skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 if (!skb2)
1774 break;
1775
Eric Dumazet70978182010-12-20 21:22:51 +00001776 net_timestamp_set(skb2);
1777
			/* skb->nh should be correctly set by sender, so
			 * that the second statement is just protection
			 * against buggy protocols.
			 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001782 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001784 if (skb_network_header(skb2) < skb2->data ||
Simon Hormanced14f62013-05-28 20:34:25 +00001785 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
Joe Perchese87cc472012-05-13 21:56:26 +00001786 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1787 ntohs(skb2->protocol),
1788 dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001789 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 }
1791
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001792 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 skb2->pkt_type = PACKET_OUTGOING;
Changli Gao71d9dec2010-12-15 19:57:25 +00001794 pt_prev = ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 }
1796 }
Changli Gao71d9dec2010-12-15 19:57:25 +00001797 if (pt_prev)
1798 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 rcu_read_unlock();
1800}
1801
Ben Hutchings2c530402012-07-10 10:55:09 +00001802/**
1803 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
John Fastabend4f57c082011-01-17 08:06:04 +00001804 * @dev: Network device
1805 * @txq: number of queues available
1806 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, zero the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid nothing can be done, so priority mappings are
 * disabled entirely. It is expected that drivers will fix this
 * mapping if they can before calling netif_set_real_num_tx_queues.
1814 */
Eric Dumazetbb134d22011-01-20 19:18:08 +00001815static void netif_setup_tc(struct net_device *dev, unsigned int txq)
John Fastabend4f57c082011-01-17 08:06:04 +00001816{
1817 int i;
1818 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1819
1820 /* If TC0 is invalidated disable TC mapping */
1821 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001822 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
John Fastabend4f57c082011-01-17 08:06:04 +00001823 dev->num_tc = 0;
1824 return;
1825 }
1826
1827 /* Invalidated prio to tc mappings set to TC0 */
1828 for (i = 1; i < TC_BITMASK + 1; i++) {
1829 int q = netdev_get_prio_tc_map(dev, i);
1830
1831 tc = &dev->tc_to_txq[q];
1832 if (tc->offset + tc->count > txq) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00001833 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1834 i, q);
John Fastabend4f57c082011-01-17 08:06:04 +00001835 netdev_set_prio_tc_map(dev, i, 0);
1836 }
1837 }
1838}
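
/* A sketch of how a driver might populate the tc mappings validated
 * above: two traffic classes over eight queues. The helper name and
 * the queue layout are assumptions for the example.
 */
static void __maybe_unused example_setup_two_tcs(struct net_device *dev)
{
	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0-3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4-7 */
	netdev_set_prio_tc_map(dev, 0, 0);	/* prio 0 -> TC0 */
	netdev_set_prio_tc_map(dev, 1, 1);	/* prio 1 -> TC1 */
}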
1839
Alexander Duyck537c00d2013-01-10 08:57:02 +00001840#ifdef CONFIG_XPS
1841static DEFINE_MUTEX(xps_map_mutex);
1842#define xmap_dereference(P) \
1843 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1844
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001845static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1846 int cpu, u16 index)
1847{
1848 struct xps_map *map = NULL;
1849 int pos;
1850
1851 if (dev_maps)
1852 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1853
1854 for (pos = 0; map && pos < map->len; pos++) {
1855 if (map->queues[pos] == index) {
1856 if (map->len > 1) {
1857 map->queues[pos] = map->queues[--map->len];
1858 } else {
1859 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1860 kfree_rcu(map, rcu);
1861 map = NULL;
1862 }
1863 break;
1864 }
1865 }
1866
1867 return map;
1868}
1869
Alexander Duyck024e9672013-01-10 08:57:46 +00001870static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001871{
1872 struct xps_dev_maps *dev_maps;
Alexander Duyck024e9672013-01-10 08:57:46 +00001873 int cpu, i;
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001874 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001875
1876 mutex_lock(&xps_map_mutex);
1877 dev_maps = xmap_dereference(dev->xps_maps);
1878
1879 if (!dev_maps)
1880 goto out_no_maps;
1881
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001882 for_each_possible_cpu(cpu) {
Alexander Duyck024e9672013-01-10 08:57:46 +00001883 for (i = index; i < dev->num_tx_queues; i++) {
1884 if (!remove_xps_queue(dev_maps, cpu, i))
1885 break;
1886 }
1887 if (i == dev->num_tx_queues)
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001888 active = true;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001889 }
1890
Alexander Duyck10cdc3f2013-01-10 08:57:17 +00001891 if (!active) {
Alexander Duyck537c00d2013-01-10 08:57:02 +00001892 RCU_INIT_POINTER(dev->xps_maps, NULL);
1893 kfree_rcu(dev_maps, rcu);
1894 }
1895
Alexander Duyck024e9672013-01-10 08:57:46 +00001896 for (i = index; i < dev->num_tx_queues; i++)
1897 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1898 NUMA_NO_NODE);
1899
Alexander Duyck537c00d2013-01-10 08:57:02 +00001900out_no_maps:
1901 mutex_unlock(&xps_map_mutex);
1902}
1903
Alexander Duyck01c5f862013-01-10 08:57:35 +00001904static struct xps_map *expand_xps_map(struct xps_map *map,
1905 int cpu, u16 index)
1906{
1907 struct xps_map *new_map;
1908 int alloc_len = XPS_MIN_MAP_ALLOC;
1909 int i, pos;
1910
1911 for (pos = 0; map && pos < map->len; pos++) {
1912 if (map->queues[pos] != index)
1913 continue;
1914 return map;
1915 }
1916
1917 /* Need to add queue to this CPU's existing map */
1918 if (map) {
1919 if (pos < map->alloc_len)
1920 return map;
1921
1922 alloc_len = map->alloc_len * 2;
1923 }
1924
1925 /* Need to allocate new map to store queue on this CPU's map */
1926 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1927 cpu_to_node(cpu));
1928 if (!new_map)
1929 return NULL;
1930
1931 for (i = 0; i < pos; i++)
1932 new_map->queues[i] = map->queues[i];
1933 new_map->alloc_len = alloc_len;
1934 new_map->len = pos;
1935
1936 return new_map;
1937}
1938
Michael S. Tsirkin35735402013-10-02 09:14:06 +03001939int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1940 u16 index)
Alexander Duyck537c00d2013-01-10 08:57:02 +00001941{
Alexander Duyck01c5f862013-01-10 08:57:35 +00001942 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001943 struct xps_map *map, *new_map;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001944 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001945 int cpu, numa_node_id = -2;
1946 bool active = false;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001947
1948 mutex_lock(&xps_map_mutex);
1949
1950 dev_maps = xmap_dereference(dev->xps_maps);
1951
Alexander Duyck01c5f862013-01-10 08:57:35 +00001952 /* allocate memory for queue storage */
1953 for_each_online_cpu(cpu) {
1954 if (!cpumask_test_cpu(cpu, mask))
1955 continue;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001956
Alexander Duyck01c5f862013-01-10 08:57:35 +00001957 if (!new_dev_maps)
1958 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001959 if (!new_dev_maps) {
1960 mutex_unlock(&xps_map_mutex);
Alexander Duyck01c5f862013-01-10 08:57:35 +00001961 return -ENOMEM;
Alexander Duyck2bb60cb2013-02-22 06:38:44 +00001962 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001963
1964 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1965 NULL;
1966
1967 map = expand_xps_map(map, cpu, index);
1968 if (!map)
1969 goto error;
1970
1971 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1972 }
1973
1974 if (!new_dev_maps)
1975 goto out_no_new_maps;
1976
1977 for_each_possible_cpu(cpu) {
1978 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1979 /* add queue to CPU maps */
1980 int pos = 0;
1981
1982 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1983 while ((pos < map->len) && (map->queues[pos] != index))
1984 pos++;
1985
1986 if (pos == map->len)
1987 map->queues[map->len++] = index;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001988#ifdef CONFIG_NUMA
Alexander Duyck537c00d2013-01-10 08:57:02 +00001989 if (numa_node_id == -2)
1990 numa_node_id = cpu_to_node(cpu);
1991 else if (numa_node_id != cpu_to_node(cpu))
1992 numa_node_id = -1;
Alexander Duyck537c00d2013-01-10 08:57:02 +00001993#endif
Alexander Duyck01c5f862013-01-10 08:57:35 +00001994 } else if (dev_maps) {
1995 /* fill in the new device map from the old device map */
1996 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1997 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
Alexander Duyck537c00d2013-01-10 08:57:02 +00001998 }
Alexander Duyck01c5f862013-01-10 08:57:35 +00001999
Alexander Duyck537c00d2013-01-10 08:57:02 +00002000 }
2001
Alexander Duyck01c5f862013-01-10 08:57:35 +00002002 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2003
Alexander Duyck537c00d2013-01-10 08:57:02 +00002004 /* Cleanup old maps */
Alexander Duyck01c5f862013-01-10 08:57:35 +00002005 if (dev_maps) {
2006 for_each_possible_cpu(cpu) {
2007 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2008 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2009 if (map && map != new_map)
2010 kfree_rcu(map, rcu);
2011 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002012
Alexander Duyck537c00d2013-01-10 08:57:02 +00002013 kfree_rcu(dev_maps, rcu);
Alexander Duyck01c5f862013-01-10 08:57:35 +00002014 }
Alexander Duyck537c00d2013-01-10 08:57:02 +00002015
Alexander Duyck01c5f862013-01-10 08:57:35 +00002016 dev_maps = new_dev_maps;
2017 active = true;
2018
2019out_no_new_maps:
2020 /* update Tx queue numa node */
Alexander Duyck537c00d2013-01-10 08:57:02 +00002021 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2022 (numa_node_id >= 0) ? numa_node_id :
2023 NUMA_NO_NODE);
2024
Alexander Duyck01c5f862013-01-10 08:57:35 +00002025 if (!dev_maps)
2026 goto out_no_maps;
2027
2028 /* removes queue from unused CPUs */
2029 for_each_possible_cpu(cpu) {
2030 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2031 continue;
2032
2033 if (remove_xps_queue(dev_maps, cpu, index))
2034 active = true;
2035 }
2036
2037 /* free map if not active */
2038 if (!active) {
2039 RCU_INIT_POINTER(dev->xps_maps, NULL);
2040 kfree_rcu(dev_maps, rcu);
2041 }
2042
2043out_no_maps:
Alexander Duyck537c00d2013-01-10 08:57:02 +00002044 mutex_unlock(&xps_map_mutex);
2045
2046 return 0;
2047error:
Alexander Duyck01c5f862013-01-10 08:57:35 +00002048 /* remove any maps that we added */
2049 for_each_possible_cpu(cpu) {
2050 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2051 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2052 NULL;
2053 if (new_map && new_map != map)
2054 kfree(new_map);
2055 }
2056
Alexander Duyck537c00d2013-01-10 08:57:02 +00002057 mutex_unlock(&xps_map_mutex);
2058
Alexander Duyck537c00d2013-01-10 08:57:02 +00002059 kfree(new_dev_maps);
2060 return -ENOMEM;
2061}
2062EXPORT_SYMBOL(netif_set_xps_queue);
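
/* A usage sketch for netif_set_xps_queue(): pin transmit queue 0 to
 * the first two CPUs. The helper name is an assumption, and CPU 1 is
 * only honoured by the map code if it is online.
 */
static int __maybe_unused example_pin_txq0(struct net_device *dev)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);
	ret = netif_set_xps_queue(dev, mask, 0);
	free_cpumask_var(mask);
	return ret;
}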
2063
2064#endif
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
Tom Herberte6484932010-10-18 18:04:39 +00002069int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
John Fastabendf0796d52010-07-01 13:21:57 +00002070{
Tom Herbert1d24eb42010-11-21 13:17:27 +00002071 int rc;
2072
Tom Herberte6484932010-10-18 18:04:39 +00002073 if (txq < 1 || txq > dev->num_tx_queues)
2074 return -EINVAL;
John Fastabendf0796d52010-07-01 13:21:57 +00002075
Ben Hutchings5c565802011-02-15 19:39:21 +00002076 if (dev->reg_state == NETREG_REGISTERED ||
2077 dev->reg_state == NETREG_UNREGISTERING) {
Tom Herberte6484932010-10-18 18:04:39 +00002078 ASSERT_RTNL();
2079
Tom Herbert1d24eb42010-11-21 13:17:27 +00002080 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2081 txq);
Tom Herbertbf264142010-11-26 08:36:09 +00002082 if (rc)
2083 return rc;
2084
John Fastabend4f57c082011-01-17 08:06:04 +00002085 if (dev->num_tc)
2086 netif_setup_tc(dev, txq);
2087
Alexander Duyck024e9672013-01-10 08:57:46 +00002088 if (txq < dev->real_num_tx_queues) {
Tom Herberte6484932010-10-18 18:04:39 +00002089 qdisc_reset_all_tx_gt(dev, txq);
Alexander Duyck024e9672013-01-10 08:57:46 +00002090#ifdef CONFIG_XPS
2091 netif_reset_xps_queues_gt(dev, txq);
2092#endif
2093 }
John Fastabendf0796d52010-07-01 13:21:57 +00002094 }
Tom Herberte6484932010-10-18 18:04:39 +00002095
2096 dev->real_num_tx_queues = txq;
2097 return 0;
John Fastabendf0796d52010-07-01 13:21:57 +00002098}
2099EXPORT_SYMBOL(netif_set_real_num_tx_queues);
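
/* A sketch of a driver shrinking its active tx queue set, e.g. after
 * losing interrupt vectors; the count of four is an assumption. RTNL
 * must be held once the device is registered.
 */
static int __maybe_unused example_shrink_tx_queues(struct net_device *dev)
{
	unsigned int txq = min_t(unsigned int, 4, dev->num_tx_queues);

	return netif_set_real_num_tx_queues(dev, txq);
}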
Denis Vlasenko56079432006-03-29 15:57:29 -08002100
Michael Daltona953be52014-01-16 22:23:28 -08002101#ifdef CONFIG_SYSFS
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002102/**
2103 * netif_set_real_num_rx_queues - set actual number of RX queues used
2104 * @dev: Network device
2105 * @rxq: Actual number of RX queues
2106 *
2107 * This must be called either with the rtnl_lock held or before
2108 * registration of the net device. Returns 0 on success, or a
Ben Hutchings4e7f7952010-10-08 10:33:39 -07002109 * negative error code. If called before registration, it always
2110 * succeeds.
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002111 */
2112int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2113{
2114 int rc;
2115
Tom Herbertbd25fa72010-10-18 18:00:16 +00002116 if (rxq < 1 || rxq > dev->num_rx_queues)
2117 return -EINVAL;
2118
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002119 if (dev->reg_state == NETREG_REGISTERED) {
2120 ASSERT_RTNL();
2121
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002122 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2123 rxq);
2124 if (rc)
2125 return rc;
Ben Hutchings62fe0b42010-09-27 08:24:33 +00002126 }
2127
2128 dev->real_num_rx_queues = rxq;
2129 return 0;
2130}
2131EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2132#endif
2133
Ben Hutchings2c530402012-07-10 10:55:09 +00002134/**
2135 * netif_get_num_default_rss_queues - default number of RSS queues
Yuval Mintz16917b82012-07-01 03:18:50 +00002136 *
2137 * This routine should set an upper limit on the number of RSS queues
2138 * used by default by multiqueue devices.
2139 */
Ben Hutchingsa55b1382012-07-10 10:54:38 +00002140int netif_get_num_default_rss_queues(void)
Yuval Mintz16917b82012-07-01 03:18:50 +00002141{
2142 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2143}
2144EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2145
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002146static inline void __netif_reschedule(struct Qdisc *q)
2147{
2148 struct softnet_data *sd;
2149 unsigned long flags;
2150
2151 local_irq_save(flags);
2152 sd = &__get_cpu_var(softnet_data);
Changli Gaoa9cbd582010-04-26 23:06:24 +00002153 q->next_sched = NULL;
2154 *sd->output_queue_tailp = q;
2155 sd->output_queue_tailp = &q->next_sched;
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002156 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2157 local_irq_restore(flags);
2158}
2159
David S. Miller37437bb2008-07-16 02:15:04 -07002160void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08002161{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002162 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2163 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08002164}
2165EXPORT_SYMBOL(__netif_schedule);
2166
Eric Dumazete6247022013-12-05 04:45:08 -08002167struct dev_kfree_skb_cb {
2168 enum skb_free_reason reason;
2169};
2170
2171static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08002172{
Eric Dumazete6247022013-12-05 04:45:08 -08002173 return (struct dev_kfree_skb_cb *)skb->cb;
Denis Vlasenko56079432006-03-29 15:57:29 -08002174}
Denis Vlasenko56079432006-03-29 15:57:29 -08002175
Eric Dumazete6247022013-12-05 04:45:08 -08002176void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2177{
2178 unsigned long flags;
2179
2180 if (likely(atomic_read(&skb->users) == 1)) {
2181 smp_rmb();
2182 atomic_set(&skb->users, 0);
2183 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2184 return;
2185 }
2186 get_kfree_skb_cb(skb)->reason = reason;
2187 local_irq_save(flags);
2188 skb->next = __this_cpu_read(softnet_data.completion_queue);
2189 __this_cpu_write(softnet_data.completion_queue, skb);
2190 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2191 local_irq_restore(flags);
2192}
2193EXPORT_SYMBOL(__dev_kfree_skb_irq);
2194
2195void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
Denis Vlasenko56079432006-03-29 15:57:29 -08002196{
2197 if (in_irq() || irqs_disabled())
Eric Dumazete6247022013-12-05 04:45:08 -08002198 __dev_kfree_skb_irq(skb, reason);
Denis Vlasenko56079432006-03-29 15:57:29 -08002199 else
2200 dev_kfree_skb(skb);
2201}
Eric Dumazete6247022013-12-05 04:45:08 -08002202EXPORT_SYMBOL(__dev_kfree_skb_any);
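
/* A sketch of a driver tx-completion path that may run in hardirq or
 * process context, which is what the _any variants are for. The helper
 * name is an assumption; dev_consume_skb_any() keeps successfully sent
 * packets out of drop monitoring.
 */
static void __maybe_unused example_tx_complete(struct sk_buff *skb, bool sent)
{
	if (sent)
		dev_consume_skb_any(skb);
	else
		dev_kfree_skb_any(skb);
}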
Denis Vlasenko56079432006-03-29 15:57:29 -08002203
2204
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002205/**
2206 * netif_device_detach - mark device as removed
2207 * @dev: network device
2208 *
 * Mark device as removed from the system and therefore no longer available.
2210 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002211void netif_device_detach(struct net_device *dev)
2212{
2213 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2214 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002215 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002216 }
2217}
2218EXPORT_SYMBOL(netif_device_detach);
2219
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002220/**
2221 * netif_device_attach - mark device as attached
2222 * @dev: network device
2223 *
 * Mark device as attached to the system and restart the device if needed.
2225 */
Denis Vlasenko56079432006-03-29 15:57:29 -08002226void netif_device_attach(struct net_device *dev)
2227{
2228 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2229 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00002230 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002231 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08002232 }
2233}
2234EXPORT_SYMBOL(netif_device_attach);
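
/* A sketch of the usual suspend/resume pairing for these helpers; the
 * function names are assumptions standing in for a driver's PM hooks.
 */
static int __maybe_unused example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stop all tx queues, mark absent */
	return 0;
}

static int __maybe_unused example_resume(struct net_device *dev)
{
	netif_device_attach(dev);	/* mark present, restart tx queues */
	return 0;
}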
2235
Ben Hutchings36c92472012-01-17 07:57:56 +00002236static void skb_warn_bad_offload(const struct sk_buff *skb)
2237{
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002238 static const netdev_features_t null_features = 0;
Ben Hutchings36c92472012-01-17 07:57:56 +00002239 struct net_device *dev = skb->dev;
2240 const char *driver = "";
2241
Ben Greearc846ad92013-04-19 10:45:52 +00002242 if (!net_ratelimit())
2243 return;
2244
Ben Hutchings36c92472012-01-17 07:57:56 +00002245 if (dev && dev->dev.parent)
2246 driver = dev_driver_string(dev->dev.parent);
2247
2248 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2249 "gso_type=%d ip_summed=%d\n",
Michał Mirosław65e9d2f2012-01-17 10:00:40 +00002250 driver, dev ? &dev->features : &null_features,
2251 skb->sk ? &skb->sk->sk_route_caps : &null_features,
Ben Hutchings36c92472012-01-17 07:57:56 +00002252 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2253 skb_shinfo(skb)->gso_type, skb->ip_summed);
2254}
2255
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256/*
2257 * Invalidate hardware checksum when packet is to be mangled, and
2258 * complete checksum manually on outgoing path.
2259 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07002260int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261{
Al Virod3bc23e2006-11-14 21:24:49 -08002262 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07002263 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264
Patrick McHardy84fa7932006-08-29 16:44:56 -07002265 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07002266 goto out_set_summed;
2267
2268 if (unlikely(skb_shinfo(skb)->gso_size)) {
Ben Hutchings36c92472012-01-17 07:57:56 +00002269 skb_warn_bad_offload(skb);
2270 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 }
2272
	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity: the checksum could otherwise
	 * be wrong.
	 */
2276 if (skb_has_shared_frag(skb)) {
2277 ret = __skb_linearize(skb);
2278 if (ret)
2279 goto out;
2280 }
2281
Michał Mirosław55508d62010-12-14 15:24:08 +00002282 offset = skb_checksum_start_offset(skb);
Herbert Xua0308472007-10-15 01:47:15 -07002283 BUG_ON(offset >= skb_headlen(skb));
2284 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2285
2286 offset += skb->csum_offset;
2287 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2288
2289 if (skb_cloned(skb) &&
2290 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2292 if (ret)
2293 goto out;
2294 }
2295
Herbert Xua0308472007-10-15 01:47:15 -07002296 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07002297out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002299out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 return ret;
2301}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002302EXPORT_SYMBOL(skb_checksum_help);
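
/* A sketch of the mangling rule described above (the helper name is an
 * assumption): a path that rewrites payload bytes first resolves
 * CHECKSUM_PARTIAL so the device is not asked to checksum data that no
 * longer matches the stored checksum start/offset.
 */
static int __maybe_unused example_prepare_mangle(struct sk_buff *skb)
{
	int err;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err)
			return err;
	}

	/* safe to rewrite packet bytes here */
	return 0;
}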
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
Vlad Yasevich53d64712014-03-27 17:26:18 -04002304__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002305{
Nikolay Aleksandrov4b9b1cd2014-05-28 18:03:48 +02002306 unsigned int vlan_depth = skb->mac_len;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002307 __be16 type = skb->protocol;
2308
Pravin B Shelar19acc322013-05-07 20:41:07 +00002309 /* Tunnel gso handlers can set protocol to ethernet. */
2310 if (type == htons(ETH_P_TEB)) {
2311 struct ethhdr *eth;
2312
2313 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2314 return 0;
2315
2316 eth = (struct ethhdr *)skb_mac_header(skb);
2317 type = eth->h_proto;
2318 }
2319
Nikolay Aleksandrov4b9b1cd2014-05-28 18:03:48 +02002320 /* if skb->protocol is 802.1Q/AD then the header should already be
2321 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2322 * ETH_HLEN otherwise
2323 */
2324 if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2325 if (vlan_depth) {
2326 if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
2327 return 0;
2328 vlan_depth -= VLAN_HLEN;
2329 } else {
2330 vlan_depth = ETH_HLEN;
2331 }
2332 do {
2333 struct vlan_hdr *vh;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002334
Nikolay Aleksandrov4b9b1cd2014-05-28 18:03:48 +02002335 if (unlikely(!pskb_may_pull(skb,
2336 vlan_depth + VLAN_HLEN)))
2337 return 0;
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002338
Nikolay Aleksandrov4b9b1cd2014-05-28 18:03:48 +02002339 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2340 type = vh->h_vlan_encapsulated_proto;
2341 vlan_depth += VLAN_HLEN;
2342 } while (type == htons(ETH_P_8021Q) ||
2343 type == htons(ETH_P_8021AD));
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002344 }
2345
Vlad Yasevich53d64712014-03-27 17:26:18 -04002346 *depth = vlan_depth;
2347
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002348 return type;
2349}
2350
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002351/**
2352 * skb_mac_gso_segment - mac layer segmentation handler.
2353 * @skb: buffer to segment
2354 * @features: features for the output path (see dev->features)
2355 */
2356struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2357 netdev_features_t features)
2358{
2359 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2360 struct packet_offload *ptype;
Vlad Yasevich53d64712014-03-27 17:26:18 -04002361 int vlan_depth = skb->mac_len;
2362 __be16 type = skb_network_protocol(skb, &vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002363
Pravin B Shelarec5f0612013-03-07 09:28:01 +00002364 if (unlikely(!type))
2365 return ERR_PTR(-EINVAL);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002366
Vlad Yasevich53d64712014-03-27 17:26:18 -04002367 __skb_pull(skb, vlan_depth);
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002368
2369 rcu_read_lock();
2370 list_for_each_entry_rcu(ptype, &offload_base, list) {
2371 if (ptype->type == type && ptype->callbacks.gso_segment) {
2372 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2373 int err;
2374
2375 err = ptype->callbacks.gso_send_check(skb);
2376 segs = ERR_PTR(err);
2377 if (err || skb_gso_ok(skb, features))
2378 break;
2379 __skb_push(skb, (skb->data -
2380 skb_network_header(skb)));
2381 }
2382 segs = ptype->callbacks.gso_segment(skb, features);
2383 break;
2384 }
2385 }
2386 rcu_read_unlock();
2387
2388 __skb_push(skb, skb->data - skb_mac_header(skb));
2389
2390 return segs;
2391}
2392EXPORT_SYMBOL(skb_mac_gso_segment);
2393
2394
/* openvswitch calls this on the rx path, so we need a different check.
 */
2397static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2398{
2399 if (tx_path)
2400 return skb->ip_summed != CHECKSUM_PARTIAL;
2401 else
2402 return skb->ip_summed == CHECKSUM_NONE;
2403}
2404
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002405/**
Cong Wang12b00042013-02-05 16:36:38 +00002406 * __skb_gso_segment - Perform segmentation on skb.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002407 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07002408 * @features: features for the output path (see dev->features)
Cong Wang12b00042013-02-05 16:36:38 +00002409 * @tx_path: whether it is called in TX path
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002410 *
2411 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07002412 *
2413 * It may return NULL if the skb requires no segmentation. This is
2414 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002415 */
Cong Wang12b00042013-02-05 16:36:38 +00002416struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2417 netdev_features_t features, bool tx_path)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002418{
Cong Wang12b00042013-02-05 16:36:38 +00002419 if (unlikely(skb_needs_check(skb, tx_path))) {
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002420 int err;
2421
Ben Hutchings36c92472012-01-17 07:57:56 +00002422 skb_warn_bad_offload(skb);
Herbert Xu67fd1a72009-01-19 16:26:44 -08002423
françois romieua40e0a62014-07-15 23:55:35 +02002424 err = skb_cow_head(skb, 0);
2425 if (err < 0)
Herbert Xua430a432006-07-08 13:34:56 -07002426 return ERR_PTR(err);
2427 }
2428
Pravin B Shelar68c33162013-02-14 14:02:41 +00002429 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
Eric Dumazet3347c962013-10-19 11:42:56 -07002430 SKB_GSO_CB(skb)->encap_level = 0;
2431
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002432 skb_reset_mac_header(skb);
2433 skb_reset_mac_len(skb);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002434
Pravin B Shelar05e8ef42013-02-14 09:44:55 +00002435 return skb_mac_gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002436}
Cong Wang12b00042013-02-05 16:36:38 +00002437EXPORT_SYMBOL(__skb_gso_segment);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002438
Herbert Xufb286bb2005-11-10 13:01:24 -08002439/* Take action when hardware reception checksum errors are detected. */
2440#ifdef CONFIG_BUG
2441void netdev_rx_csum_fault(struct net_device *dev)
2442{
2443 if (net_ratelimit()) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00002444 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08002445 dump_stack();
2446 }
2447}
2448EXPORT_SYMBOL(netdev_rx_csum_fault);
2449#endif
2450
/* Actually, we should eliminate this check as soon as we know that:
 * 1. An IOMMU is present and allows mapping all of memory.
 * 2. No high memory really exists on this machine.
 */
2455
Florian Westphalc1e756b2014-05-05 15:00:44 +02002456static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457{
Herbert Xu3d3a8532006-06-27 13:33:10 -07002458#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002460 if (!(dev->features & NETIF_F_HIGHDMA)) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002461 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2462 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2463 if (PageHighMem(skb_frag_page(frag)))
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002464 return 1;
Ian Campbellea2ab692011-08-22 23:44:58 +00002465 }
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002466 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002468 if (PCI_DMA_BUS_IS_PHYS) {
2469 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470
Eric Dumazet9092c652010-04-02 13:34:49 -07002471 if (!pdev)
2472 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002473 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Ian Campbellea2ab692011-08-22 23:44:58 +00002474 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2475 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00002476 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2477 return 1;
2478 }
2479 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07002480#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 return 0;
2482}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002484struct dev_gso_cb {
2485 void (*destructor)(struct sk_buff *skb);
2486};
2487
2488#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2489
2490static void dev_gso_skb_destructor(struct sk_buff *skb)
2491{
2492 struct dev_gso_cb *cb;
2493
Eric Dumazet289dccb2013-12-20 14:29:08 -08002494 kfree_skb_list(skb->next);
2495 skb->next = NULL;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002496
2497 cb = DEV_GSO_CB(skb);
2498 if (cb->destructor)
2499 cb->destructor(skb);
2500}
2501
2502/**
2503 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2504 * @skb: buffer to segment
Jesse Gross91ecb632011-01-09 06:23:33 +00002505 * @features: device features as applicable to this skb
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002506 *
2507 * This function segments the given skb and stores the list of segments
2508 * in skb->next.
2509 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002510static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002511{
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002512 struct sk_buff *segs;
2513
Herbert Xu576a30e2006-06-27 13:22:38 -07002514 segs = skb_gso_segment(skb, features);
2515
2516 /* Verifying header integrity only. */
2517 if (!segs)
2518 return 0;
2519
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07002520 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002521 return PTR_ERR(segs);
2522
2523 skb->next = segs;
2524 DEV_GSO_CB(skb)->destructor = skb->destructor;
2525 skb->destructor = dev_gso_skb_destructor;
2526
2527 return 0;
2528}
2529
/* If this is an MPLS offload request, verify we are testing hardware MPLS
 * features instead of the standard features for the netdev.
 */
2533#ifdef CONFIG_NET_MPLS_GSO
2534static netdev_features_t net_mpls_features(struct sk_buff *skb,
2535 netdev_features_t features,
2536 __be16 type)
2537{
2538 if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
2539 features &= skb->dev->mpls_features;
2540
2541 return features;
2542}
2543#else
2544static netdev_features_t net_mpls_features(struct sk_buff *skb,
2545 netdev_features_t features,
2546 __be16 type)
2547{
2548 return features;
2549}
2550#endif
2551
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002552static netdev_features_t harmonize_features(struct sk_buff *skb,
Florian Westphalc1e756b2014-05-05 15:00:44 +02002553 netdev_features_t features)
Jesse Grossf01a5232011-01-09 06:23:31 +00002554{
Vlad Yasevich53d64712014-03-27 17:26:18 -04002555 int tmp;
Simon Horman3b392dd2014-06-04 08:53:17 +09002556 __be16 type;
2557
2558 type = skb_network_protocol(skb, &tmp);
2559 features = net_mpls_features(skb, features, type);
Vlad Yasevich53d64712014-03-27 17:26:18 -04002560
Ed Cashinc0d680e2012-09-19 15:49:00 +00002561 if (skb->ip_summed != CHECKSUM_NONE &&
Simon Horman3b392dd2014-06-04 08:53:17 +09002562 !can_checksum_protocol(features, type)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002563 features &= ~NETIF_F_ALL_CSUM;
Florian Westphalc1e756b2014-05-05 15:00:44 +02002564 } else if (illegal_highdma(skb->dev, skb)) {
Jesse Grossf01a5232011-01-09 06:23:31 +00002565 features &= ~NETIF_F_SG;
2566 }
2567
2568 return features;
2569}
2570
Florian Westphalc1e756b2014-05-05 15:00:44 +02002571netdev_features_t netif_skb_features(struct sk_buff *skb)
Jesse Gross58e998c2010-10-29 12:14:55 +00002572{
2573 __be16 protocol = skb->protocol;
Florian Westphalc1e756b2014-05-05 15:00:44 +02002574 netdev_features_t features = skb->dev->features;
Jesse Gross58e998c2010-10-29 12:14:55 +00002575
Florian Westphalc1e756b2014-05-05 15:00:44 +02002576 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
Ben Hutchings30b678d2012-07-30 15:57:00 +00002577 features &= ~NETIF_F_GSO_MASK;
2578
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002579 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
Jesse Gross58e998c2010-10-29 12:14:55 +00002580 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2581 protocol = veh->h_vlan_encapsulated_proto;
Jesse Grossf01a5232011-01-09 06:23:31 +00002582 } else if (!vlan_tx_tag_present(skb)) {
Florian Westphalc1e756b2014-05-05 15:00:44 +02002583 return harmonize_features(skb, features);
Jesse Grossf01a5232011-01-09 06:23:31 +00002584 }
Jesse Gross58e998c2010-10-29 12:14:55 +00002585
Florian Westphalc1e756b2014-05-05 15:00:44 +02002586 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002587 NETIF_F_HW_VLAN_STAG_TX);
Jesse Grossf01a5232011-01-09 06:23:31 +00002588
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002589 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
Jesse Grossf01a5232011-01-09 06:23:31 +00002590 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
Patrick McHardy8ad227f2013-04-19 02:04:31 +00002591 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2592 NETIF_F_HW_VLAN_STAG_TX;
Alexander Duyckcdbaa0b2013-07-10 17:05:06 -07002593
Florian Westphalc1e756b2014-05-05 15:00:44 +02002594 return harmonize_features(skb, features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002595}
Florian Westphalc1e756b2014-05-05 15:00:44 +02002596EXPORT_SYMBOL(netif_skb_features);
Jesse Gross58e998c2010-10-29 12:14:55 +00002597
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002598int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
Jason Wangf663dd92014-01-10 16:18:26 +08002599 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002600{
Stephen Hemminger00829822008-11-20 20:14:53 -08002601 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00002602 int rc = NETDEV_TX_OK;
Koki Sanagiec764bf2011-05-30 21:48:34 +00002603 unsigned int skb_len;
Stephen Hemminger00829822008-11-20 20:14:53 -08002604
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002605 if (likely(!skb->next)) {
Michał Mirosławc8f44af2011-11-15 15:29:55 +00002606 netdev_features_t features;
Jesse Grossfc741212011-01-09 06:23:32 +00002607
Eric Dumazet93f154b2009-05-18 22:19:19 -07002608		/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002609		 * If the device doesn't need skb->dst, release it right now
Eric Dumazet93f154b2009-05-18 22:19:19 -07002610		 * while it's still hot in this CPU's cache.
2611		 */
Eric Dumazetadf30902009-06-02 05:19:30 +00002612 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2613 skb_dst_drop(skb);
2614
Jesse Grossfc741212011-01-09 06:23:32 +00002615 features = netif_skb_features(skb);
2616
Jesse Gross7b9c6092010-10-20 13:56:04 +00002617 if (vlan_tx_tag_present(skb) &&
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002618 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2619 skb = __vlan_put_tag(skb, skb->vlan_proto,
2620 vlan_tx_tag_get(skb));
Jesse Gross7b9c6092010-10-20 13:56:04 +00002621 if (unlikely(!skb))
2622 goto out;
2623
2624 skb->vlan_tci = 0;
2625 }
2626
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002627		/* If this is an encapsulation offload request, verify we are
2628		 * testing hardware encapsulation features instead of the
2629		 * standard features for the netdev.
2630		 */
2631 if (skb->encapsulation)
2632 features &= dev->hw_enc_features;
2633
Jesse Grossfc741212011-01-09 06:23:32 +00002634 if (netif_needs_gso(skb, features)) {
Jesse Gross91ecb632011-01-09 06:23:33 +00002635 if (unlikely(dev_gso_segment(skb, features)))
David S. Miller9ccb8972010-04-22 01:02:07 -07002636 goto out_kfree_skb;
2637 if (skb->next)
2638 goto gso;
John Fastabend6afff0c2010-06-16 14:18:12 +00002639 } else {
Jesse Gross02932ce2011-01-09 06:23:34 +00002640 if (skb_needs_linearize(skb, features) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002641 __skb_linearize(skb))
2642 goto out_kfree_skb;
2643
2644 /* If packet is not checksummed and device does not
2645 * support checksumming for this protocol, complete
2646 * checksumming here.
2647 */
2648 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Alexander Duyckfc70fb62012-12-07 14:14:15 +00002649 if (skb->encapsulation)
2650 skb_set_inner_transport_header(skb,
2651 skb_checksum_start_offset(skb));
2652 else
2653 skb_set_transport_header(skb,
2654 skb_checksum_start_offset(skb));
Jesse Gross03634662011-01-09 06:23:35 +00002655 if (!(features & NETIF_F_ALL_CSUM) &&
John Fastabend6afff0c2010-06-16 14:18:12 +00002656 skb_checksum_help(skb))
2657 goto out_kfree_skb;
2658 }
David S. Miller9ccb8972010-04-22 01:02:07 -07002659 }
2660
Eric Dumazetb40863c2012-09-18 20:44:49 +00002661 if (!list_empty(&ptype_all))
2662 dev_queue_xmit_nit(skb, dev);
2663
Koki Sanagiec764bf2011-05-30 21:48:34 +00002664 skb_len = skb->len;
Ben Hutchingsd87d04a2014-01-10 22:17:03 +00002665 trace_net_dev_start_xmit(skb, dev);
Ben Hutchings20567662014-01-10 22:16:30 +00002666 rc = ops->ndo_start_xmit(skb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002667 trace_net_dev_xmit(skb, rc, dev, skb_len);
Jason Wangf663dd92014-01-10 16:18:26 +08002668 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07002669 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00002670 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002671 }
2672
Herbert Xu576a30e2006-06-27 13:22:38 -07002673gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002674 do {
2675 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002676
2677 skb->next = nskb->next;
2678 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00002679
Eric Dumazetb40863c2012-09-18 20:44:49 +00002680 if (!list_empty(&ptype_all))
2681 dev_queue_xmit_nit(nskb, dev);
2682
Koki Sanagiec764bf2011-05-30 21:48:34 +00002683 skb_len = nskb->len;
Ben Hutchingsd87d04a2014-01-10 22:17:03 +00002684 trace_net_dev_start_xmit(nskb, dev);
Jason Wangf663dd92014-01-10 16:18:26 +08002685 rc = ops->ndo_start_xmit(nskb, dev);
Koki Sanagiec764bf2011-05-30 21:48:34 +00002686 trace_net_dev_xmit(nskb, rc, dev, skb_len);
Patrick McHardyec634fe2009-07-05 19:23:38 -07002687 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002688 if (rc & ~NETDEV_TX_MASK)
2689 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07002690 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002691 skb->next = nskb;
2692 return rc;
2693 }
Eric Dumazet08baf562009-05-25 22:58:01 -07002694 txq_trans_update(txq);
Tom Herbert734664982011-11-28 16:32:44 +00002695 if (unlikely(netif_xmit_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07002696 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002697 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002698
Patrick McHardy572a9d72009-11-10 06:14:14 +00002699out_kfree_gso_skb:
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002700 if (likely(skb->next == NULL)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002701 skb->destructor = DEV_GSO_CB(skb)->destructor;
Sridhar Samudrala0c772152013-04-29 13:02:42 +00002702 consume_skb(skb);
2703 return rc;
2704 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002705out_kfree_skb:
2706 kfree_skb(skb);
Jesse Gross7b9c6092010-10-20 13:56:04 +00002707out:
Patrick McHardy572a9d72009-11-10 06:14:14 +00002708 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002709}
John Fastabenda6cc0cf2013-11-06 09:54:46 -08002710EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002711
Eric Dumazet1def9232013-01-10 12:36:42 +00002712static void qdisc_pkt_len_init(struct sk_buff *skb)
2713{
2714 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2715
2716 qdisc_skb_cb(skb)->pkt_len = skb->len;
2717
2718	/* To get a more precise estimate of the bytes sent on the wire,
2719	 * we add the header size of every segment to pkt_len.
2720	 */
2721 if (shinfo->gso_size) {
Eric Dumazet757b8b12013-01-15 21:14:21 -08002722 unsigned int hdr_len;
Jason Wang15e5a032013-03-25 20:19:59 +00002723 u16 gso_segs = shinfo->gso_segs;
Eric Dumazet1def9232013-01-10 12:36:42 +00002724
Eric Dumazet757b8b12013-01-15 21:14:21 -08002725 /* mac layer + network layer */
2726 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2727
2728 /* + transport layer */
Eric Dumazet1def9232013-01-10 12:36:42 +00002729 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2730 hdr_len += tcp_hdrlen(skb);
2731 else
2732 hdr_len += sizeof(struct udphdr);
Jason Wang15e5a032013-03-25 20:19:59 +00002733
2734 if (shinfo->gso_type & SKB_GSO_DODGY)
2735 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2736 shinfo->gso_size);
2737
2738 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
Eric Dumazet1def9232013-01-10 12:36:42 +00002739 }
2740}
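/*
 * Worked example (hypothetical numbers, not from dev.c): for a TCP GSO skb
 * with a 66-byte header (14 ethernet + 20 IP + 32 TCP), gso_size 1448 and
 * 45 segments, qdisc_pkt_len_init() accounts for the headers that every
 * emitted segment will carry on the wire, of which skb->len includes only one.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

int main(void)
{
	unsigned int hdr_len = 66;		/* mac + network + transport */
	unsigned int gso_segs = 45;
	unsigned int skb_len = hdr_len + 45 * 1448;	/* one header + payload */
	unsigned int pkt_len = skb_len + (gso_segs - 1) * hdr_len;

	/* 45 segments on the wire carry 45 copies of the header, but
	 * skb->len only includes one; add the missing 44 copies.
	 */
	printf("pkt_len = %u\n", pkt_len);	/* 65226 + 44 * 66 = 68130 */
	return 0;
}
#endif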
2741
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002742static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2743 struct net_device *dev,
2744 struct netdev_queue *txq)
2745{
2746 spinlock_t *root_lock = qdisc_lock(q);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002747 bool contended;
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002748 int rc;
2749
Eric Dumazet1def9232013-01-10 12:36:42 +00002750 qdisc_pkt_len_init(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002751 qdisc_calculate_pkt_len(skb, q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002752	/*
2753	 * Heuristic to force contended enqueues to serialize on a
2754	 * separate lock before trying to get the qdisc main lock.
Ying Xue9bf2b8c2014-06-26 15:56:31 +08002755	 * This permits the __QDISC___STATE_RUNNING owner to take the lock
2756	 * more often and dequeue packets faster.
Eric Dumazet79640a42010-06-02 05:09:29 -07002757	 */
Eric Dumazeta2da5702011-01-20 03:48:19 +00002758 contended = qdisc_is_running(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002759 if (unlikely(contended))
2760 spin_lock(&q->busylock);
2761
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002762 spin_lock(root_lock);
2763 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2764 kfree_skb(skb);
2765 rc = NET_XMIT_DROP;
2766 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
Eric Dumazetbc135b22010-06-02 03:23:51 -07002767 qdisc_run_begin(q)) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002768 /*
2769 * This is a work-conserving queue; there are no old skbs
2770 * waiting to be sent out; and the qdisc is not running -
2771 * xmit the skb directly.
2772 */
Eric Dumazet7fee2262010-05-11 23:19:48 +00002773 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2774 skb_dst_force(skb);
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002775
Eric Dumazetbfe0d022011-01-09 08:30:54 +00002776 qdisc_bstats_update(q, skb);
2777
Eric Dumazet79640a42010-06-02 05:09:29 -07002778 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2779 if (unlikely(contended)) {
2780 spin_unlock(&q->busylock);
2781 contended = false;
2782 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002783 __qdisc_run(q);
Eric Dumazet79640a42010-06-02 05:09:29 -07002784 } else
Eric Dumazetbc135b22010-06-02 03:23:51 -07002785 qdisc_run_end(q);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002786
2787 rc = NET_XMIT_SUCCESS;
2788 } else {
Eric Dumazet7fee2262010-05-11 23:19:48 +00002789 skb_dst_force(skb);
Eric Dumazeta2da5702011-01-20 03:48:19 +00002790 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
Eric Dumazet79640a42010-06-02 05:09:29 -07002791 if (qdisc_run_begin(q)) {
2792 if (unlikely(contended)) {
2793 spin_unlock(&q->busylock);
2794 contended = false;
2795 }
2796 __qdisc_run(q);
2797 }
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002798 }
2799 spin_unlock(root_lock);
Eric Dumazet79640a42010-06-02 05:09:29 -07002800 if (unlikely(contended))
2801 spin_unlock(&q->busylock);
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002802 return rc;
2803}
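/*
 * The busylock heuristic above can be modelled in userspace: when the main
 * lock is already busy, contending producers first serialize on a secondary
 * lock, so the lock owner faces at most one rival for the main lock at a
 * time. A minimal pthread analogy follows; this is a rough model of the
 * locking shape only, not the qdisc logic itself.
 */
#if 0 /* illustrative sketch only */
#include <pthread.h>

static pthread_mutex_t main_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t busylock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue_one(void (*enqueue)(void))
{
	/* trylock returns non-zero when someone else owns the main lock */
	int contended = pthread_mutex_trylock(&main_lock);

	if (contended) {
		/* Contenders queue up here one at a time ... */
		pthread_mutex_lock(&busylock);
		/* ... so only one of them spins on the main lock. */
		pthread_mutex_lock(&main_lock);
	}
	enqueue();			/* critical section */
	pthread_mutex_unlock(&main_lock);
	if (contended)
		pthread_mutex_unlock(&busylock);
}
#endif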
2804
Daniel Borkmann86f85152013-12-29 17:27:11 +01002805#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
Neil Horman5bc14212011-11-22 05:10:51 +00002806static void skb_update_prio(struct sk_buff *skb)
2807{
Igor Maravic6977a792011-11-25 07:44:54 +00002808 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
Neil Horman5bc14212011-11-22 05:10:51 +00002809
Eric Dumazet91c68ce2012-07-08 21:45:10 +00002810 if (!skb->priority && skb->sk && map) {
2811 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2812
2813 if (prioidx < map->priomap_len)
2814 skb->priority = map->priomap[prioidx];
2815 }
Neil Horman5bc14212011-11-22 05:10:51 +00002816}
2817#else
2818#define skb_update_prio(skb)
2819#endif
2820
Eric Dumazet745e20f2010-09-29 13:23:09 -07002821static DEFINE_PER_CPU(int, xmit_recursion);
David S. Miller11a766c2010-10-25 12:51:55 -07002822#define RECURSION_LIMIT 10
Eric Dumazet745e20f2010-09-29 13:23:09 -07002823
Dave Jonesd29f7492008-07-22 14:09:06 -07002824/**
Michel Machado95603e22012-06-12 10:16:35 +00002825 * dev_loopback_xmit - loop back @skb
2826 * @skb: buffer to transmit
2827 */
2828int dev_loopback_xmit(struct sk_buff *skb)
2829{
2830 skb_reset_mac_header(skb);
2831 __skb_pull(skb, skb_network_offset(skb));
2832 skb->pkt_type = PACKET_LOOPBACK;
2833 skb->ip_summed = CHECKSUM_UNNECESSARY;
2834 WARN_ON(!skb_dst(skb));
2835 skb_dst_force(skb);
2836 netif_rx_ni(skb);
2837 return 0;
2838}
2839EXPORT_SYMBOL(dev_loopback_xmit);
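/*
 * Usage sketch (hypothetical, not from dev.c): a caller that wants a copy
 * of an outgoing packet delivered back into the local stack could clone it
 * and feed the clone through dev_loopback_xmit(), roughly what the IP
 * output path does for looped-back multicast. The clone inherits the dst
 * that dev_loopback_xmit() expects to be set.
 */
#if 0 /* illustrative sketch only */
static void example_loop_back_copy(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone)
		dev_loopback_xmit(clone);	/* requeues via netif_rx_ni() */
}
#endif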
2840
2841/**
Jason Wang9d08dd32014-01-20 11:25:13 +08002842 * __dev_queue_xmit - transmit a buffer
Dave Jonesd29f7492008-07-22 14:09:06 -07002843 * @skb: buffer to transmit
Jason Wang9d08dd32014-01-20 11:25:13 +08002844 * @accel_priv: private data used for L2 forwarding offload
Dave Jonesd29f7492008-07-22 14:09:06 -07002845 *
2846 * Queue a buffer for transmission to a network device. The caller must
2847 * have set the device and priority and built the buffer before calling
2848 * this function. The function can be called from an interrupt.
2849 *
2850 * A negative errno code is returned on a failure. A success does not
2851 * guarantee the frame will be transmitted as it may be dropped due
2852 * to congestion or traffic shaping.
2853 *
2854 * -----------------------------------------------------------------------------------
2855 * I notice this method can also return errors from the queue disciplines,
2856 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2857 * be positive.
2858 *
2859 * Regardless of the return value, the skb is consumed, so it is currently
2860 * difficult to retry a send to this method. (You can bump the ref count
2861 * before sending to hold a reference for retry if you are careful.)
2862 *
2863 * When calling this method, interrupts MUST be enabled. This is because
2864 * the BH enable code must have IRQs enabled so that it will not deadlock.
2865 * --BLG
2866 */
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05302867static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868{
2869 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002870 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 struct Qdisc *q;
2872 int rc = -ENOMEM;
2873
Eric Dumazet6d1ccff2013-02-05 20:22:20 +00002874 skb_reset_mac_header(skb);
2875
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002876 /* Disable soft irqs for various locks below. Also
2877 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002879 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880
Neil Horman5bc14212011-11-22 05:10:51 +00002881 skb_update_prio(skb);
2882
Jason Wangf663dd92014-01-10 16:18:26 +08002883 txq = netdev_pick_tx(dev, skb, accel_priv);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002884 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002885
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002887 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888#endif
Koki Sanagicf66ba52010-08-23 18:45:02 +09002889 trace_net_dev_queue(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002891 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002892 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 }
2894
2895	/* The device has no queue. Common case for software devices:
2896	   loopback and all sorts of tunnels...
2897
Herbert Xu932ff272006-06-09 12:20:56 -07002898	   Really, it is unlikely that netif_tx_lock protection is necessary
2899	   here. (E.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900	   counters.)
2901	   However, it is possible that they rely on the protection
2902	   made by us here.
2903
2904	   Check this and take the lock. It is not prone to deadlocks.
2905	   Alternatively, use the noqueue qdisc path; it is even simpler 8)
2906	 */
2907 if (dev->flags & IFF_UP) {
2908 int cpu = smp_processor_id(); /* ok because BHs are off */
2909
David S. Millerc773e842008-07-08 23:13:53 -07002910 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911
Eric Dumazet745e20f2010-09-29 13:23:09 -07002912 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2913 goto recursion_alert;
2914
David S. Millerc773e842008-07-08 23:13:53 -07002915 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916
Tom Herbert734664982011-11-28 16:32:44 +00002917 if (!netif_xmit_stopped(txq)) {
Eric Dumazet745e20f2010-09-29 13:23:09 -07002918 __this_cpu_inc(xmit_recursion);
Jason Wangf663dd92014-01-10 16:18:26 +08002919 rc = dev_hard_start_xmit(skb, dev, txq);
Eric Dumazet745e20f2010-09-29 13:23:09 -07002920 __this_cpu_dec(xmit_recursion);
Patrick McHardy572a9d72009-11-10 06:14:14 +00002921 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002922 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923 goto out;
2924 }
2925 }
David S. Millerc773e842008-07-08 23:13:53 -07002926 HARD_TX_UNLOCK(dev, txq);
Joe Perchese87cc472012-05-13 21:56:26 +00002927 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2928 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 } else {
2930			/* Recursion has been detected! It is possible,
Eric Dumazet745e20f2010-09-29 13:23:09 -07002931			 * unfortunately.
2932			 */
2933recursion_alert:
Joe Perchese87cc472012-05-13 21:56:26 +00002934 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2935 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936 }
2937 }
2938
2939 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002940 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002941
Eric Dumazet015f0682014-03-27 08:45:56 -07002942 atomic_long_inc(&dev->tx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943 kfree_skb(skb);
2944 return rc;
2945out:
Herbert Xud4828d82006-06-22 02:28:18 -07002946 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947 return rc;
2948}
Jason Wangf663dd92014-01-10 16:18:26 +08002949
2950int dev_queue_xmit(struct sk_buff *skb)
2951{
2952 return __dev_queue_xmit(skb, NULL);
2953}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002954EXPORT_SYMBOL(dev_queue_xmit);
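/*
 * Usage sketch (hypothetical module code, not from dev.c): per the
 * kernel-doc above, a caller must set skb->dev (and optionally
 * skb->priority) and fully build the frame before handing it off; the skb
 * is consumed regardless of the return value. example_send() is an
 * invented name for illustration.
 */
#if 0 /* illustrative sketch only */
static int example_send(struct net_device *dev, const void *buf, size_t len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return -ENOMEM;
	memcpy(skb_put(skb, len), buf, len);	/* frame, headers included */
	skb->dev = dev;				/* required by dev_queue_xmit() */
	return dev_queue_xmit(skb);		/* consumes skb; may return
						 * positive NET_XMIT_* codes */
}
#endif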
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955
Jason Wangf663dd92014-01-10 16:18:26 +08002956int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
2957{
2958 return __dev_queue_xmit(skb, accel_priv);
2959}
2960EXPORT_SYMBOL(dev_queue_xmit_accel);
2961
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962
2963/*=======================================================================
2964 Receiver routines
2965 =======================================================================*/
2966
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002967int netdev_max_backlog __read_mostly = 1000;
Eric Dumazetc9e6bc62012-09-27 19:29:05 +00002968EXPORT_SYMBOL(netdev_max_backlog);
2969
Eric Dumazet3b098e22010-05-15 23:57:10 -07002970int netdev_tstamp_prequeue __read_mostly = 1;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002971int netdev_budget __read_mostly = 300;
2972int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002973
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07002974/* Called with irq disabled */
2975static inline void ____napi_schedule(struct softnet_data *sd,
2976 struct napi_struct *napi)
2977{
2978 list_add_tail(&napi->poll_list, &sd->poll_list);
2979 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2980}
2981
Eric Dumazetdf334542010-03-24 19:13:54 +00002982#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002983
2984/* One global table that all flow-based protocols share. */
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00002985struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002986EXPORT_SYMBOL(rps_sock_flow_table);
2987
Ingo Molnarc5905af2012-02-24 08:31:31 +01002988struct static_key rps_needed __read_mostly;
Eric Dumazetadc93002011-11-17 03:13:26 +00002989
Ben Hutchingsc4454772011-01-19 11:03:53 +00002990static struct rps_dev_flow *
2991set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2992 struct rps_dev_flow *rflow, u16 next_cpu)
2993{
Ben Hutchings09994d12011-10-03 04:42:46 +00002994 if (next_cpu != RPS_NO_CPU) {
Ben Hutchingsc4454772011-01-19 11:03:53 +00002995#ifdef CONFIG_RFS_ACCEL
2996 struct netdev_rx_queue *rxqueue;
2997 struct rps_dev_flow_table *flow_table;
2998 struct rps_dev_flow *old_rflow;
2999 u32 flow_id;
3000 u16 rxq_index;
3001 int rc;
3002
3003 /* Should we steer this flow to a different hardware queue? */
Ben Hutchings69a19ee2011-02-15 20:32:04 +00003004 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3005 !(dev->features & NETIF_F_NTUPLE))
Ben Hutchingsc4454772011-01-19 11:03:53 +00003006 goto out;
3007 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3008 if (rxq_index == skb_get_rx_queue(skb))
3009 goto out;
3010
3011 rxqueue = dev->_rx + rxq_index;
3012 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3013 if (!flow_table)
3014 goto out;
Tom Herbert61b905d2014-03-24 15:34:47 -07003015 flow_id = skb_get_hash(skb) & flow_table->mask;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003016 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3017 rxq_index, flow_id);
3018 if (rc < 0)
3019 goto out;
3020 old_rflow = rflow;
3021 rflow = &flow_table->flows[flow_id];
Ben Hutchingsc4454772011-01-19 11:03:53 +00003022 rflow->filter = rc;
3023 if (old_rflow->filter == rflow->filter)
3024 old_rflow->filter = RPS_NO_FILTER;
3025 out:
3026#endif
3027 rflow->last_qtail =
Ben Hutchings09994d12011-10-03 04:42:46 +00003028 per_cpu(softnet_data, next_cpu).input_queue_head;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003029 }
3030
Ben Hutchings09994d12011-10-03 04:42:46 +00003031 rflow->cpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003032 return rflow;
3033}
3034
Tom Herbert0a9627f2010-03-16 08:03:29 +00003035/*
3036 * get_rps_cpu is called from netif_receive_skb and returns the target
3037 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003038 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003039 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003040static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3041 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003042{
Tom Herbert0a9627f2010-03-16 08:03:29 +00003043 struct netdev_rx_queue *rxqueue;
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003044 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07003045 struct rps_dev_flow_table *flow_table;
3046 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003047 int cpu = -1;
Tom Herbertfec5e652010-04-16 16:01:27 -07003048 u16 tcpu;
Tom Herbert61b905d2014-03-24 15:34:47 -07003049 u32 hash;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003050
Tom Herbert0a9627f2010-03-16 08:03:29 +00003051 if (skb_rx_queue_recorded(skb)) {
3052 u16 index = skb_get_rx_queue(skb);
Ben Hutchings62fe0b42010-09-27 08:24:33 +00003053 if (unlikely(index >= dev->real_num_rx_queues)) {
3054 WARN_ONCE(dev->real_num_rx_queues > 1,
3055 "%s received packet on queue %u, but number "
3056 "of RX queues is %u\n",
3057 dev->name, index, dev->real_num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003058 goto done;
3059 }
3060 rxqueue = dev->_rx + index;
3061 } else
3062 rxqueue = dev->_rx;
3063
Eric Dumazet6e3f7fa2010-10-25 03:02:02 +00003064 map = rcu_dereference(rxqueue->rps_map);
3065 if (map) {
Tom Herbert85875232011-01-31 16:23:42 -08003066 if (map->len == 1 &&
Eric Dumazet33d480c2011-08-11 19:30:52 +00003067 !rcu_access_pointer(rxqueue->rps_flow_table)) {
Changli Gao6febfca2010-09-03 23:12:37 +00003068 tcpu = map->cpus[0];
3069 if (cpu_online(tcpu))
3070 cpu = tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003071 goto done;
Eric Dumazetb249dcb2010-04-19 21:56:38 +00003072 }
Eric Dumazet33d480c2011-08-11 19:30:52 +00003073 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003074 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003075 }
3076
Changli Gao2d47b452010-08-17 19:00:56 +00003077 skb_reset_network_header(skb);
Tom Herbert61b905d2014-03-24 15:34:47 -07003078 hash = skb_get_hash(skb);
3079 if (!hash)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003080 goto done;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003081
Tom Herbertfec5e652010-04-16 16:01:27 -07003082 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3083 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3084 if (flow_table && sock_flow_table) {
3085 u16 next_cpu;
3086 struct rps_dev_flow *rflow;
3087
Tom Herbert61b905d2014-03-24 15:34:47 -07003088 rflow = &flow_table->flows[hash & flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003089 tcpu = rflow->cpu;
3090
Tom Herbert61b905d2014-03-24 15:34:47 -07003091 next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
Tom Herbertfec5e652010-04-16 16:01:27 -07003092
3093 /*
3094 * If the desired CPU (where last recvmsg was done) is
3095 * different from current CPU (one in the rx-queue flow
3096 * table entry), switch if one of the following holds:
3097 * - Current CPU is unset (equal to RPS_NO_CPU).
3098 * - Current CPU is offline.
3099 * - The current CPU's queue tail has advanced beyond the
3100 * last packet that was enqueued using this table entry.
3101 * This guarantees that all previous packets for the flow
3102 * have been dequeued, thus preserving in order delivery.
3103 */
3104 if (unlikely(tcpu != next_cpu) &&
3105 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3106 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
Tom Herbertbaefa312012-11-16 09:04:15 +00003107 rflow->last_qtail)) >= 0)) {
3108 tcpu = next_cpu;
Ben Hutchingsc4454772011-01-19 11:03:53 +00003109 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
Tom Herbertbaefa312012-11-16 09:04:15 +00003110 }
Ben Hutchingsc4454772011-01-19 11:03:53 +00003111
Tom Herbertfec5e652010-04-16 16:01:27 -07003112 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3113 *rflowp = rflow;
3114 cpu = tcpu;
3115 goto done;
3116 }
3117 }
3118
Tom Herbert0a9627f2010-03-16 08:03:29 +00003119 if (map) {
Tom Herbert61b905d2014-03-24 15:34:47 -07003120 tcpu = map->cpus[((u64) hash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00003121
3122 if (cpu_online(tcpu)) {
3123 cpu = tcpu;
3124 goto done;
3125 }
3126 }
3127
3128done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00003129 return cpu;
3130}
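/*
 * The fallback above picks a CPU with map->cpus[((u64) hash * map->len) >> 32],
 * a multiply-shift that maps a 32-bit hash uniformly onto map->len slots
 * without a modulo. A standalone sketch with made-up values:
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t cpus[] = { 0, 2, 4, 6 };	/* hypothetical RPS map */
	uint32_t len = 4;
	uint32_t hashes[] = { 0x00000000u, 0x40000000u,
			      0x80000000u, 0xffffffffu };

	for (int i = 0; i < 4; i++) {
		/* hash * len spans 0 .. len * 2^32; the top 32 bits of the
		 * product select an index in [0, len).
		 */
		uint32_t idx = ((uint64_t)hashes[i] * len) >> 32;

		printf("hash 0x%08x -> cpu %u\n", hashes[i], cpus[idx]);
	}
	return 0;
}
#endif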
3131
Ben Hutchingsc4454772011-01-19 11:03:53 +00003132#ifdef CONFIG_RFS_ACCEL
3133
3134/**
3135 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3136 * @dev: Device on which the filter was set
3137 * @rxq_index: RX queue index
3138 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3139 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3140 *
3141 * Drivers that implement ndo_rx_flow_steer() should periodically call
3142 * this function for each installed filter and remove the filters for
3143 * which it returns %true.
3144 */
3145bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3146 u32 flow_id, u16 filter_id)
3147{
3148 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3149 struct rps_dev_flow_table *flow_table;
3150 struct rps_dev_flow *rflow;
3151 bool expire = true;
3152 int cpu;
3153
3154 rcu_read_lock();
3155 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3156 if (flow_table && flow_id <= flow_table->mask) {
3157 rflow = &flow_table->flows[flow_id];
3158 cpu = ACCESS_ONCE(rflow->cpu);
3159 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3160 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3161 rflow->last_qtail) <
3162 (int)(10 * flow_table->mask)))
3163 expire = false;
3164 }
3165 rcu_read_unlock();
3166 return expire;
3167}
3168EXPORT_SYMBOL(rps_may_expire_flow);
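/*
 * Usage sketch (hypothetical driver state, not from dev.c): per the
 * kernel-doc above, a driver implementing ndo_rx_flow_steer() would
 * periodically walk its installed filters and tear down those for which
 * rps_may_expire_flow() returns true. struct my_filter, my_remove_filter()
 * and the assumption that the filter id equals the table index are all
 * invented for illustration.
 */
#if 0 /* illustrative sketch only */
static void my_expire_scan(struct net_device *dev, struct my_filter *filters,
			   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!filters[i].in_use)
			continue;
		/* filter_id is whatever ndo_rx_flow_steer() returned;
		 * here we assume it was the table index i.
		 */
		if (rps_may_expire_flow(dev, filters[i].rxq_index,
					filters[i].flow_id, i))
			my_remove_filter(dev, &filters[i]);	/* hypothetical */
	}
}
#endif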
3169
3170#endif /* CONFIG_RFS_ACCEL */
3171
Tom Herbert0a9627f2010-03-16 08:03:29 +00003172/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003173static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003174{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003175 struct softnet_data *sd = data;
3176
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003177 ____napi_schedule(sd, &sd->backlog);
Changli Gaodee42872010-05-02 05:42:16 +00003178 sd->received_rps++;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003179}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003180
Tom Herbertfec5e652010-04-16 16:01:27 -07003181#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003182
3183/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003184 * Check whether this softnet_data structure belongs to another CPU.
3185 * If yes, queue it to our IPI list and return 1.
3186 * If no, return 0.
3187 */
3188static int rps_ipi_queued(struct softnet_data *sd)
3189{
3190#ifdef CONFIG_RPS
3191 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3192
3193 if (sd != mysd) {
3194 sd->rps_ipi_next = mysd->rps_ipi_list;
3195 mysd->rps_ipi_list = sd;
3196
3197 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3198 return 1;
3199 }
3200#endif /* CONFIG_RPS */
3201 return 0;
3202}
3203
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003204#ifdef CONFIG_NET_FLOW_LIMIT
3205int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3206#endif
3207
3208static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3209{
3210#ifdef CONFIG_NET_FLOW_LIMIT
3211 struct sd_flow_limit *fl;
3212 struct softnet_data *sd;
3213 unsigned int old_flow, new_flow;
3214
3215 if (qlen < (netdev_max_backlog >> 1))
3216 return false;
3217
3218 sd = &__get_cpu_var(softnet_data);
3219
3220 rcu_read_lock();
3221 fl = rcu_dereference(sd->flow_limit);
3222 if (fl) {
Tom Herbert3958afa1b2013-12-15 22:12:06 -08003223 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003224 old_flow = fl->history[fl->history_head];
3225 fl->history[fl->history_head] = new_flow;
3226
3227 fl->history_head++;
3228 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3229
3230 if (likely(fl->buckets[old_flow]))
3231 fl->buckets[old_flow]--;
3232
3233 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3234 fl->count++;
3235 rcu_read_unlock();
3236 return true;
3237 }
3238 }
3239 rcu_read_unlock();
3240#endif
3241 return false;
3242}
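/*
 * The history ring above can be modelled standalone: each new flow hash
 * bumps its bucket and decrements the bucket of the hash that drops out of
 * the FLOW_LIMIT_HISTORY window; a flow owning more than half the window
 * gets its packets dropped. A small sketch with an 8-entry history and 4
 * buckets (the real code uses larger, per-CPU tables):
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

#define HISTORY	8
#define BUCKETS	4

int main(void)
{
	unsigned int history[HISTORY] = { 0 };		/* all slots: hash 0 */
	unsigned int buckets[BUCKETS] = { [0] = HISTORY };
	unsigned int head = 0;
	unsigned int flood[] = { 3, 3, 3, 3, 3, 3 };	/* one hot flow */

	for (unsigned int i = 0; i < 6; i++) {
		unsigned int new_flow = flood[i] & (BUCKETS - 1);
		unsigned int old_flow = history[head];

		history[head] = new_flow;
		head = (head + 1) & (HISTORY - 1);
		if (buckets[old_flow])
			buckets[old_flow]--;
		/* Flow 3 crosses HISTORY/2 on its fifth packet and
		 * would be dropped from then on.
		 */
		if (++buckets[new_flow] > (HISTORY >> 1))
			printf("packet %u of flow %u would be dropped\n",
			       i, new_flow);
	}
	return 0;
}
#endif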
3243
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003244/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00003245 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3246 * queue (may be a remote CPU queue).
3247 */
Tom Herbertfec5e652010-04-16 16:01:27 -07003248static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3249 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003250{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003251 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003252 unsigned long flags;
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003253 unsigned int qlen;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003254
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003255 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003256
3257 local_irq_save(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003258
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003259 rps_lock(sd);
Willem de Bruijn99bbc702013-05-20 04:02:32 +00003260 qlen = skb_queue_len(&sd->input_pkt_queue);
3261 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
Changli Gao6e7676c2010-04-27 15:07:33 -07003262 if (skb_queue_len(&sd->input_pkt_queue)) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00003263enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003264 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003265 input_queue_tail_incr_save(sd, qtail);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003266 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003267 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003268 return NET_RX_SUCCESS;
3269 }
3270
Eric Dumazetebda37c22010-05-06 23:51:21 +00003271		/* Schedule NAPI for the backlog device.
3272		 * We can use a non-atomic operation since we own the queue lock.
3273		 */
3274 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003275 if (!rps_ipi_queued(sd))
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07003276 ____napi_schedule(sd, &sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003277 }
3278 goto enqueue;
3279 }
3280
Changli Gaodee42872010-05-02 05:42:16 +00003281 sd->dropped++;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003282 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003283
Tom Herbert0a9627f2010-03-16 08:03:29 +00003284 local_irq_restore(flags);
3285
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003286 atomic_long_inc(&skb->dev->rx_dropped);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003287 kfree_skb(skb);
3288 return NET_RX_DROP;
3289}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003291static int netif_rx_internal(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003293 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294
Eric Dumazet588f0332011-11-15 04:12:55 +00003295 net_timestamp_check(netdev_tstamp_prequeue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296
Koki Sanagicf66ba52010-08-23 18:45:02 +09003297 trace_netif_rx(skb);
Eric Dumazetdf334542010-03-24 19:13:54 +00003298#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003299 if (static_key_false(&rps_needed)) {
Tom Herbertfec5e652010-04-16 16:01:27 -07003300 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003301 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302
Changli Gaocece1942010-08-07 20:35:43 -07003303 preempt_disable();
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003304 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003305
3306 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003307 if (cpu < 0)
3308 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07003309
3310 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3311
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003312 rcu_read_unlock();
Changli Gaocece1942010-08-07 20:35:43 -07003313 preempt_enable();
Eric Dumazetadc93002011-11-17 03:13:26 +00003314 } else
3315#endif
Tom Herbertfec5e652010-04-16 16:01:27 -07003316 {
3317 unsigned int qtail;
3318 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3319 put_cpu();
3320 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07003321 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003323
3324/**
3325 * netif_rx - post buffer to the network code
3326 * @skb: buffer to post
3327 *
3328 * This function receives a packet from a device driver and queues it for
3329 * the upper (protocol) levels to process. It always succeeds. The buffer
3330 * may be dropped during processing for congestion control or by the
3331 * protocol layers.
3332 *
3333 * return values:
3334 * NET_RX_SUCCESS (no congestion)
3335 * NET_RX_DROP (packet was dropped)
3336 *
3337 */
3338
3339int netif_rx(struct sk_buff *skb)
3340{
3341 trace_netif_rx_entry(skb);
3342
3343 return netif_rx_internal(skb);
3344}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003345EXPORT_SYMBOL(netif_rx);
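/*
 * Usage sketch (hypothetical driver code, not from dev.c): per the
 * kernel-doc above, a non-NAPI driver hands a received frame to the stack
 * with netif_rx(), typically from its interrupt handler, after setting the
 * protocol via eth_type_trans(). my_rx_frame() is an invented name.
 */
#if 0 /* illustrative sketch only */
static void my_rx_frame(struct net_device *dev, const void *data, size_t len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);		/* always succeeds; may drop under congestion */
}
#endif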
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346
3347int netif_rx_ni(struct sk_buff *skb)
3348{
3349 int err;
3350
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003351 trace_netif_rx_ni_entry(skb);
3352
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 preempt_disable();
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003354 err = netif_rx_internal(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355 if (local_softirq_pending())
3356 do_softirq();
3357 preempt_enable();
3358
3359 return err;
3360}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361EXPORT_SYMBOL(netif_rx_ni);
3362
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363static void net_tx_action(struct softirq_action *h)
3364{
3365 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3366
3367 if (sd->completion_queue) {
3368 struct sk_buff *clist;
3369
3370 local_irq_disable();
3371 clist = sd->completion_queue;
3372 sd->completion_queue = NULL;
3373 local_irq_enable();
3374
3375 while (clist) {
3376 struct sk_buff *skb = clist;
3377 clist = clist->next;
3378
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003379 WARN_ON(atomic_read(&skb->users));
Eric Dumazete6247022013-12-05 04:45:08 -08003380 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3381 trace_consume_skb(skb);
3382 else
3383 trace_kfree_skb(skb, net_tx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 __kfree_skb(skb);
3385 }
3386 }
3387
3388 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07003389 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390
3391 local_irq_disable();
3392 head = sd->output_queue;
3393 sd->output_queue = NULL;
Changli Gaoa9cbd582010-04-26 23:06:24 +00003394 sd->output_queue_tailp = &sd->output_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 local_irq_enable();
3396
3397 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07003398 struct Qdisc *q = head;
3399 spinlock_t *root_lock;
3400
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401 head = head->next_sched;
3402
David S. Miller5fb66222008-08-02 20:02:43 -07003403 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07003404 if (spin_trylock(root_lock)) {
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003405 smp_mb__before_atomic();
Jarek Poplawskidef82a12008-08-17 21:54:43 -07003406 clear_bit(__QDISC_STATE_SCHED,
3407 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07003408 qdisc_run(q);
3409 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 } else {
David S. Miller195648b2008-08-19 04:00:36 -07003411 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003412 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07003413 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003414 } else {
Peter Zijlstra4e857c52014-03-17 18:06:10 +01003415 smp_mb__before_atomic();
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07003416 clear_bit(__QDISC_STATE_SCHED,
3417 &q->state);
3418 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 }
3420 }
3421 }
3422}
3423
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003424#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3425 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
Michał Mirosławda678292009-06-05 05:35:28 +00003426/* This hook is defined here for ATM LANE */
3427int (*br_fdb_test_addr_hook)(struct net_device *dev,
3428 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07003429EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00003430#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432#ifdef CONFIG_NET_CLS_ACT
3433/* TODO: Maybe we should just force sch_ingress to be compiled in
3434 * whenever CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
3435 * instructions (a compare and two extra stores) when sch_ingress is
3436 * not loaded but CONFIG_NET_CLS_ACT is enabled.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003437 * NOTE: This doesn't stop any functionality; if you don't have
3438 * the ingress scheduler, you just can't add policies on ingress.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 *
3440 */
Eric Dumazet24824a02010-10-02 06:11:55 +00003441static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003444 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07003445 int result = TC_ACT_OK;
3446 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003447
Stephen Hemmingerde384832010-08-01 00:33:23 -07003448 if (unlikely(MAX_RED_LOOP < ttl++)) {
Joe Perchese87cc472012-05-13 21:56:26 +00003449 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3450 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07003451 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 }
3453
Herbert Xuf697c3e2007-10-14 00:38:47 -07003454 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3455 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3456
David S. Miller83874002008-07-17 00:53:03 -07003457 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07003458 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07003459 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07003460 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3461 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07003462 spin_unlock(qdisc_lock(q));
3463 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07003464
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 return result;
3466}
Herbert Xuf697c3e2007-10-14 00:38:47 -07003467
3468static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3469 struct packet_type **pt_prev,
3470 int *ret, struct net_device *orig_dev)
3471{
Eric Dumazet24824a02010-10-02 06:11:55 +00003472 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3473
3474 if (!rxq || rxq->qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07003475 goto out;
3476
3477 if (*pt_prev) {
3478 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3479 *pt_prev = NULL;
Herbert Xuf697c3e2007-10-14 00:38:47 -07003480 }
3481
Eric Dumazet24824a02010-10-02 06:11:55 +00003482 switch (ing_filter(skb, rxq)) {
Herbert Xuf697c3e2007-10-14 00:38:47 -07003483 case TC_ACT_SHOT:
3484 case TC_ACT_STOLEN:
3485 kfree_skb(skb);
3486 return NULL;
3487 }
3488
3489out:
3490 skb->tc_verd = 0;
3491 return skb;
3492}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493#endif
3494
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003495/**
3496 * netdev_rx_handler_register - register receive handler
3497 * @dev: device to register a handler for
3498 * @rx_handler: receive handler to register
Jiri Pirko93e2c322010-06-10 03:34:59 +00003499 * @rx_handler_data: data pointer that is used by rx handler
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003500 *
Masanari Iidae2278672014-02-18 22:54:36 +09003501 * Register a receive handler for a device. This handler will then be
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003502 * called from __netif_receive_skb. A negative errno code is returned
3503 * on a failure.
3504 *
3505 * The caller must hold the rtnl_mutex.
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003506 *
3507 * For a general description of rx_handler, see enum rx_handler_result.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003508 */
3509int netdev_rx_handler_register(struct net_device *dev,
Jiri Pirko93e2c322010-06-10 03:34:59 +00003510 rx_handler_func_t *rx_handler,
3511 void *rx_handler_data)
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003512{
3513 ASSERT_RTNL();
3514
3515 if (dev->rx_handler)
3516 return -EBUSY;
3517
Eric Dumazet00cfec32013-03-29 03:01:22 +00003518 /* Note: rx_handler_data must be set before rx_handler */
Jiri Pirko93e2c322010-06-10 03:34:59 +00003519 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003520 rcu_assign_pointer(dev->rx_handler, rx_handler);
3521
3522 return 0;
3523}
3524EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
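/*
 * Usage sketch (hypothetical names, not from dev.c): an rx_handler in the
 * style of bridge or macvlan takes the skb by reference so it can steal or
 * redirect it, and is registered under rtnl_lock as the kernel-doc above
 * requires. struct my_port, my_should_steal() and my_deliver() are
 * invented for illustration.
 */
#if 0 /* illustrative sketch only */
static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (my_should_steal(port, skb)) {	/* hypothetical predicate */
		my_deliver(port, skb);		/* we now own the skb */
		return RX_HANDLER_CONSUMED;
	}
	return RX_HANDLER_PASS;			/* continue normal receive */
}

static int my_attach(struct net_device *dev, struct my_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_rx_handler, port);
	rtnl_unlock();
	return err;
}
#endif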
3525
3526/**
3527 * netdev_rx_handler_unregister - unregister receive handler
3528 * @dev: device to unregister a handler from
3529 *
Kusanagi Kouichi166ec362013-03-18 02:59:52 +00003530 * Unregister a receive handler from a device.
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003531 *
3532 * The caller must hold the rtnl_mutex.
3533 */
3534void netdev_rx_handler_unregister(struct net_device *dev)
3535{
3536
3537 ASSERT_RTNL();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003538 RCU_INIT_POINTER(dev->rx_handler, NULL);
Eric Dumazet00cfec32013-03-29 03:01:22 +00003539	/* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3540	 * section is guaranteed to also see a non-NULL
3541	 * rx_handler_data.
3542	 */
3543 synchronize_net();
Stephen Hemmingera9b3cd72011-08-01 16:19:00 +00003544 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003545}
3546EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3547
Mel Gormanb4b9e352012-07-31 16:44:26 -07003548/*
3549 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3550 * the special handling of PFMEMALLOC skbs.
3551 */
3552static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3553{
3554 switch (skb->protocol) {
Joe Perches2b8837a2014-03-12 10:04:17 -07003555 case htons(ETH_P_ARP):
3556 case htons(ETH_P_IP):
3557 case htons(ETH_P_IPV6):
3558 case htons(ETH_P_8021Q):
3559 case htons(ETH_P_8021AD):
Mel Gormanb4b9e352012-07-31 16:44:26 -07003560 return true;
3561 default:
3562 return false;
3563 }
3564}
3565
David S. Miller9754e292013-02-14 15:57:38 -05003566static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567{
3568 struct packet_type *ptype, *pt_prev;
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003569 rx_handler_func_t *rx_handler;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003570 struct net_device *orig_dev;
David S. Miller63d8ea72011-02-28 10:48:59 -08003571 struct net_device *null_or_dev;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003572 bool deliver_exact = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08003574 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575
Eric Dumazet588f0332011-11-15 04:12:55 +00003576 net_timestamp_check(!netdev_tstamp_prequeue, skb);
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07003577
Koki Sanagicf66ba52010-08-23 18:45:02 +09003578 trace_netif_receive_skb(skb);
Patrick McHardy9b22ea52008-11-04 14:49:57 -08003579
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07003580 orig_dev = skb->dev;
Jiri Pirko1765a572011-02-12 06:48:36 +00003581
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07003582 skb_reset_network_header(skb);
Eric Dumazetfda55ec2013-01-07 09:28:21 +00003583 if (!skb_transport_header_was_set(skb))
3584 skb_reset_transport_header(skb);
Jiri Pirko0b5c9db2011-06-10 06:56:58 +00003585 skb_reset_mac_len(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586
3587 pt_prev = NULL;
3588
3589 rcu_read_lock();
3590
David S. Miller63d8ea72011-02-28 10:48:59 -08003591another_round:
David S. Millerb6858172012-07-23 16:27:54 -07003592 skb->skb_iif = skb->dev->ifindex;
David S. Miller63d8ea72011-02-28 10:48:59 -08003593
3594 __this_cpu_inc(softnet_data.processed);
3595
Patrick McHardy8ad227f2013-04-19 02:04:31 +00003596 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3597 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003598 skb = vlan_untag(skb);
3599 if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003600 goto unlock;
Jiri Pirkobcc6d472011-04-07 19:48:33 +00003601 }
3602
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603#ifdef CONFIG_NET_CLS_ACT
3604 if (skb->tc_verd & TC_NCLS) {
3605 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3606 goto ncls;
3607 }
3608#endif
3609
David S. Miller9754e292013-02-14 15:57:38 -05003610 if (pfmemalloc)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003611 goto skip_taps;
3612
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613 list_for_each_entry_rcu(ptype, &ptype_all, list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003614 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003615 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003616 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 pt_prev = ptype;
3618 }
3619 }
3620
Mel Gormanb4b9e352012-07-31 16:44:26 -07003621skip_taps:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07003623 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3624 if (!skb)
Mel Gormanb4b9e352012-07-31 16:44:26 -07003625 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626ncls:
3627#endif
3628
David S. Miller9754e292013-02-14 15:57:38 -05003629 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003630 goto drop;
3631
John Fastabend24257172011-10-10 09:16:41 +00003632 if (vlan_tx_tag_present(skb)) {
3633 if (pt_prev) {
3634 ret = deliver_skb(skb, pt_prev, orig_dev);
3635 pt_prev = NULL;
3636 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003637 if (vlan_do_receive(&skb))
John Fastabend24257172011-10-10 09:16:41 +00003638 goto another_round;
3639 else if (unlikely(!skb))
Mel Gormanb4b9e352012-07-31 16:44:26 -07003640 goto unlock;
John Fastabend24257172011-10-10 09:16:41 +00003641 }
3642
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003643 rx_handler = rcu_dereference(skb->dev->rx_handler);
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003644 if (rx_handler) {
3645 if (pt_prev) {
3646 ret = deliver_skb(skb, pt_prev, orig_dev);
3647 pt_prev = NULL;
3648 }
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003649 switch (rx_handler(&skb)) {
3650 case RX_HANDLER_CONSUMED:
Cristian Bercaru3bc1b1a2013-03-08 07:03:38 +00003651 ret = NET_RX_SUCCESS;
Mel Gormanb4b9e352012-07-31 16:44:26 -07003652 goto unlock;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003653 case RX_HANDLER_ANOTHER:
David S. Miller63d8ea72011-02-28 10:48:59 -08003654 goto another_round;
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003655 case RX_HANDLER_EXACT:
3656 deliver_exact = true;
3657 case RX_HANDLER_PASS:
3658 break;
3659 default:
3660 BUG();
3661 }
Jiri Pirkoab95bfe2010-06-01 21:52:08 +00003662 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663
Eric Dumazetd4b812d2013-07-18 07:19:26 -07003664 if (unlikely(vlan_tx_tag_present(skb))) {
3665 if (vlan_tx_tag_get_id(skb))
3666 skb->pkt_type = PACKET_OTHERHOST;
3667 /* Note: we might in the future use prio bits
3668 * and set skb->priority like in vlan_do_receive()
3669 * For the time being, just ignore Priority Code Point
3670 */
3671 skb->vlan_tci = 0;
3672 }
Florian Zumbiehl48cc32d32012-10-07 15:51:58 +00003673
David S. Miller63d8ea72011-02-28 10:48:59 -08003674 /* deliver only exact match when indicated */
Jiri Pirko8a4eb572011-03-12 03:14:39 +00003675 null_or_dev = deliver_exact ? skb->dev : NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00003676
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003678 list_for_each_entry_rcu(ptype,
3679 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
David S. Miller63d8ea72011-02-28 10:48:59 -08003680 if (ptype->type == type &&
Jiri Pirkoe3f48d32011-02-28 20:26:31 +00003681 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3682 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003683 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07003684 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 pt_prev = ptype;
3686 }
3687 }
3688
3689 if (pt_prev) {
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003690 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
Michael S. Tsirkin0e698bf2012-09-15 22:44:16 +00003691 goto drop;
Michael S. Tsirkin1080e512012-07-20 09:23:17 +00003692 else
3693 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694 } else {
Mel Gormanb4b9e352012-07-31 16:44:26 -07003695drop:
Eric Dumazetcaf586e2010-09-30 21:06:55 +00003696 atomic_long_inc(&skb->dev->rx_dropped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 kfree_skb(skb);
3698		/* Jamal, now you will not be able to escape explaining
3699		 * to me how you were going to use this. :-)
3700		 */
3701 ret = NET_RX_DROP;
3702 }
3703
Mel Gormanb4b9e352012-07-31 16:44:26 -07003704unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 rcu_read_unlock();
David S. Miller9754e292013-02-14 15:57:38 -05003706 return ret;
3707}
3708
3709static int __netif_receive_skb(struct sk_buff *skb)
3710{
3711 int ret;
3712
3713 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3714 unsigned long pflags = current->flags;
3715
3716 /*
3717 * PFMEMALLOC skbs are special, they should
3718 * - be delivered to SOCK_MEMALLOC sockets only
3719 * - stay away from userspace
3720 * - have bounded memory usage
3721 *
3722 * Use PF_MEMALLOC as this saves us from propagating the allocation
3723 * context down to all allocation sites.
3724 */
3725 current->flags |= PF_MEMALLOC;
3726 ret = __netif_receive_skb_core(skb, true);
3727 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3728 } else
3729 ret = __netif_receive_skb_core(skb, false);
3730
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731 return ret;
3732}
Tom Herbert0a9627f2010-03-16 08:03:29 +00003733
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003734static int netif_receive_skb_internal(struct sk_buff *skb)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003735{
Eric Dumazet588f0332011-11-15 04:12:55 +00003736 net_timestamp_check(netdev_tstamp_prequeue, skb);
Eric Dumazet3b098e22010-05-15 23:57:10 -07003737
Richard Cochranc1f19b52010-07-17 08:49:36 +00003738 if (skb_defer_rx_timestamp(skb))
3739 return NET_RX_SUCCESS;
3740
Eric Dumazetdf334542010-03-24 19:13:54 +00003741#ifdef CONFIG_RPS
Ingo Molnarc5905af2012-02-24 08:31:31 +01003742 if (static_key_false(&rps_needed)) {
Eric Dumazet3b098e22010-05-15 23:57:10 -07003743 struct rps_dev_flow voidflow, *rflow = &voidflow;
3744 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003745
Eric Dumazet3b098e22010-05-15 23:57:10 -07003746 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003747
Eric Dumazet3b098e22010-05-15 23:57:10 -07003748 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Tom Herbertfec5e652010-04-16 16:01:27 -07003749
Eric Dumazet3b098e22010-05-15 23:57:10 -07003750 if (cpu >= 0) {
3751 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3752 rcu_read_unlock();
Eric Dumazetadc93002011-11-17 03:13:26 +00003753 return ret;
Eric Dumazet3b098e22010-05-15 23:57:10 -07003754 }
Eric Dumazetadc93002011-11-17 03:13:26 +00003755 rcu_read_unlock();
Tom Herbertfec5e652010-04-16 16:01:27 -07003756 }
Tom Herbert1e94d722010-03-18 17:45:44 -07003757#endif
Eric Dumazetadc93002011-11-17 03:13:26 +00003758 return __netif_receive_skb(skb);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003759}
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003760
3761/**
3762 * netif_receive_skb - process receive buffer from network
3763 * @skb: buffer to process
3764 *
3765 * netif_receive_skb() is the main receive data processing function.
3766 * It always succeeds. The buffer may be dropped during processing
3767 * for congestion control or by the protocol layers.
3768 *
3769 * This function may only be called from softirq context and interrupts
3770 * should be enabled.
3771 *
3772 * Return values (usually ignored):
3773 * NET_RX_SUCCESS: no congestion
3774 * NET_RX_DROP: packet was dropped
3775 */
3776int netif_receive_skb(struct sk_buff *skb)
3777{
3778 trace_netif_receive_skb_entry(skb);
3779
3780 return netif_receive_skb_internal(skb);
3781}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003782EXPORT_SYMBOL(netif_receive_skb);
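
/* Usage sketch (added for illustration, not part of this file): how a
 * simple non-GRO driver hands a completed RX buffer to the stack. The
 * function and variable names are hypothetical; only eth_type_trans()
 * and netif_receive_skb() are real kernel APIs, and per the comment
 * above this must run in softirq context with interrupts enabled.
 */
static void example_rx_deliver(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_receive_skb(skb);		/* NET_RX_* return value usually ignored */
}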
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783
Eric Dumazet88751272010-04-19 05:07:33 +00003784/* Network device is going away; flush any packets still pending.
3785 * Called with irqs disabled.
3786 */
Changli Gao152102c2010-03-30 20:16:22 +00003787static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003788{
Changli Gao152102c2010-03-30 20:16:22 +00003789 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003790 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003791 struct sk_buff *skb, *tmp;
3792
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003793 rps_lock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003794 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003795 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003796 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003797 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003798 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003799 }
Changli Gao6e7676c2010-04-27 15:07:33 -07003800 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003801 rps_unlock(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003802
3803 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3804 if (skb->dev == dev) {
3805 __skb_unlink(skb, &sd->process_queue);
3806 kfree_skb(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00003807 input_queue_head_incr(sd);
Changli Gao6e7676c2010-04-27 15:07:33 -07003808 }
3809 }
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07003810}
3811
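/* Caller sketch (assumption, added for illustration): the device
 * unregister path is expected to drive flush_backlog() on every cpu,
 * e.g. via something like
 *
 *	on_each_cpu(flush_backlog, dev, true);
 *
 * so each per-cpu queue drops the dying device's packets with irqs off.
 */
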
Herbert Xud565b0a2008-12-15 23:38:52 -08003812static int napi_gro_complete(struct sk_buff *skb)
3813{
Vlad Yasevich22061d82012-11-15 08:49:11 +00003814 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003815 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003816 struct list_head *head = &offload_base;
Herbert Xud565b0a2008-12-15 23:38:52 -08003817 int err = -ENOENT;
3818
Eric Dumazetc3c7c252012-12-06 13:54:59 +00003819 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3820
Herbert Xufc59f9a2009-04-14 15:11:06 -07003821 if (NAPI_GRO_CB(skb)->count == 1) {
3822 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003823 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07003824 }
Herbert Xud565b0a2008-12-15 23:38:52 -08003825
3826 rcu_read_lock();
3827 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003828 if (ptype->type != type || !ptype->callbacks.gro_complete)
Herbert Xud565b0a2008-12-15 23:38:52 -08003829 continue;
3830
Jerry Chu299603e82013-12-11 20:53:45 -08003831 err = ptype->callbacks.gro_complete(skb, 0);
Herbert Xud565b0a2008-12-15 23:38:52 -08003832 break;
3833 }
3834 rcu_read_unlock();
3835
3836 if (err) {
3837 WARN_ON(&ptype->list == head);
3838 kfree_skb(skb);
3839 return NET_RX_SUCCESS;
3840 }
3841
3842out:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00003843 return netif_receive_skb_internal(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003844}
3845
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003846/* napi->gro_list contains packets ordered by age.
3847 * The youngest packets are at its head.
3848 * Complete skbs in reverse order to reduce latencies.
3849 */
3850void napi_gro_flush(struct napi_struct *napi, bool flush_old)
Herbert Xud565b0a2008-12-15 23:38:52 -08003851{
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003852 struct sk_buff *skb, *prev = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003853
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003854 /* scan list and build reverse chain */
3855 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3856 skb->prev = prev;
3857 prev = skb;
Herbert Xud565b0a2008-12-15 23:38:52 -08003858 }
3859
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00003860 for (skb = prev; skb; skb = prev) {
3861 skb->next = NULL;
3862
3863 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3864 return;
3865
3866 prev = skb->prev;
3867 napi_gro_complete(skb);
3868 napi->gro_count--;
3869 }
3870
Herbert Xud565b0a2008-12-15 23:38:52 -08003871 napi->gro_list = NULL;
3872}
Eric Dumazet86cac582010-08-31 18:25:32 +00003873EXPORT_SYMBOL(napi_gro_flush);
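
/* Usage sketch (added for illustration): napi_complete() below already
 * flushes via napi_gro_flush(n, false), so an explicit call is only
 * needed on custom exit paths, mirroring what net_rx_action() does:
 *
 *	if (napi->gro_list)
 *		napi_gro_flush(napi, HZ >= 1000);  (flush all if HZ < 1000)
 */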
Herbert Xud565b0a2008-12-15 23:38:52 -08003874
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003875static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3876{
3877 struct sk_buff *p;
3878 unsigned int maclen = skb->dev->hard_header_len;
Tom Herbert0b4cec82014-01-15 08:58:06 -08003879 u32 hash = skb_get_hash_raw(skb);
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003880
3881 for (p = napi->gro_list; p; p = p->next) {
3882 unsigned long diffs;
3883
Tom Herbert0b4cec82014-01-15 08:58:06 -08003884 NAPI_GRO_CB(p)->flush = 0;
3885
3886 if (hash != skb_get_hash_raw(p)) {
3887 NAPI_GRO_CB(p)->same_flow = 0;
3888 continue;
3889 }
3890
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003891 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3892 diffs |= p->vlan_tci ^ skb->vlan_tci;
3893 if (maclen == ETH_HLEN)
3894 diffs |= compare_ether_header(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07003895 skb_mac_header(skb));
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003896 else if (!diffs)
3897 diffs = memcmp(skb_mac_header(p),
Eric Dumazeta50e2332014-03-29 21:28:21 -07003898 skb_mac_header(skb),
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003899 maclen);
3900 NAPI_GRO_CB(p)->same_flow = !diffs;
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003901 }
3902}
3903
Jerry Chu299603e82013-12-11 20:53:45 -08003904static void skb_gro_reset_offset(struct sk_buff *skb)
3905{
3906 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3907 const skb_frag_t *frag0 = &pinfo->frags[0];
3908
3909 NAPI_GRO_CB(skb)->data_offset = 0;
3910 NAPI_GRO_CB(skb)->frag0 = NULL;
3911 NAPI_GRO_CB(skb)->frag0_len = 0;
3912
3913 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3914 pinfo->nr_frags &&
3915 !PageHighMem(skb_frag_page(frag0))) {
3916 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3917 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
Herbert Xud565b0a2008-12-15 23:38:52 -08003918 }
3919}
3920
Eric Dumazeta50e2332014-03-29 21:28:21 -07003921static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
3922{
3923 struct skb_shared_info *pinfo = skb_shinfo(skb);
3924
3925 BUG_ON(skb->end - skb->tail < grow);
3926
3927 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3928
3929 skb->data_len -= grow;
3930 skb->tail += grow;
3931
3932 pinfo->frags[0].page_offset += grow;
3933 skb_frag_size_sub(&pinfo->frags[0], grow);
3934
3935 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
3936 skb_frag_unref(skb, 0);
3937 memmove(pinfo->frags, pinfo->frags + 1,
3938 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
3939 }
3940}
3941
Rami Rosenbb728822012-11-28 21:55:25 +00003942static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08003943{
3944 struct sk_buff **pp = NULL;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003945 struct packet_offload *ptype;
Herbert Xud565b0a2008-12-15 23:38:52 -08003946 __be16 type = skb->protocol;
Vlad Yasevich22061d82012-11-15 08:49:11 +00003947 struct list_head *head = &offload_base;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003948 int same_flow;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003949 enum gro_result ret;
Eric Dumazeta50e2332014-03-29 21:28:21 -07003950 int grow;
Herbert Xud565b0a2008-12-15 23:38:52 -08003951
Eric W. Biederman9c62a682014-03-14 20:51:52 -07003952 if (!(skb->dev->features & NETIF_F_GRO))
Herbert Xud565b0a2008-12-15 23:38:52 -08003953 goto normal;
3954
David S. Miller21dc3302010-08-23 00:13:46 -07003955 if (skb_is_gso(skb) || skb_has_frag_list(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08003956 goto normal;
3957
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003958 gro_list_prepare(napi, skb);
Jerry Chubf5a7552014-01-07 10:23:19 -08003959 NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
Eric Dumazet89c5fa32012-12-10 13:28:16 +00003960
Herbert Xud565b0a2008-12-15 23:38:52 -08003961 rcu_read_lock();
3962 list_for_each_entry_rcu(ptype, head, list) {
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003963 if (ptype->type != type || !ptype->callbacks.gro_receive)
Herbert Xud565b0a2008-12-15 23:38:52 -08003964 continue;
3965
Herbert Xu86911732009-01-29 14:19:50 +00003966 skb_set_network_header(skb, skb_gro_offset(skb));
Eric Dumazetefd94502013-02-14 17:31:48 +00003967 skb_reset_mac_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003968 NAPI_GRO_CB(skb)->same_flow = 0;
3969 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003970 NAPI_GRO_CB(skb)->free = 0;
Or Gerlitzb582ef02014-01-20 13:59:19 +02003971 NAPI_GRO_CB(skb)->udp_mark = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003972
Vlad Yasevichf191a1d2012-11-15 08:49:23 +00003973 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003974 break;
3975 }
3976 rcu_read_unlock();
3977
3978 if (&ptype->list == head)
3979 goto normal;
3980
Herbert Xu0da2afd52008-12-26 14:57:42 -08003981 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003982 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003983
Herbert Xud565b0a2008-12-15 23:38:52 -08003984 if (pp) {
3985 struct sk_buff *nskb = *pp;
3986
3987 *pp = nskb->next;
3988 nskb->next = NULL;
3989 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003990 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003991 }
3992
Herbert Xu0da2afd52008-12-26 14:57:42 -08003993 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003994 goto ok;
3995
Eric Dumazet600adc12014-01-09 14:12:19 -08003996 if (NAPI_GRO_CB(skb)->flush)
Herbert Xud565b0a2008-12-15 23:38:52 -08003997 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003998
Eric Dumazet600adc12014-01-09 14:12:19 -08003999 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4000 struct sk_buff *nskb = napi->gro_list;
4001
4002 /* locate the end of the list to select the 'oldest' flow */
4003 while (nskb->next) {
4004 pp = &nskb->next;
4005 nskb = *pp;
4006 }
4007 *pp = NULL;
4008 nskb->next = NULL;
4009 napi_gro_complete(nskb);
4010 } else {
4011 napi->gro_count++;
4012 }
Herbert Xud565b0a2008-12-15 23:38:52 -08004013 NAPI_GRO_CB(skb)->count = 1;
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004014 NAPI_GRO_CB(skb)->age = jiffies;
Eric Dumazet29e98242014-05-16 11:34:37 -07004015 NAPI_GRO_CB(skb)->last = skb;
Herbert Xu86911732009-01-29 14:19:50 +00004016 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004017 skb->next = napi->gro_list;
4018 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004019 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08004020
Herbert Xuad0f9902009-02-01 01:24:55 -08004021pull:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004022 grow = skb_gro_offset(skb) - skb_headlen(skb);
4023 if (grow > 0)
4024 gro_pull_from_frag0(skb, grow);
Herbert Xud565b0a2008-12-15 23:38:52 -08004025ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004026 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08004027
4028normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08004029 ret = GRO_NORMAL;
4030 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08004031}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004032
Jerry Chubf5a7552014-01-07 10:23:19 -08004033struct packet_offload *gro_find_receive_by_type(__be16 type)
4034{
4035 struct list_head *offload_head = &offload_base;
4036 struct packet_offload *ptype;
4037
4038 list_for_each_entry_rcu(ptype, offload_head, list) {
4039 if (ptype->type != type || !ptype->callbacks.gro_receive)
4040 continue;
4041 return ptype;
4042 }
4043 return NULL;
4044}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004045EXPORT_SYMBOL(gro_find_receive_by_type);
Jerry Chubf5a7552014-01-07 10:23:19 -08004046
4047struct packet_offload *gro_find_complete_by_type(__be16 type)
4048{
4049 struct list_head *offload_head = &offload_base;
4050 struct packet_offload *ptype;
4051
4052 list_for_each_entry_rcu(ptype, offload_head, list) {
4053 if (ptype->type != type || !ptype->callbacks.gro_complete)
4054 continue;
4055 return ptype;
4056 }
4057 return NULL;
4058}
Or Gerlitze27a2f82014-01-20 13:59:20 +02004059EXPORT_SYMBOL(gro_find_complete_by_type);
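
/* Usage sketch (hypothetical "foo" encapsulation, added for
 * illustration): an encapsulation's gro_receive callback can chain to
 * the inner protocol's offload handlers through these lookups, much as
 * the UDP tunnel offloads do. ETH_P_TEB is an assumed inner type.
 */
static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = htons(ETH_P_TEB);

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (ptype)
		pp = ptype->callbacks.gro_receive(head, skb);
	else
		NAPI_GRO_CB(skb)->flush = 1;	/* no handler: don't hold it */
	rcu_read_unlock();
	return pp;
}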
Herbert Xu96e93ea2009-01-06 10:49:34 -08004060
Rami Rosenbb728822012-11-28 21:55:25 +00004061static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08004062{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004063 switch (ret) {
4064 case GRO_NORMAL:
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004065 if (netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004066 ret = GRO_DROP;
4067 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004068
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004069 case GRO_DROP:
Herbert Xu5d38a072009-01-04 16:13:40 -08004070 kfree_skb(skb);
4071 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004072
Eric Dumazetdaa86542012-04-19 07:07:40 +00004073 case GRO_MERGED_FREE:
Eric Dumazetd7e88832012-04-30 08:10:34 +00004074 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4075 kmem_cache_free(skbuff_head_cache, skb);
4076 else
4077 __kfree_skb(skb);
Eric Dumazetdaa86542012-04-19 07:07:40 +00004078 break;
4079
Ben Hutchings5b252f02009-10-29 07:17:09 +00004080 case GRO_HELD:
4081 case GRO_MERGED:
4082 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08004083 }
4084
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004085 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004086}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004087
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004088gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004089{
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004090 trace_napi_gro_receive_entry(skb);
Herbert Xu86911732009-01-29 14:19:50 +00004091
Eric Dumazeta50e2332014-03-29 21:28:21 -07004092 skb_gro_reset_offset(skb);
4093
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004094 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08004095}
4096EXPORT_SYMBOL(napi_gro_receive);
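
/* Usage sketch (hypothetical driver, added for illustration): a typical
 * NAPI poll loop copies each completed buffer into an skb and feeds it
 * through GRO instead of calling netif_receive_skb() directly.
 * "struct example_priv" with netdev/napi members is assumed.
 */
static void example_rx_one(struct example_priv *priv, void *data, int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(priv->netdev, len);
	if (!skb)
		return;				/* drop; stats omitted */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, priv->netdev);
	napi_gro_receive(&priv->napi, skb);
}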
4097
stephen hemmingerd0c2b0d2010-10-19 07:12:10 +00004098static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004099{
Herbert Xu96e93ea2009-01-06 10:49:34 -08004100 __skb_pull(skb, skb_headlen(skb));
Eric Dumazet2a2a4592012-03-21 06:58:03 +00004101 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4102 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
Jesse Gross3701e512010-10-20 13:56:06 +00004103 skb->vlan_tci = 0;
Herbert Xu66c46d72011-01-29 20:44:54 -08004104 skb->dev = napi->dev;
Andy Gospodarek6d152e22011-02-02 14:53:25 -08004105 skb->skb_iif = 0;
Eric Dumazete33d0ba2014-04-03 09:28:10 -07004106 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08004107
4108 napi->skb = skb;
4109}
Herbert Xu96e93ea2009-01-06 10:49:34 -08004110
Herbert Xu76620aa2009-04-16 02:02:07 -07004111struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08004112{
Herbert Xu5d38a072009-01-04 16:13:40 -08004113 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004114
4115 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00004116 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
Eric Dumazet84b9cd62013-12-05 21:44:27 -08004117 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08004118 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08004119 return skb;
4120}
Herbert Xu76620aa2009-04-16 02:02:07 -07004121EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004122
Eric Dumazeta50e2332014-03-29 21:28:21 -07004123static gro_result_t napi_frags_finish(struct napi_struct *napi,
4124 struct sk_buff *skb,
4125 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004126{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004127 switch (ret) {
4128 case GRO_NORMAL:
Eric Dumazeta50e2332014-03-29 21:28:21 -07004129 case GRO_HELD:
4130 __skb_push(skb, ETH_HLEN);
4131 skb->protocol = eth_type_trans(skb, skb->dev);
4132 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004133 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00004134 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004135
4136 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004137 case GRO_MERGED_FREE:
4138 napi_reuse_skb(napi, skb);
4139 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00004140
4141 case GRO_MERGED:
4142 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004143 }
4144
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004145 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004146}
Herbert Xu5d0d9be2009-01-29 14:19:48 +00004147
Eric Dumazeta50e2332014-03-29 21:28:21 -07004148/* Upper GRO stack assumes network header starts at gro_offset=0
4149 * Drivers could call both napi_gro_frags() and napi_gro_receive()
4150 * We copy ethernet header into skb->data to have a common layout.
4151 */
Eric Dumazet4adb9c42012-05-18 20:49:06 +00004152static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08004153{
Herbert Xu76620aa2009-04-16 02:02:07 -07004154 struct sk_buff *skb = napi->skb;
Eric Dumazeta50e2332014-03-29 21:28:21 -07004155 const struct ethhdr *eth;
4156 unsigned int hlen = sizeof(*eth);
Herbert Xu76620aa2009-04-16 02:02:07 -07004157
4158 napi->skb = NULL;
4159
Eric Dumazeta50e2332014-03-29 21:28:21 -07004160 skb_reset_mac_header(skb);
4161 skb_gro_reset_offset(skb);
4162
4163 eth = skb_gro_header_fast(skb, 0);
4164 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4165 eth = skb_gro_header_slow(skb, hlen, 0);
4166 if (unlikely(!eth)) {
4167 napi_reuse_skb(napi, skb);
4168 return NULL;
4169 }
4170 } else {
4171 gro_pull_from_frag0(skb, hlen);
4172 NAPI_GRO_CB(skb)->frag0 += hlen;
4173 NAPI_GRO_CB(skb)->frag0_len -= hlen;
Herbert Xu76620aa2009-04-16 02:02:07 -07004174 }
Eric Dumazeta50e2332014-03-29 21:28:21 -07004175 __skb_pull(skb, hlen);
4176
4177 /*
4178 * This works because the only protocols we care about don't require
4179 * special handling.
4180 * We'll fix it up properly in napi_frags_finish()
4181 */
4182 skb->protocol = eth->h_proto;
Herbert Xu76620aa2009-04-16 02:02:07 -07004183
Herbert Xu76620aa2009-04-16 02:02:07 -07004184 return skb;
4185}
Herbert Xu76620aa2009-04-16 02:02:07 -07004186
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004187gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07004188{
4189 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08004190
4191 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07004192 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08004193
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00004194 trace_napi_gro_frags_entry(skb);
4195
Eric Dumazet89c5fa32012-12-10 13:28:16 +00004196 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08004197}
4198EXPORT_SYMBOL(napi_gro_frags);
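
/* Usage sketch (hypothetical page-based driver, added for illustration):
 * attach an RX page to the skb cached by napi_get_frags() and let
 * napi_gro_frags() pull the Ethernet header and run GRO.
 */
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int off, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb)) {
		put_page(page);			/* allocation failed: drop */
		return;
	}
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	napi_gro_frags(napi);
}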
4199
Eric Dumazete326bed2010-04-22 00:22:45 -07004200/*
Zhi Yong Wu855abcf2014-01-01 04:34:50 +08004201 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
Eric Dumazete326bed2010-04-22 00:22:45 -07004202 * Note: called with local irq disabled, but exits with local irq enabled.
4203 */
4204static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4205{
4206#ifdef CONFIG_RPS
4207 struct softnet_data *remsd = sd->rps_ipi_list;
4208
4209 if (remsd) {
4210 sd->rps_ipi_list = NULL;
4211
4212 local_irq_enable();
4213
4214		/* Send pending IPIs to kick RPS processing on remote cpus. */
4215 while (remsd) {
4216 struct softnet_data *next = remsd->rps_ipi_next;
4217
4218 if (cpu_online(remsd->cpu))
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +01004219 smp_call_function_single_async(remsd->cpu,
Frederic Weisbeckerfce8ad12014-02-24 16:40:01 +01004220 &remsd->csd);
Eric Dumazete326bed2010-04-22 00:22:45 -07004221 remsd = next;
4222 }
4223 } else
4224#endif
4225 local_irq_enable();
4226}
4227
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004228static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229{
4230 int work = 0;
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004231 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232
Eric Dumazete326bed2010-04-22 00:22:45 -07004233#ifdef CONFIG_RPS
4234	/* Check if we have pending IPIs; it's better to send them now
4235	 * than to wait until net_rx_action() ends.
4236 */
4237 if (sd->rps_ipi_list) {
4238 local_irq_disable();
4239 net_rps_action_and_irq_enable(sd);
4240 }
4241#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004242 napi->weight = weight_p;
Changli Gao6e7676c2010-04-27 15:07:33 -07004243 local_irq_disable();
Tom Herbert11ef7a82014-06-30 09:50:40 -07004244 while (1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246
Changli Gao6e7676c2010-04-27 15:07:33 -07004247 while ((skb = __skb_dequeue(&sd->process_queue))) {
Eric Dumazete4008272010-04-05 15:42:39 -07004248 local_irq_enable();
Changli Gao6e7676c2010-04-27 15:07:33 -07004249 __netif_receive_skb(skb);
Changli Gao6e7676c2010-04-27 15:07:33 -07004250 local_irq_disable();
Tom Herbert76cc8b12010-05-20 18:37:59 +00004251 input_queue_head_incr(sd);
4252 if (++work >= quota) {
4253 local_irq_enable();
4254 return work;
4255 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257
Changli Gao6e7676c2010-04-27 15:07:33 -07004258 rps_lock(sd);
Tom Herbert11ef7a82014-06-30 09:50:40 -07004259 if (skb_queue_empty(&sd->input_pkt_queue)) {
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004260 /*
4261 * Inline a custom version of __napi_complete().
4262			 * Only the current cpu owns and manipulates this napi,
Tom Herbert11ef7a82014-06-30 09:50:40 -07004263 * and NAPI_STATE_SCHED is the only possible flag set
4264 * on backlog.
4265 * We can use a plain write instead of clear_bit(),
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004266			 * and we don't need an smp_mb() memory barrier.
4267 */
4268 list_del(&napi->poll_list);
4269 napi->state = 0;
Tom Herbert11ef7a82014-06-30 09:50:40 -07004270 rps_unlock(sd);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004271
Tom Herbert11ef7a82014-06-30 09:50:40 -07004272 break;
Changli Gao6e7676c2010-04-27 15:07:33 -07004273 }
Tom Herbert11ef7a82014-06-30 09:50:40 -07004274
4275 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4276 &sd->process_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07004277 rps_unlock(sd);
4278 }
4279 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004281 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282}
4283
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004284/**
4285 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004286 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004287 *
4288 * The entry's receive function will be scheduled to run
4289 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08004290void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004291{
4292 unsigned long flags;
4293
4294 local_irq_save(flags);
Eric Dumazeteecfd7c2010-05-06 22:07:48 -07004295 ____napi_schedule(&__get_cpu_var(softnet_data), n);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004296 local_irq_restore(flags);
4297}
4298EXPORT_SYMBOL(__napi_schedule);
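
/* Usage sketch (hypothetical driver ISR, added for illustration): mask
 * the device's interrupts and defer the work to NAPI. Drivers normally
 * use the napi_schedule() wrapper, which only ends up in
 * __napi_schedule() once napi_schedule_prep() wins NAPI_STATE_SCHED.
 * example_mask_irqs() is a stand-in for a device register write.
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	example_mask_irqs(priv);
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}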
4299
Herbert Xud565b0a2008-12-15 23:38:52 -08004300void __napi_complete(struct napi_struct *n)
4301{
4302 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4303 BUG_ON(n->gro_list);
4304
4305 list_del(&n->poll_list);
Peter Zijlstra4e857c52014-03-17 18:06:10 +01004306 smp_mb__before_atomic();
Herbert Xud565b0a2008-12-15 23:38:52 -08004307 clear_bit(NAPI_STATE_SCHED, &n->state);
4308}
4309EXPORT_SYMBOL(__napi_complete);
4310
4311void napi_complete(struct napi_struct *n)
4312{
4313 unsigned long flags;
4314
4315 /*
4316 * don't let napi dequeue from the cpu poll list
4317	 * just in case it's running on a different cpu
4318 */
4319 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4320 return;
4321
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004322 napi_gro_flush(n, false);
Herbert Xud565b0a2008-12-15 23:38:52 -08004323 local_irq_save(flags);
4324 __napi_complete(n);
4325 local_irq_restore(flags);
4326}
4327EXPORT_SYMBOL(napi_complete);
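
/* Usage sketch (hypothetical poll routine, added for illustration):
 * napi_complete() may only be called when less than the full budget was
 * consumed, matching the ownership rules enforced by net_rx_action()
 * below. example_clean_rx()/example_unmask_irqs() are stand-ins.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv,
						 napi);
	int work = example_clean_rx(priv, budget);

	if (work < budget) {
		napi_complete(napi);
		example_unmask_irqs(priv);
	}
	return work;
}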
4328
Eliezer Tamiraf12fa62013-06-10 11:39:41 +03004329/* must be called under rcu_read_lock(), as we don't take a reference */
4330struct napi_struct *napi_by_id(unsigned int napi_id)
4331{
4332 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4333 struct napi_struct *napi;
4334
4335 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4336 if (napi->napi_id == napi_id)
4337 return napi;
4338
4339 return NULL;
4340}
4341EXPORT_SYMBOL_GPL(napi_by_id);
4342
4343void napi_hash_add(struct napi_struct *napi)
4344{
4345 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4346
4347 spin_lock(&napi_hash_lock);
4348
4349		/* 0 is not a valid id; we also skip an id that is taken.
4350		 * We expect both events to be extremely rare.
4351 */
4352 napi->napi_id = 0;
4353 while (!napi->napi_id) {
4354 napi->napi_id = ++napi_gen_id;
4355 if (napi_by_id(napi->napi_id))
4356 napi->napi_id = 0;
4357 }
4358
4359 hlist_add_head_rcu(&napi->napi_hash_node,
4360 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4361
4362 spin_unlock(&napi_hash_lock);
4363 }
4364}
4365EXPORT_SYMBOL_GPL(napi_hash_add);
4366
4367/* Warning: the caller is responsible for making sure an rcu grace period
4368 * is respected before freeing the memory containing @napi.
4369 */
4370void napi_hash_del(struct napi_struct *napi)
4371{
4372 spin_lock(&napi_hash_lock);
4373
4374 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4375 hlist_del_rcu(&napi->napi_hash_node);
4376
4377 spin_unlock(&napi_hash_lock);
4378}
4379EXPORT_SYMBOL_GPL(napi_hash_del);
4380
Herbert Xud565b0a2008-12-15 23:38:52 -08004381void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4382 int (*poll)(struct napi_struct *, int), int weight)
4383{
4384 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00004385 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004386 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08004387 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08004388 napi->poll = poll;
Eric Dumazet82dc3c62013-03-05 15:57:22 +00004389 if (weight > NAPI_POLL_WEIGHT)
4390 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4391 weight, dev->name);
Herbert Xud565b0a2008-12-15 23:38:52 -08004392 napi->weight = weight;
4393 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004394 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08004395#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08004396 spin_lock_init(&napi->poll_lock);
4397 napi->poll_owner = -1;
4398#endif
4399 set_bit(NAPI_STATE_SCHED, &napi->state);
4400}
4401EXPORT_SYMBOL(netif_napi_add);
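
/* Usage sketch (added for illustration): a probe path registers the
 * poll routine with the conventional weight checked above, then enables
 * the context at open time:
 *
 *	netif_napi_add(netdev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);
 */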
4402
4403void netif_napi_del(struct napi_struct *napi)
4404{
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08004405 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07004406 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08004407
Eric Dumazet289dccb2013-12-20 14:29:08 -08004408 kfree_skb_list(napi->gro_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08004409 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00004410 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08004411}
4412EXPORT_SYMBOL(netif_napi_del);
4413
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414static void net_rx_action(struct softirq_action *h)
4415{
Eric Dumazete326bed2010-04-22 00:22:45 -07004416 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004417 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07004418 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07004419 void *have;
4420
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421 local_irq_disable();
4422
Eric Dumazete326bed2010-04-22 00:22:45 -07004423 while (!list_empty(&sd->poll_list)) {
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004424 struct napi_struct *n;
4425 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004427		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08004428		 * Allow this to run for 2 jiffies, which allows
4429 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004430 */
Eric Dumazetd1f41b62013-03-05 07:15:13 +00004431 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432 goto softnet_break;
4433
4434 local_irq_enable();
4435
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004436 /* Even though interrupts have been re-enabled, this
4437 * access is safe because interrupts can only add new
4438 * entries to the tail of this list, and only ->poll()
4439 * calls can remove this head entry from the list.
4440 */
Eric Dumazete326bed2010-04-22 00:22:45 -07004441 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004443 have = netpoll_poll_lock(n);
4444
4445 weight = n->weight;
4446
David S. Miller0a7606c2007-10-29 21:28:47 -07004447 /* This NAPI_STATE_SCHED test is for avoiding a race
4448 * with netpoll's poll_napi(). Only the entity which
4449 * obtains the lock and sees NAPI_STATE_SCHED set will
4450 * actually make the ->poll() call. Therefore we avoid
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004451 * accidentally calling ->poll() when NAPI is not scheduled.
David S. Miller0a7606c2007-10-29 21:28:47 -07004452 */
4453 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00004454 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07004455 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00004456 trace_napi_poll(n);
4457 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004458
4459 WARN_ON_ONCE(work > weight);
4460
4461 budget -= work;
4462
4463 local_irq_disable();
4464
4465 /* Drivers must not modify the NAPI state if they
4466 * consume the entire weight. In such cases this code
4467 * still "owns" the NAPI instance and therefore can
4468 * move the instance around on the list at-will.
4469 */
David S. Millerfed17f32008-01-07 21:00:40 -08004470 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07004471 if (unlikely(napi_disable_pending(n))) {
4472 local_irq_enable();
4473 napi_complete(n);
4474 local_irq_disable();
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004475 } else {
4476 if (n->gro_list) {
4477 /* flush too old packets
4478 * If HZ < 1000, flush all packets.
4479 */
4480 local_irq_enable();
4481 napi_gro_flush(n, HZ >= 1000);
4482 local_irq_disable();
4483 }
Eric Dumazete326bed2010-04-22 00:22:45 -07004484 list_move_tail(&n->poll_list, &sd->poll_list);
Eric Dumazet2e71a6f2012-10-06 08:08:49 +00004485 }
David S. Millerfed17f32008-01-07 21:00:40 -08004486 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004487
4488 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489 }
4490out:
Eric Dumazete326bed2010-04-22 00:22:45 -07004491 net_rps_action_and_irq_enable(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00004492
Chris Leechdb217332006-06-17 21:24:58 -07004493#ifdef CONFIG_NET_DMA
4494 /*
4495 * There may not be any more sk_buffs coming right now, so push
4496 * any pending DMA copies to hardware
4497 */
Dan Williams2ba05622009-01-06 11:38:14 -07004498 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07004499#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004500
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501 return;
4502
4503softnet_break:
Changli Gaodee42872010-05-02 05:42:16 +00004504 sd->time_squeeze++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4506 goto out;
4507}
4508
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004509struct netdev_adjacent {
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004510 struct net_device *dev;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004511
4512 /* upper master flag, there can only be one master device per list */
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004513 bool master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004514
Veaceslav Falico5d261912013-08-28 23:25:05 +02004515 /* counter for the number of times this device was added to us */
4516 u16 ref_nr;
4517
Veaceslav Falico402dae92013-09-25 09:20:09 +02004518 /* private field for the users */
4519 void *private;
4520
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004521 struct list_head list;
4522 struct rcu_head rcu;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004523};
4524
Veaceslav Falico5d261912013-08-28 23:25:05 +02004525static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4526 struct net_device *adj_dev,
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004527 struct list_head *adj_list)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004528{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004529 struct netdev_adjacent *adj;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004530
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004531 list_for_each_entry(adj, adj_list, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004532 if (adj->dev == adj_dev)
4533 return adj;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004534 }
4535 return NULL;
4536}
4537
4538/**
4539 * netdev_has_upper_dev - Check if device is linked to an upper device
4540 * @dev: device
4541 * @upper_dev: upper device to check
4542 *
4543 * Find out if a device is linked to the specified upper device and return true
4544 * in case it is. Note that this checks only the immediate upper device,
4545 * not through a complete stack of devices. The caller must hold the RTNL lock.
4546 */
4547bool netdev_has_upper_dev(struct net_device *dev,
4548 struct net_device *upper_dev)
4549{
4550 ASSERT_RTNL();
4551
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004552 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004553}
4554EXPORT_SYMBOL(netdev_has_upper_dev);
4555
4556/**
4557 * netdev_has_any_upper_dev - Check if device is linked to some device
4558 * @dev: device
4559 *
4560 * Find out if a device is linked to an upper device and return true in case
4561 * it is. The caller must hold the RTNL lock.
4562 */
stephen hemminger1d143d92013-12-29 14:01:29 -08004563static bool netdev_has_any_upper_dev(struct net_device *dev)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004564{
4565 ASSERT_RTNL();
4566
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004567 return !list_empty(&dev->all_adj_list.upper);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004568}
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004569
4570/**
4571 * netdev_master_upper_dev_get - Get master upper device
4572 * @dev: device
4573 *
4574 * Find a master upper device and return pointer to it or NULL in case
4575 * it's not there. The caller must hold the RTNL lock.
4576 */
4577struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4578{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004579 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004580
4581 ASSERT_RTNL();
4582
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004583 if (list_empty(&dev->adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004584 return NULL;
4585
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004586 upper = list_first_entry(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004587 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004588 if (likely(upper->master))
4589 return upper->dev;
4590 return NULL;
4591}
4592EXPORT_SYMBOL(netdev_master_upper_dev_get);
4593
Veaceslav Falicob6ccba42013-09-25 09:20:23 +02004594void *netdev_adjacent_get_private(struct list_head *adj_list)
4595{
4596 struct netdev_adjacent *adj;
4597
4598 adj = list_entry(adj_list, struct netdev_adjacent, list);
4599
4600 return adj->private;
4601}
4602EXPORT_SYMBOL(netdev_adjacent_get_private);
4603
Veaceslav Falico31088a12013-09-25 09:20:12 +02004604/**
Vlad Yasevich44a40852014-05-16 17:20:38 -04004605 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4606 * @dev: device
4607 * @iter: list_head ** of the current position
4608 *
4609 * Gets the next device from the dev's upper list, starting from iter
4610 * position. The caller must hold RCU read lock.
4611 */
4612struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4613 struct list_head **iter)
4614{
4615 struct netdev_adjacent *upper;
4616
4617 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4618
4619 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4620
4621 if (&upper->list == &dev->adj_list.upper)
4622 return NULL;
4623
4624 *iter = &upper->list;
4625
4626 return upper->dev;
4627}
4628EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
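
/* Usage sketch (added for illustration): walking the immediate upper
 * devices with the iterator above. Starting iter at &dev->adj_list.upper
 * makes the first call return the first upper device.
 */
static void example_walk_uppers(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		pr_debug("%s has upper dev %s\n", dev->name, upper->name);
	rcu_read_unlock();
}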
4629
4630/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02004631 * netdev_all_upper_get_next_dev_rcu - Get the next dev from the all upper list
Veaceslav Falico48311f42013-08-28 23:25:07 +02004632 * @dev: device
4633 * @iter: list_head ** of the current position
4634 *
4635 * Gets the next device from the dev's all upper list, starting from iter
4636 * position. The caller must hold RCU read lock.
4637 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004638struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4639 struct list_head **iter)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004640{
4641 struct netdev_adjacent *upper;
4642
John Fastabend85328242013-11-26 06:33:52 +00004643 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
Veaceslav Falico48311f42013-08-28 23:25:07 +02004644
4645 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4646
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004647 if (&upper->list == &dev->all_adj_list.upper)
Veaceslav Falico48311f42013-08-28 23:25:07 +02004648 return NULL;
4649
4650 *iter = &upper->list;
4651
4652 return upper->dev;
4653}
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004654EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
Veaceslav Falico48311f42013-08-28 23:25:07 +02004655
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004656/**
Veaceslav Falico31088a12013-09-25 09:20:12 +02004657 * netdev_lower_get_next_private - Get the next ->private from the
4658 * lower neighbour list
4659 * @dev: device
4660 * @iter: list_head ** of the current position
4661 *
4662 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4663 * list, starting from iter position. The caller must hold either hold the
4664 * RTNL lock or its own locking that guarantees that the neighbour lower
4665 * list will remain unchainged.
4666 */
4667void *netdev_lower_get_next_private(struct net_device *dev,
4668 struct list_head **iter)
4669{
4670 struct netdev_adjacent *lower;
4671
4672 lower = list_entry(*iter, struct netdev_adjacent, list);
4673
4674 if (&lower->list == &dev->adj_list.lower)
4675 return NULL;
4676
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02004677 *iter = lower->list.next;
Veaceslav Falico31088a12013-09-25 09:20:12 +02004678
4679 return lower->private;
4680}
4681EXPORT_SYMBOL(netdev_lower_get_next_private);
4682
4683/**
4684 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4685 * lower neighbour list, RCU
4686 * variant
4687 * @dev: device
4688 * @iter: list_head ** of the current position
4689 *
4690 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4691 * list, starting from iter position. The caller must hold RCU read lock.
4692 */
4693void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4694 struct list_head **iter)
4695{
4696 struct netdev_adjacent *lower;
4697
4698 WARN_ON_ONCE(!rcu_read_lock_held());
4699
4700 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4701
4702 if (&lower->list == &dev->adj_list.lower)
4703 return NULL;
4704
Veaceslav Falico6859e7d2014-04-07 11:25:12 +02004705 *iter = &lower->list;
Veaceslav Falico31088a12013-09-25 09:20:12 +02004706
4707 return lower->private;
4708}
4709EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4710
4711/**
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04004712 * netdev_lower_get_next - Get the next device from the lower neighbour
4713 * list
4714 * @dev: device
4715 * @iter: list_head ** of the current position
4716 *
4717 * Gets the next netdev_adjacent from the dev's lower neighbour
4718 * list, starting from iter position. The caller must hold the RTNL lock or
4719 * use its own locking that guarantees that the neighbour lower
4720 * list will remain unchanged.
4721 */
4722void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4723{
4724 struct netdev_adjacent *lower;
4725
4726 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4727
4728 if (&lower->list == &dev->adj_list.lower)
4729 return NULL;
4730
4731 *iter = &lower->list;
4732
4733 return lower->dev;
4734}
4735EXPORT_SYMBOL(netdev_lower_get_next);
4736
4737/**
dingtianhonge001bfa2013-12-13 10:19:55 +08004738 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4739 * lower neighbour list, RCU
4740 * variant
4741 * @dev: device
4742 *
4743 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4744 * list. The caller must hold RCU read lock.
4745 */
4746void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4747{
4748 struct netdev_adjacent *lower;
4749
4750 lower = list_first_or_null_rcu(&dev->adj_list.lower,
4751 struct netdev_adjacent, list);
4752 if (lower)
4753 return lower->private;
4754 return NULL;
4755}
4756EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4757
4758/**
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004759 * netdev_master_upper_dev_get_rcu - Get master upper device
4760 * @dev: device
4761 *
4762 * Find a master upper device and return pointer to it or NULL in case
4763 * it's not there. The caller must hold the RCU read lock.
4764 */
4765struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4766{
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004767 struct netdev_adjacent *upper;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004768
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004769 upper = list_first_or_null_rcu(&dev->adj_list.upper,
Veaceslav Falicoaa9d8562013-08-28 23:25:04 +02004770 struct netdev_adjacent, list);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004771 if (upper && likely(upper->master))
4772 return upper->dev;
4773 return NULL;
4774}
4775EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4776
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05304777static int netdev_adjacent_sysfs_add(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01004778 struct net_device *adj_dev,
4779 struct list_head *dev_list)
4780{
4781 char linkname[IFNAMSIZ+7];
4782 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4783 "upper_%s" : "lower_%s", adj_dev->name);
4784 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4785 linkname);
4786}
Rashika Kheria0a59f3a2014-02-09 20:26:25 +05304787static void netdev_adjacent_sysfs_del(struct net_device *dev,
Veaceslav Falico3ee32702014-01-14 21:58:50 +01004788 char *name,
4789 struct list_head *dev_list)
4790{
4791 char linkname[IFNAMSIZ+7];
4792 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4793 "upper_%s" : "lower_%s", name);
4794 sysfs_remove_link(&(dev->dev.kobj), linkname);
4795}
4796
4797#define netdev_adjacent_is_neigh_list(dev, dev_list) \
4798 (dev_list == &dev->adj_list.upper || \
4799 dev_list == &dev->adj_list.lower)
4800
Veaceslav Falico5d261912013-08-28 23:25:05 +02004801static int __netdev_adjacent_dev_insert(struct net_device *dev,
4802 struct net_device *adj_dev,
Veaceslav Falico7863c052013-09-25 09:20:06 +02004803 struct list_head *dev_list,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004804 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004805{
4806 struct netdev_adjacent *adj;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004807 int ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004808
Veaceslav Falico7863c052013-09-25 09:20:06 +02004809 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004810
4811 if (adj) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02004812 adj->ref_nr++;
4813 return 0;
4814 }
4815
4816 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4817 if (!adj)
4818 return -ENOMEM;
4819
4820 adj->dev = adj_dev;
4821 adj->master = master;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004822 adj->ref_nr = 1;
Veaceslav Falico402dae92013-09-25 09:20:09 +02004823 adj->private = private;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004824 dev_hold(adj_dev);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004825
4826 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4827 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004828
Veaceslav Falico3ee32702014-01-14 21:58:50 +01004829 if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
4830 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004831 if (ret)
4832 goto free_adj;
4833 }
4834
Veaceslav Falico7863c052013-09-25 09:20:06 +02004835 /* Ensure that master link is always the first item in list. */
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004836 if (master) {
4837 ret = sysfs_create_link(&(dev->dev.kobj),
4838 &(adj_dev->dev.kobj), "master");
4839 if (ret)
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004840 goto remove_symlinks;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004841
Veaceslav Falico7863c052013-09-25 09:20:06 +02004842 list_add_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004843 } else {
Veaceslav Falico7863c052013-09-25 09:20:06 +02004844 list_add_tail_rcu(&adj->list, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004845 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02004846
4847 return 0;
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004848
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004849remove_symlinks:
Veaceslav Falico3ee32702014-01-14 21:58:50 +01004850 if (netdev_adjacent_is_neigh_list(dev, dev_list))
4851 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004852free_adj:
4853 kfree(adj);
Nikolay Aleksandrov974daef2013-10-23 15:28:56 +02004854 dev_put(adj_dev);
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004855
4856 return ret;
Veaceslav Falico5d261912013-08-28 23:25:05 +02004857}
4858
stephen hemminger1d143d92013-12-29 14:01:29 -08004859static void __netdev_adjacent_dev_remove(struct net_device *dev,
4860 struct net_device *adj_dev,
4861 struct list_head *dev_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004862{
4863 struct netdev_adjacent *adj;
4864
Veaceslav Falico7863c052013-09-25 09:20:06 +02004865 adj = __netdev_find_adj(dev, adj_dev, dev_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004866
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004867 if (!adj) {
4868 pr_err("tried to remove device %s from %s\n",
4869 dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004870 BUG();
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004871 }
Veaceslav Falico5d261912013-08-28 23:25:05 +02004872
4873 if (adj->ref_nr > 1) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004874 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4875 adj->ref_nr-1);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004876 adj->ref_nr--;
4877 return;
4878 }
4879
Veaceslav Falico842d67a2013-09-25 09:20:31 +02004880 if (adj->master)
4881 sysfs_remove_link(&(dev->dev.kobj), "master");
4882
Veaceslav Falico3ee32702014-01-14 21:58:50 +01004883 if (netdev_adjacent_is_neigh_list(dev, dev_list))
4884 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
Veaceslav Falico5831d66e2013-09-25 09:20:32 +02004885
Veaceslav Falico5d261912013-08-28 23:25:05 +02004886 list_del_rcu(&adj->list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004887 pr_debug("dev_put for %s, because link removed from %s to %s\n",
4888 adj_dev->name, dev->name, adj_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004889 dev_put(adj_dev);
4890 kfree_rcu(adj, rcu);
4891}
4892
stephen hemminger1d143d92013-12-29 14:01:29 -08004893static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4894 struct net_device *upper_dev,
4895 struct list_head *up_list,
4896 struct list_head *down_list,
4897 void *private, bool master)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004898{
4899 int ret;
4900
Veaceslav Falico402dae92013-09-25 09:20:09 +02004901 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4902 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004903 if (ret)
4904 return ret;
4905
Veaceslav Falico402dae92013-09-25 09:20:09 +02004906 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4907 false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004908 if (ret) {
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004909 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004910 return ret;
4911 }
4912
4913 return 0;
4914}
4915
stephen hemminger1d143d92013-12-29 14:01:29 -08004916static int __netdev_adjacent_dev_link(struct net_device *dev,
4917 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004918{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004919 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
4920 &dev->all_adj_list.upper,
4921 &upper_dev->all_adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004922 NULL, false);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004923}
4924
stephen hemminger1d143d92013-12-29 14:01:29 -08004925static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
4926 struct net_device *upper_dev,
4927 struct list_head *up_list,
4928 struct list_head *down_list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004929{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004930 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4931 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004932}
4933
stephen hemminger1d143d92013-12-29 14:01:29 -08004934static void __netdev_adjacent_dev_unlink(struct net_device *dev,
4935 struct net_device *upper_dev)
Veaceslav Falico5d261912013-08-28 23:25:05 +02004936{
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004937 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4938 &dev->all_adj_list.upper,
4939 &upper_dev->all_adj_list.lower);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004940}
4941
stephen hemminger1d143d92013-12-29 14:01:29 -08004942static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
4943 struct net_device *upper_dev,
4944 void *private, bool master)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004945{
4946 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
4947
4948 if (ret)
4949 return ret;
4950
4951 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
4952 &dev->adj_list.upper,
4953 &upper_dev->adj_list.lower,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004954 private, master);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004955 if (ret) {
4956 __netdev_adjacent_dev_unlink(dev, upper_dev);
4957 return ret;
4958 }
4959
4960 return 0;
4961}
4962
stephen hemminger1d143d92013-12-29 14:01:29 -08004963static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
4964 struct net_device *upper_dev)
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004965{
4966 __netdev_adjacent_dev_unlink(dev, upper_dev);
4967 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4968 &dev->adj_list.upper,
4969 &upper_dev->adj_list.lower);
4970}
Veaceslav Falico5d261912013-08-28 23:25:05 +02004971
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004972static int __netdev_upper_dev_link(struct net_device *dev,
Veaceslav Falico402dae92013-09-25 09:20:09 +02004973 struct net_device *upper_dev, bool master,
4974 void *private)
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004975{
Veaceslav Falico5d261912013-08-28 23:25:05 +02004976 struct netdev_adjacent *i, *j, *to_i, *to_j;
4977 int ret = 0;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004978
4979 ASSERT_RTNL();
4980
4981 if (dev == upper_dev)
4982 return -EBUSY;
4983
4984 /* To prevent loops, check if dev is not upper device to upper_dev. */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004985 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004986 return -EBUSY;
4987
Veaceslav Falico2f268f12013-09-25 09:20:07 +02004988 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004989 return -EEXIST;
4990
4991 if (master && netdev_master_upper_dev_get(dev))
4992 return -EBUSY;
4993
Veaceslav Falico402dae92013-09-25 09:20:09 +02004994 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
4995 master);
Veaceslav Falico5d261912013-08-28 23:25:05 +02004996 if (ret)
4997 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00004998
Veaceslav Falico5d261912013-08-28 23:25:05 +02004999 /* Now that we linked these devs, make all the upper_dev's
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005000	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
Veaceslav Falico5d261912013-08-28 23:25:05 +02005001	 * vice versa, and don't forget the devices themselves. All of these
5002 * links are non-neighbours.
5003 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005004 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5005 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5006 pr_debug("Interlinking %s with %s, non-neighbour\n",
5007 i->dev->name, j->dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005008 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5009 if (ret)
5010 goto rollback_mesh;
5011 }
5012 }
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005013
Veaceslav Falico5d261912013-08-28 23:25:05 +02005014 /* add dev to every upper_dev's upper device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005015 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5016 pr_debug("linking %s's upper device %s with %s\n",
5017 upper_dev->name, i->dev->name, dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005018 ret = __netdev_adjacent_dev_link(dev, i->dev);
5019 if (ret)
5020 goto rollback_upper_mesh;
5021 }
5022
5023 /* add upper_dev to every dev's lower device */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005024 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5025 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5026 i->dev->name, upper_dev->name);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005027 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5028 if (ret)
5029 goto rollback_lower_mesh;
5030 }
5031
Jiri Pirko42e52bf2013-05-25 04:12:10 +00005032 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005033 return 0;
Veaceslav Falico5d261912013-08-28 23:25:05 +02005034
5035rollback_lower_mesh:
5036 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005037 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005038 if (i == to_i)
5039 break;
5040 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5041 }
5042
5043 i = NULL;
5044
5045rollback_upper_mesh:
5046 to_i = i;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005047 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005048 if (i == to_i)
5049 break;
5050 __netdev_adjacent_dev_unlink(dev, i->dev);
5051 }
5052
5053 i = j = NULL;
5054
5055rollback_mesh:
5056 to_i = i;
5057 to_j = j;
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005058 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5059 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
Veaceslav Falico5d261912013-08-28 23:25:05 +02005060 if (i == to_i && j == to_j)
5061 break;
5062 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5063 }
5064 if (i == to_i)
5065 break;
5066 }
5067
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005068 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005069
5070 return ret;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005071}
5072
5073/**
5074 * netdev_upper_dev_link - Add a link to the upper device
5075 * @dev: device
5076 * @upper_dev: new upper device
5077 *
5078 * Adds a link to a device which is upper to this one. The caller must hold
5079 * the RTNL lock. On a failure a negative errno code is returned.
5080 * On success the reference counts are adjusted and the function
5081 * returns zero.
5082 */
5083int netdev_upper_dev_link(struct net_device *dev,
5084 struct net_device *upper_dev)
5085{
Veaceslav Falico402dae92013-09-25 09:20:09 +02005086 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005087}
5088EXPORT_SYMBOL(netdev_upper_dev_link);
5089
5090/**
5091 * netdev_master_upper_dev_link - Add a master link to the upper device
5092 * @dev: device
5093 * @upper_dev: new upper device
5094 *
5095 * Adds a link to a device which is upper to this one. In this case, only
5096 * one master upper device can be linked, although other non-master devices
5097 * might be linked as well. The caller must hold the RTNL lock.
5098 * On a failure a negative errno code is returned. On success the reference
5099 * counts are adjusted and the function returns zero.
5100 */
5101int netdev_master_upper_dev_link(struct net_device *dev,
5102 struct net_device *upper_dev)
5103{
Veaceslav Falico402dae92013-09-25 09:20:09 +02005104 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005105}
5106EXPORT_SYMBOL(netdev_master_upper_dev_link);
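
/*
 * Illustrative sketch (hypothetical example_* names): how a bonding-style
 * master driver might use netdev_master_upper_dev_link() from its enslave
 * path. Both link helpers require the RTNL lock.
 */
static int example_enslave(struct net_device *master_dev,
			   struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();

	/* slave_dev gains master_dev as its single master upper device */
	err = netdev_master_upper_dev_link(slave_dev, master_dev);
	if (err)
		return err;	/* e.g. -EBUSY if a master is already linked */

	/* driver-private slave setup would continue here */
	return 0;
}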
5107
Veaceslav Falico402dae92013-09-25 09:20:09 +02005108int netdev_master_upper_dev_link_private(struct net_device *dev,
5109 struct net_device *upper_dev,
5110 void *private)
5111{
5112 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5113}
5114EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5115
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005116/**
5117 * netdev_upper_dev_unlink - Removes a link to upper device
5118 * @dev: device
5119 * @upper_dev: upper device to remove
5120 *
5121 * Removes a link to a device which is upper to this one. The caller must hold
5122 * the RTNL lock.
5123 */
5124void netdev_upper_dev_unlink(struct net_device *dev,
5125 struct net_device *upper_dev)
5126{
Veaceslav Falico5d261912013-08-28 23:25:05 +02005127 struct netdev_adjacent *i, *j;
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005128 ASSERT_RTNL();
5129
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005130 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
Veaceslav Falico5d261912013-08-28 23:25:05 +02005131
5132 /* Here is the tricky part. We must remove all dev's lower
5133 * devices from all upper_dev's upper devices and vice
5134 * versa, to maintain the graph relationship.
5135 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005136 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5137 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005138 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5139
5140 /* also remove the devices themselves from the lower/upper
5141 * device lists
5142 */
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005143 list_for_each_entry(i, &dev->all_adj_list.lower, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005144 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5145
Veaceslav Falico2f268f12013-09-25 09:20:07 +02005146 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
Veaceslav Falico5d261912013-08-28 23:25:05 +02005147 __netdev_adjacent_dev_unlink(dev, i->dev);
5148
Jiri Pirko42e52bf2013-05-25 04:12:10 +00005149 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005150}
5151EXPORT_SYMBOL(netdev_upper_dev_unlink);
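
/*
 * Illustrative sketch: the teardown counterpart of the enslave example
 * above. netdev_upper_dev_unlink() drops the adjacency and fires
 * NETDEV_CHANGEUPPER for the lower device. Hypothetical example_* names.
 */
static void example_release(struct net_device *master_dev,
			    struct net_device *slave_dev)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave_dev, master_dev);
}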
5152
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005153void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
Veaceslav Falico402dae92013-09-25 09:20:09 +02005154{
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005155 struct netdev_adjacent *iter;
Veaceslav Falico402dae92013-09-25 09:20:09 +02005156
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005157 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5158 netdev_adjacent_sysfs_del(iter->dev, oldname,
5159 &iter->dev->adj_list.lower);
5160 netdev_adjacent_sysfs_add(iter->dev, dev,
5161 &iter->dev->adj_list.lower);
5162 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02005163
Veaceslav Falico5bb025f2014-01-14 21:58:51 +01005164 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5165 netdev_adjacent_sysfs_del(iter->dev, oldname,
5166 &iter->dev->adj_list.upper);
5167 netdev_adjacent_sysfs_add(iter->dev, dev,
5168 &iter->dev->adj_list.upper);
5169 }
Veaceslav Falico402dae92013-09-25 09:20:09 +02005170}
Veaceslav Falico402dae92013-09-25 09:20:09 +02005171
5172void *netdev_lower_dev_get_private(struct net_device *dev,
5173 struct net_device *lower_dev)
5174{
5175 struct netdev_adjacent *lower;
5176
5177 if (!lower_dev)
5178 return NULL;
5179 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5180 if (!lower)
5181 return NULL;
5182
5183 return lower->private;
5184}
5185EXPORT_SYMBOL(netdev_lower_dev_get_private);
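
/*
 * Illustrative sketch: retrieving per-slave state that a master driver
 * attached via netdev_master_upper_dev_link_private(). The struct and
 * example_* names are hypothetical.
 */
struct example_slave {
	int id;
};

static int example_slave_id(struct net_device *master_dev,
			    struct net_device *slave_dev)
{
	struct example_slave *s;

	s = netdev_lower_dev_get_private(master_dev, slave_dev);
	return s ? s->id : -ENOENT;
}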
5186
Vlad Yasevich4085ebe2014-05-16 17:04:53 -04005187
5188int dev_get_nest_level(struct net_device *dev,
5189 bool (*type_check)(struct net_device *dev))
5190{
5191 struct net_device *lower = NULL;
5192 struct list_head *iter;
5193 int max_nest = -1;
5194 int nest;
5195
5196 ASSERT_RTNL();
5197
5198 netdev_for_each_lower_dev(dev, lower, iter) {
5199 nest = dev_get_nest_level(lower, type_check);
5200 if (max_nest < nest)
5201 max_nest = nest;
5202 }
5203
5204 if (type_check(dev))
5205 max_nest++;
5206
5207 return max_nest;
5208}
5209EXPORT_SYMBOL(dev_get_nest_level);
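
/*
 * Illustrative sketch: counting how deeply devices of one type are nested
 * below a device. example_is_bridge() is a hypothetical type check built
 * only from flags already defined in <linux/netdevice.h>.
 */
static bool example_is_bridge(struct net_device *dev)
{
	return !!(dev->priv_flags & IFF_EBRIDGE);
}

static int example_bridge_nest_level(struct net_device *dev)
{
	ASSERT_RTNL();	/* dev_get_nest_level() walks the lower devs under RTNL */
	return dev_get_nest_level(dev, example_is_bridge);
}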
5210
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005211static void dev_change_rx_flags(struct net_device *dev, int flags)
5212{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005213 const struct net_device_ops *ops = dev->netdev_ops;
5214
Vlad Yasevichd2615bf2013-11-19 20:47:15 -05005215 if (ops->ndo_change_rx_flags)
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005216 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005217}
5218
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005219static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
Patrick McHardy4417da62007-06-27 01:28:10 -07005220{
Eric Dumazetb536db92011-11-30 21:42:26 +00005221 unsigned int old_flags = dev->flags;
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005222 kuid_t uid;
5223 kgid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07005224
Patrick McHardy24023452007-07-14 18:51:31 -07005225 ASSERT_RTNL();
5226
Wang Chendad9b332008-06-18 01:48:28 -07005227 dev->flags |= IFF_PROMISC;
5228 dev->promiscuity += inc;
5229 if (dev->promiscuity == 0) {
5230 /*
5231 * Avoid overflow.
5232 * If inc causes overflow, untouch promisc and return error.
5233 */
5234 if (inc < 0)
5235 dev->flags &= ~IFF_PROMISC;
5236 else {
5237 dev->promiscuity -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005238 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5239 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005240 return -EOVERFLOW;
5241 }
5242 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005243 if (dev->flags != old_flags) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005244 pr_info("device %s %s promiscuous mode\n",
5245 dev->name,
5246 dev->flags & IFF_PROMISC ? "entered" : "left");
David Howells8192b0c2008-11-14 10:39:10 +11005247 if (audit_enabled) {
5248 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005249 audit_log(current->audit_context, GFP_ATOMIC,
5250 AUDIT_ANOM_PROMISCUOUS,
5251 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5252 dev->name, (dev->flags & IFF_PROMISC),
5253 (old_flags & IFF_PROMISC),
Eric W. Biedermane1760bd2012-09-10 22:39:43 -07005254 from_kuid(&init_user_ns, audit_get_loginuid(current)),
Eric W. Biedermand04a48b2012-05-23 17:01:57 -06005255 from_kuid(&init_user_ns, uid),
5256 from_kgid(&init_user_ns, gid),
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05005257 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11005258 }
Patrick McHardy24023452007-07-14 18:51:31 -07005259
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005260 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07005261 }
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005262 if (notify)
5263 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
Wang Chendad9b332008-06-18 01:48:28 -07005264 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005265}
5266
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267/**
5268 * dev_set_promiscuity - update promiscuity count on a device
5269 * @dev: device
5270 * @inc: modifier
5271 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07005272 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273 * remains above zero the interface remains promiscuous. Once it hits zero
5274 * the device reverts to normal filtering operation. A negative inc
5275 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07005276 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277 */
Wang Chendad9b332008-06-18 01:48:28 -07005278int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005279{
Eric Dumazetb536db92011-11-30 21:42:26 +00005280 unsigned int old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07005281 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005283 err = __dev_set_promiscuity(dev, inc, true);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07005284 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07005285 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07005286 if (dev->flags != old_flags)
5287 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07005288 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005290EXPORT_SYMBOL(dev_set_promiscuity);
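
/*
 * Illustrative sketch (hypothetical name): a packet-capture style user
 * toggling promiscuous mode. The count nests, so every +1 must eventually
 * be balanced by a -1.
 */
static int example_set_capture(struct net_device *dev, bool on)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, on ? 1 : -1);
	rtnl_unlock();
	return err;
}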
Linus Torvalds1da177e2005-04-16 15:20:36 -07005291
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005292static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005293{
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005294 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295
Patrick McHardy24023452007-07-14 18:51:31 -07005296 ASSERT_RTNL();
5297
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07005299 dev->allmulti += inc;
5300 if (dev->allmulti == 0) {
5301 /*
5302 * Avoid overflow.
5303 * If inc causes overflow, untouch allmulti and return error.
5304 */
5305 if (inc < 0)
5306 dev->flags &= ~IFF_ALLMULTI;
5307 else {
5308 dev->allmulti -= inc;
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005309 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5310 dev->name);
Wang Chendad9b332008-06-18 01:48:28 -07005311 return -EOVERFLOW;
5312 }
5313 }
Patrick McHardy24023452007-07-14 18:51:31 -07005314 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005315 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07005316 dev_set_rx_mode(dev);
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005317 if (notify)
5318 __dev_notify_flags(dev, old_flags,
5319 dev->gflags ^ old_gflags);
Patrick McHardy24023452007-07-14 18:51:31 -07005320 }
Wang Chendad9b332008-06-18 01:48:28 -07005321 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07005322}
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005323
5324/**
5325 * dev_set_allmulti - update allmulti count on a device
5326 * @dev: device
5327 * @inc: modifier
5328 *
5329 * Add or remove reception of all multicast frames to a device. While the
5330 * count in the device remains above zero the interface keeps receiving
5331 * all multicast frames. Once it hits zero the device reverts to normal
5332 * filtering operation. A negative @inc value is used to drop the counter
5333 * when releasing a resource needing all multicasts.
5334 * Return 0 if successful or a negative errno code on error.
5335 */
5336
5337int dev_set_allmulti(struct net_device *dev, int inc)
5338{
5339 return __dev_set_allmulti(dev, inc, true);
5340}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005341EXPORT_SYMBOL(dev_set_allmulti);
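
/*
 * Illustrative sketch (hypothetical name): a multicast-routing style user
 * taking and releasing the allmulti count, which nests exactly like the
 * promiscuity count above. The caller is assumed to hold RTNL.
 */
static int example_set_allmulti(struct net_device *dev, bool on)
{
	ASSERT_RTNL();
	return dev_set_allmulti(dev, on ? 1 : -1);
}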
Patrick McHardy4417da62007-06-27 01:28:10 -07005342
5343/*
5344 * Upload unicast and multicast address lists to the device and
5345 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08005346 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07005347 * are present.
5348 */
5349void __dev_set_rx_mode(struct net_device *dev)
5350{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005351 const struct net_device_ops *ops = dev->netdev_ops;
5352
Patrick McHardy4417da62007-06-27 01:28:10 -07005353 /* dev_open will call this function so the list will stay sane. */
5354 if (!(dev->flags&IFF_UP))
5355 return;
5356
5357 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09005358 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07005359
Jiri Pirko01789342011-08-16 06:29:00 +00005360 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
Patrick McHardy4417da62007-06-27 01:28:10 -07005361 /* Unicast addresses changes may only happen under the rtnl,
5362 * therefore calling __dev_set_promiscuity here is safe.
5363 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005364 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005365 __dev_set_promiscuity(dev, 1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005366 dev->uc_promisc = true;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08005367 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005368 __dev_set_promiscuity(dev, -1, false);
Joe Perches2d348d12011-07-25 16:17:35 -07005369 dev->uc_promisc = false;
Patrick McHardy4417da62007-06-27 01:28:10 -07005370 }
Patrick McHardy4417da62007-06-27 01:28:10 -07005371 }
Jiri Pirko01789342011-08-16 06:29:00 +00005372
5373 if (ops->ndo_set_rx_mode)
5374 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005375}
5376
5377void dev_set_rx_mode(struct net_device *dev)
5378{
David S. Millerb9e40852008-07-15 00:15:08 -07005379 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07005380 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07005381 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005382}
5383
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005384/**
5385 * dev_get_flags - get flags reported to userspace
5386 * @dev: device
5387 *
5388 * Get the combination of flag bits exported through APIs to userspace.
5389 */
Eric Dumazet95c96172012-04-15 05:58:06 +00005390unsigned int dev_get_flags(const struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391{
Eric Dumazet95c96172012-04-15 05:58:06 +00005392 unsigned int flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005393
5394 flags = (dev->flags & ~(IFF_PROMISC |
5395 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08005396 IFF_RUNNING |
5397 IFF_LOWER_UP |
5398 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07005399 (dev->gflags & (IFF_PROMISC |
5400 IFF_ALLMULTI));
5401
Stefan Rompfb00055a2006-03-20 17:09:11 -08005402 if (netif_running(dev)) {
5403 if (netif_oper_up(dev))
5404 flags |= IFF_RUNNING;
5405 if (netif_carrier_ok(dev))
5406 flags |= IFF_LOWER_UP;
5407 if (netif_dormant(dev))
5408 flags |= IFF_DORMANT;
5409 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410
5411 return flags;
5412}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005413EXPORT_SYMBOL(dev_get_flags);
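
/*
 * Illustrative sketch (hypothetical name): testing operational state via
 * dev_get_flags(). IFF_RUNNING and IFF_LOWER_UP are synthesized by the
 * helper from carrier/operstate and are not stored in dev->flags.
 */
static bool example_link_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}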
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414
Patrick McHardybd380812010-02-26 06:34:53 +00005415int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416{
Eric Dumazetb536db92011-11-30 21:42:26 +00005417 unsigned int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00005418 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419
Patrick McHardy24023452007-07-14 18:51:31 -07005420 ASSERT_RTNL();
5421
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422 /*
5423 * Set the flags on our device.
5424 */
5425
5426 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5427 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5428 IFF_AUTOMEDIA)) |
5429 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5430 IFF_ALLMULTI));
5431
5432 /*
5433 * Load in the correct multicast list now the flags have changed.
5434 */
5435
Patrick McHardyb6c40d62008-10-07 15:26:48 -07005436 if ((old_flags ^ flags) & IFF_MULTICAST)
5437 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07005438
Patrick McHardy4417da62007-06-27 01:28:10 -07005439 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440
5441 /*
5442 * Have we downed the interface? We handle IFF_UP ourselves
5443 * according to user attempts to set it, rather than blindly
5444 * setting it.
5445 */
5446
5447 ret = 0;
Peter Pan(潘卫平)d215d102014-06-16 21:57:22 +08005448 if ((old_flags ^ flags) & IFF_UP)
Patrick McHardybd380812010-02-26 06:34:53 +00005449 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005450
Linus Torvalds1da177e2005-04-16 15:20:36 -07005451 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005452 int inc = (flags & IFF_PROMISC) ? 1 : -1;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005453 unsigned int old_flags = dev->flags;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005454
Linus Torvalds1da177e2005-04-16 15:20:36 -07005455 dev->gflags ^= IFF_PROMISC;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005456
5457 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5458 if (dev->flags != old_flags)
5459 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460 }
5461
5462 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5463 is important. Some (broken) drivers set IFF_PROMISC when
5464 IFF_ALLMULTI is requested, without asking us and without reporting it.
5465 */
5466 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005467 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5468
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469 dev->gflags ^= IFF_ALLMULTI;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005470 __dev_set_allmulti(dev, inc, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005471 }
5472
Patrick McHardybd380812010-02-26 06:34:53 +00005473 return ret;
5474}
5475
Nicolas Dichtela528c212013-09-25 12:02:44 +02005476void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5477 unsigned int gchanges)
Patrick McHardybd380812010-02-26 06:34:53 +00005478{
5479 unsigned int changes = dev->flags ^ old_flags;
5480
Nicolas Dichtela528c212013-09-25 12:02:44 +02005481 if (gchanges)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07005482 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005483
Patrick McHardybd380812010-02-26 06:34:53 +00005484 if (changes & IFF_UP) {
5485 if (dev->flags & IFF_UP)
5486 call_netdevice_notifiers(NETDEV_UP, dev);
5487 else
5488 call_netdevice_notifiers(NETDEV_DOWN, dev);
5489 }
5490
5491 if (dev->flags & IFF_UP &&
Jiri Pirkobe9efd32013-05-28 01:30:22 +00005492 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5493 struct netdev_notifier_change_info change_info;
5494
5495 change_info.flags_changed = changes;
5496 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5497 &change_info.info);
5498 }
Patrick McHardybd380812010-02-26 06:34:53 +00005499}
5500
5501/**
5502 * dev_change_flags - change device settings
5503 * @dev: device
5504 * @flags: device state flags
5505 *
5506 * Change settings on device based state flags. The flags are
5507 * in the userspace exported format.
5508 */
Eric Dumazetb536db92011-11-30 21:42:26 +00005509int dev_change_flags(struct net_device *dev, unsigned int flags)
Patrick McHardybd380812010-02-26 06:34:53 +00005510{
Eric Dumazetb536db92011-11-30 21:42:26 +00005511 int ret;
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005512 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
Patrick McHardybd380812010-02-26 06:34:53 +00005513
5514 ret = __dev_change_flags(dev, flags);
5515 if (ret < 0)
5516 return ret;
5517
Nicolas Dichtel991fb3f2013-09-25 12:02:45 +02005518 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
Nicolas Dichtela528c212013-09-25 12:02:44 +02005519 __dev_notify_flags(dev, old_flags, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005520 return ret;
5521}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005522EXPORT_SYMBOL(dev_change_flags);
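
/*
 * Illustrative sketch (hypothetical name): bringing an interface
 * administratively up the way SIOCSIFFLAGS does. dev_change_flags()
 * must be called with the RTNL lock held.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}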
Linus Torvalds1da177e2005-04-16 15:20:36 -07005523
Veaceslav Falico2315dc92014-01-10 16:56:25 +01005524static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5525{
5526 const struct net_device_ops *ops = dev->netdev_ops;
5527
5528 if (ops->ndo_change_mtu)
5529 return ops->ndo_change_mtu(dev, new_mtu);
5530
5531 dev->mtu = new_mtu;
5532 return 0;
5533}
5534
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005535/**
5536 * dev_set_mtu - Change maximum transfer unit
5537 * @dev: device
5538 * @new_mtu: new transfer unit
5539 *
5540 * Change the maximum transfer size of the network device.
5541 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005542int dev_set_mtu(struct net_device *dev, int new_mtu)
5543{
Veaceslav Falico2315dc92014-01-10 16:56:25 +01005544 int err, orig_mtu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005545
5546 if (new_mtu == dev->mtu)
5547 return 0;
5548
5549 /* MTU must not be negative. */
5550 if (new_mtu < 0)
5551 return -EINVAL;
5552
5553 if (!netif_device_present(dev))
5554 return -ENODEV;
5555
Veaceslav Falico1d486bf2014-01-16 00:02:18 +01005556 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5557 err = notifier_to_errno(err);
5558 if (err)
5559 return err;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005560
Veaceslav Falico2315dc92014-01-10 16:56:25 +01005561 orig_mtu = dev->mtu;
5562 err = __dev_set_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005563
Veaceslav Falico2315dc92014-01-10 16:56:25 +01005564 if (!err) {
5565 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5566 err = notifier_to_errno(err);
5567 if (err) {
5568 /* setting mtu back and notifying everyone again,
5569 * so that they have a chance to revert changes.
5570 */
5571 __dev_set_mtu(dev, orig_mtu);
5572 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5573 }
5574 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005575 return err;
5576}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005577EXPORT_SYMBOL(dev_set_mtu);
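
/*
 * Illustrative sketch (hypothetical names and overhead value): shrinking
 * the MTU to leave room for an encapsulation header. If a NETDEV_CHANGEMTU
 * notifier rejects the new value, dev_set_mtu() restores the old MTU.
 */
#define EXAMPLE_TUNNEL_OVERHEAD 50

static int example_shrink_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, dev->mtu - EXAMPLE_TUNNEL_OVERHEAD);
	rtnl_unlock();
	return err;
}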
Linus Torvalds1da177e2005-04-16 15:20:36 -07005578
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005579/**
Vlad Dogarucbda10f2011-01-13 23:38:30 +00005580 * dev_set_group - Change group this device belongs to
5581 * @dev: device
5582 * @new_group: group this device should belong to
5583 */
5584void dev_set_group(struct net_device *dev, int new_group)
5585{
5586 dev->group = new_group;
5587}
5588EXPORT_SYMBOL(dev_set_group);
5589
5590/**
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005591 * dev_set_mac_address - Change Media Access Control Address
5592 * @dev: device
5593 * @sa: new address
5594 *
5595 * Change the hardware (MAC) address of the device
5596 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005597int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5598{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005599 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005600 int err;
5601
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005602 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005603 return -EOPNOTSUPP;
5604 if (sa->sa_family != dev->type)
5605 return -EINVAL;
5606 if (!netif_device_present(dev))
5607 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005608 err = ops->ndo_set_mac_address(dev, sa);
Jiri Pirkof6521512013-01-01 03:30:14 +00005609 if (err)
5610 return err;
Jiri Pirkofbdeca22013-01-01 03:30:16 +00005611 dev->addr_assign_type = NET_ADDR_SET;
Jiri Pirkof6521512013-01-01 03:30:14 +00005612 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04005613 add_device_randomness(dev->dev_addr, dev->addr_len);
Jiri Pirkof6521512013-01-01 03:30:14 +00005614 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005615}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005616EXPORT_SYMBOL(dev_set_mac_address);
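
/*
 * Illustrative sketch (hypothetical name): programming a new hardware
 * address from a raw byte buffer. Assumes addr_len fits in sa_data
 * (true for Ethernet) and that the caller holds RTNL.
 */
static int example_set_mac(struct net_device *dev, const u8 *addr)
{
	struct sockaddr sa;

	ASSERT_RTNL();
	sa.sa_family = dev->type;	/* must match dev->type, or -EINVAL */
	memcpy(sa.sa_data, addr, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}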
Linus Torvalds1da177e2005-04-16 15:20:36 -07005617
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005618/**
5619 * dev_change_carrier - Change device carrier
5620 * @dev: device
Randy Dunlap691b3b72013-03-04 12:32:43 +00005621 * @new_carrier: new value
Jiri Pirko4bf84c32012-12-27 23:49:37 +00005622 *
5623 * Change device carrier
5624 */
5625int dev_change_carrier(struct net_device *dev, bool new_carrier)
5626{
5627 const struct net_device_ops *ops = dev->netdev_ops;
5628
5629 if (!ops->ndo_change_carrier)
5630 return -EOPNOTSUPP;
5631 if (!netif_device_present(dev))
5632 return -ENODEV;
5633 return ops->ndo_change_carrier(dev, new_carrier);
5634}
5635EXPORT_SYMBOL(dev_change_carrier);
5636
Linus Torvalds1da177e2005-04-16 15:20:36 -07005637/**
Jiri Pirko66b52b02013-07-29 18:16:49 +02005638 * dev_get_phys_port_id - Get device physical port ID
5639 * @dev: device
5640 * @ppid: port ID
5641 *
5642 * Get device physical port ID
5643 */
5644int dev_get_phys_port_id(struct net_device *dev,
5645 struct netdev_phys_port_id *ppid)
5646{
5647 const struct net_device_ops *ops = dev->netdev_ops;
5648
5649 if (!ops->ndo_get_phys_port_id)
5650 return -EOPNOTSUPP;
5651 return ops->ndo_get_phys_port_id(dev, ppid);
5652}
5653EXPORT_SYMBOL(dev_get_phys_port_id);
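
/*
 * Illustrative sketch (hypothetical name): querying and logging the
 * physical port ID. Drivers without ndo_get_phys_port_id make this
 * return -EOPNOTSUPP, in which case nothing is printed.
 */
static void example_log_phys_port_id(struct net_device *dev)
{
	struct netdev_phys_port_id ppid;

	if (!dev_get_phys_port_id(dev, &ppid))
		netdev_info(dev, "phys port id %*phN\n", ppid.id_len, ppid.id);
}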
5654
5655/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005657 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658 *
5659 * Returns a suitable unique value for a new device interface
5660 * number. The caller must hold the rtnl semaphore or the
5661 * dev_base_lock to be sure it remains unique.
5662 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005663static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005664{
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005665 int ifindex = net->ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666 for (;;) {
5667 if (++ifindex <= 0)
5668 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005669 if (!__dev_get_by_index(net, ifindex))
Pavel Emelyanovaa79e662012-08-08 21:53:19 +00005670 return net->ifindex = ifindex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671 }
5672}
5673
Linus Torvalds1da177e2005-04-16 15:20:36 -07005674/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005675static LIST_HEAD(net_todo_list);
Cong Wang200b9162014-05-12 15:11:20 -07005676DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005677
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005678static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005679{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680 list_add_tail(&dev->todo_list, &net_todo_list);
Eric W. Biederman50624c92013-09-23 21:19:49 -07005681 dev_net(dev)->dev_unreg_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005682}
5683
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005684static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005685{
Krishna Kumare93737b2009-12-08 22:26:02 +00005686 struct net_device *dev, *tmp;
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005687 LIST_HEAD(close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005688
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005689 BUG_ON(dev_boot_phase);
5690 ASSERT_RTNL();
5691
Krishna Kumare93737b2009-12-08 22:26:02 +00005692 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005693		/* Some devices get unregistered without ever having been
Krishna Kumare93737b2009-12-08 22:26:02 +00005694		 * registered; they go through here for initialization unwind.
5695		 * Remove those devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005696 */
5697 if (dev->reg_state == NETREG_UNINITIALIZED) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00005698 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5699 dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005700
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005701 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005702 list_del(&dev->unreg_list);
5703 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005704 }
Eric Dumazet449f4542011-05-19 12:24:16 +00005705 dev->dismantle = true;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005706 BUG_ON(dev->reg_state != NETREG_REGISTERED);
Octavian Purdila44345722010-12-13 12:44:07 +00005707 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005708
Octavian Purdila44345722010-12-13 12:44:07 +00005709 /* If device is running, close it first. */
Eric W. Biederman5cde2822013-10-05 19:26:05 -07005710 list_for_each_entry(dev, head, unreg_list)
5711 list_add_tail(&dev->close_list, &close_head);
5712 dev_close_many(&close_head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005713
Octavian Purdila44345722010-12-13 12:44:07 +00005714 list_for_each_entry(dev, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005715 /* And unlink it from device chain. */
5716 unlist_netdevice(dev);
5717
5718 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005719 }
5720
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005721 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005722
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005723 list_for_each_entry(dev, head, unreg_list) {
5724 /* Shutdown queueing discipline. */
5725 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005726
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005727
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005728		/* Notify protocols that we are about to destroy
5729		   this device. They should clean up all of their state.
5730 */
5731 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5732
5733 /*
5734 * Flush the unicast and multicast chains
5735 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005736 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005737 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005738
5739 if (dev->netdev_ops->ndo_uninit)
5740 dev->netdev_ops->ndo_uninit(dev);
5741
Roopa Prabhu56bfa7e2014-05-01 11:40:30 -07005742 if (!dev->rtnl_link_ops ||
5743 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5744 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5745
Jiri Pirko9ff162a2013-01-03 22:48:49 +00005746 /* Notifier chain MUST detach us all upper devices. */
5747 WARN_ON(netdev_has_any_upper_dev(dev));
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005748
5749 /* Remove entries from kobject tree */
5750 netdev_unregister_kobject(dev);
Alexander Duyck024e9672013-01-10 08:57:46 +00005751#ifdef CONFIG_XPS
5752 /* Remove XPS queueing entries */
5753 netif_reset_xps_queues_gt(dev, 0);
5754#endif
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005755 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005756
Eric W. Biederman850a5452011-10-13 22:25:23 +00005757 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005758
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005759 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005760 dev_put(dev);
5761}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005762
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005763static void rollback_registered(struct net_device *dev)
5764{
5765 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005766
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005767 list_add(&dev->unreg_list, &single);
5768 rollback_registered_many(&single);
Eric Dumazetceaaec92011-02-17 22:59:19 +00005769 list_del(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005770}
5771
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005772static netdev_features_t netdev_fix_features(struct net_device *dev,
5773 netdev_features_t features)
Herbert Xub63365a2008-10-23 01:11:29 -07005774{
Michał Mirosław57422dc2011-01-22 12:14:12 +00005775 /* Fix illegal checksum combinations */
5776 if ((features & NETIF_F_HW_CSUM) &&
5777 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005778 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
Michał Mirosław57422dc2011-01-22 12:14:12 +00005779 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5780 }
5781
Herbert Xub63365a2008-10-23 01:11:29 -07005782 /* TSO requires that SG is present as well. */
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005783 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005784 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
Ben Hutchingsea2d3682011-04-12 14:38:37 +00005785 features &= ~NETIF_F_ALL_TSO;
Herbert Xub63365a2008-10-23 01:11:29 -07005786 }
5787
Pravin B Shelarec5f0612013-03-07 09:28:01 +00005788 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5789 !(features & NETIF_F_IP_CSUM)) {
5790 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5791 features &= ~NETIF_F_TSO;
5792 features &= ~NETIF_F_TSO_ECN;
5793 }
5794
5795 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5796 !(features & NETIF_F_IPV6_CSUM)) {
5797 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5798 features &= ~NETIF_F_TSO6;
5799 }
5800
Ben Hutchings31d8b9e2011-04-12 14:47:15 +00005801 /* TSO ECN requires that TSO is present as well. */
5802 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5803 features &= ~NETIF_F_TSO_ECN;
5804
Michał Mirosław212b5732011-02-15 16:59:16 +00005805 /* Software GSO depends on SG. */
5806 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005807 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
Michał Mirosław212b5732011-02-15 16:59:16 +00005808 features &= ~NETIF_F_GSO;
5809 }
5810
Michał Mirosławacd11302011-01-24 15:45:15 -08005811 /* UFO needs SG and checksumming */
Herbert Xub63365a2008-10-23 01:11:29 -07005812 if (features & NETIF_F_UFO) {
Michał Mirosław79032642010-11-30 06:38:00 +00005813 /* maybe split UFO into V4 and V6? */
5814 if (!((features & NETIF_F_GEN_CSUM) ||
5815 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5816 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005817 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005818 "Dropping NETIF_F_UFO since no checksum offload features.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005819 features &= ~NETIF_F_UFO;
5820 }
5821
5822 if (!(features & NETIF_F_SG)) {
Michał Mirosław6f404e42011-05-16 15:14:21 -04005823 netdev_dbg(dev,
Michał Mirosławacd11302011-01-24 15:45:15 -08005824 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
Herbert Xub63365a2008-10-23 01:11:29 -07005825 features &= ~NETIF_F_UFO;
5826 }
5827 }
5828
Jiri Pirkod0290212014-04-02 23:09:31 +02005829#ifdef CONFIG_NET_RX_BUSY_POLL
5830 if (dev->netdev_ops->ndo_busy_poll)
5831 features |= NETIF_F_BUSY_POLL;
5832 else
5833#endif
5834 features &= ~NETIF_F_BUSY_POLL;
5835
Herbert Xub63365a2008-10-23 01:11:29 -07005836 return features;
5837}
Herbert Xub63365a2008-10-23 01:11:29 -07005838
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005839int __netdev_update_features(struct net_device *dev)
Michał Mirosław5455c692011-02-15 16:59:17 +00005840{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005841 netdev_features_t features;
Michał Mirosław5455c692011-02-15 16:59:17 +00005842 int err = 0;
5843
Michał Mirosław87267482011-04-12 09:56:38 +00005844 ASSERT_RTNL();
5845
Michał Mirosław5455c692011-02-15 16:59:17 +00005846 features = netdev_get_wanted_features(dev);
5847
5848 if (dev->netdev_ops->ndo_fix_features)
5849 features = dev->netdev_ops->ndo_fix_features(dev, features);
5850
5851 /* driver might be less strict about feature dependencies */
5852 features = netdev_fix_features(dev, features);
5853
5854 if (dev->features == features)
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005855 return 0;
Michał Mirosław5455c692011-02-15 16:59:17 +00005856
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005857 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5858 &dev->features, &features);
Michał Mirosław5455c692011-02-15 16:59:17 +00005859
5860 if (dev->netdev_ops->ndo_set_features)
5861 err = dev->netdev_ops->ndo_set_features(dev, features);
5862
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005863 if (unlikely(err < 0)) {
Michał Mirosław5455c692011-02-15 16:59:17 +00005864 netdev_err(dev,
Michał Mirosławc8f44af2011-11-15 15:29:55 +00005865 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5866 err, &features, &dev->features);
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005867 return -1;
5868 }
5869
5870 if (!err)
5871 dev->features = features;
5872
5873 return 1;
5874}
5875
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005876/**
5877 * netdev_update_features - recalculate device features
5878 * @dev: the device to check
5879 *
5880 * Recalculate dev->features set and send notifications if it
5881 * has changed. Should be called after driver or hardware dependent
5882 * conditions might have changed that influence the features.
5883 */
Michał Mirosław6cb6a272011-04-02 22:48:47 -07005884void netdev_update_features(struct net_device *dev)
5885{
5886 if (__netdev_update_features(dev))
5887 netdev_features_change(dev);
Michał Mirosław5455c692011-02-15 16:59:17 +00005888}
5889EXPORT_SYMBOL(netdev_update_features);
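
/*
 * Illustrative sketch (hypothetical callback): a driver re-evaluating
 * features after a change its ndo_fix_features() depends on, here the
 * MTU. ndo_change_mtu runs under RTNL, as netdev_update_features()
 * requires.
 */
static int example_ndo_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;
	/* feature dependencies on the MTU may have changed */
	netdev_update_features(dev);
	return 0;
}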
5890
Linus Torvalds1da177e2005-04-16 15:20:36 -07005891/**
Michał Mirosławafe12cc2011-05-07 03:22:17 +00005892 * netdev_change_features - recalculate device features
5893 * @dev: the device to check
5894 *
5895 * Recalculate dev->features set and send notifications even
5896 * if they have not changed. Should be called instead of
5897 * netdev_update_features() if also dev->vlan_features might
5898 * have changed to allow the changes to be propagated to stacked
5899 * VLAN devices.
5900 */
5901void netdev_change_features(struct net_device *dev)
5902{
5903 __netdev_update_features(dev);
5904 netdev_features_change(dev);
5905}
5906EXPORT_SYMBOL(netdev_change_features);
5907
5908/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005909 * netif_stacked_transfer_operstate - transfer operstate
5910 * @rootdev: the root or lower level device to transfer state from
5911 * @dev: the device to transfer operstate to
5912 *
5913 * Transfer operational state from root to device. This is normally
5914 * called when a stacking relationship exists between the root
5915 * device and the device(a leaf device).
5916 */
5917void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5918 struct net_device *dev)
5919{
5920 if (rootdev->operstate == IF_OPER_DORMANT)
5921 netif_dormant_on(dev);
5922 else
5923 netif_dormant_off(dev);
5924
5925 if (netif_carrier_ok(rootdev)) {
5926 if (!netif_carrier_ok(dev))
5927 netif_carrier_on(dev);
5928 } else {
5929 if (netif_carrier_ok(dev))
5930 netif_carrier_off(dev);
5931 }
5932}
5933EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5934
Michael Daltona953be52014-01-16 22:23:28 -08005935#ifdef CONFIG_SYSFS
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005936static int netif_alloc_rx_queues(struct net_device *dev)
5937{
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005938 unsigned int i, count = dev->num_rx_queues;
Tom Herbertbd25fa72010-10-18 18:00:16 +00005939 struct netdev_rx_queue *rx;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005940
Tom Herbertbd25fa72010-10-18 18:00:16 +00005941 BUG_ON(count < 1);
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005942
Tom Herbertbd25fa72010-10-18 18:00:16 +00005943 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
Joe Perches62b59422013-02-04 16:48:16 +00005944 if (!rx)
Tom Herbertbd25fa72010-10-18 18:00:16 +00005945 return -ENOMEM;
Joe Perches62b59422013-02-04 16:48:16 +00005946
Tom Herbertbd25fa72010-10-18 18:00:16 +00005947 dev->_rx = rx;
5948
Tom Herbertbd25fa72010-10-18 18:00:16 +00005949 for (i = 0; i < count; i++)
Tom Herbertfe822242010-11-09 10:47:38 +00005950 rx[i].dev = dev;
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005951 return 0;
5952}
Tom Herbertbf264142010-11-26 08:36:09 +00005953#endif
Eric Dumazet1b4bf462010-09-23 17:26:35 +00005954
Changli Gaoaa942102010-12-04 02:31:41 +00005955static void netdev_init_one_queue(struct net_device *dev,
5956 struct netdev_queue *queue, void *_unused)
5957{
5958 /* Initialize queue lock */
5959 spin_lock_init(&queue->_xmit_lock);
5960 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5961 queue->xmit_lock_owner = -1;
Changli Gaob236da62010-12-14 03:09:15 +00005962 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
Changli Gaoaa942102010-12-04 02:31:41 +00005963 queue->dev = dev;
Tom Herbert114cf582011-11-28 16:33:09 +00005964#ifdef CONFIG_BQL
5965 dql_init(&queue->dql, HZ);
5966#endif
Changli Gaoaa942102010-12-04 02:31:41 +00005967}
5968
Eric Dumazet60877a32013-06-20 01:15:51 -07005969static void netif_free_tx_queues(struct net_device *dev)
5970{
WANG Cong4cb28972014-06-02 15:55:22 -07005971 kvfree(dev->_tx);
Eric Dumazet60877a32013-06-20 01:15:51 -07005972}
5973
Tom Herberte6484932010-10-18 18:04:39 +00005974static int netif_alloc_netdev_queues(struct net_device *dev)
5975{
5976 unsigned int count = dev->num_tx_queues;
5977 struct netdev_queue *tx;
Eric Dumazet60877a32013-06-20 01:15:51 -07005978 size_t sz = count * sizeof(*tx);
Tom Herberte6484932010-10-18 18:04:39 +00005979
Eric Dumazet60877a32013-06-20 01:15:51 -07005980 BUG_ON(count < 1 || count > 0xffff);
Tom Herberte6484932010-10-18 18:04:39 +00005981
Eric Dumazet60877a32013-06-20 01:15:51 -07005982 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
5983 if (!tx) {
5984 tx = vzalloc(sz);
5985 if (!tx)
5986 return -ENOMEM;
5987 }
Tom Herberte6484932010-10-18 18:04:39 +00005988 dev->_tx = tx;
Tom Herbert1d24eb42010-11-21 13:17:27 +00005989
Tom Herberte6484932010-10-18 18:04:39 +00005990 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5991 spin_lock_init(&dev->tx_global_lock);
Changli Gaoaa942102010-12-04 02:31:41 +00005992
5993 return 0;
Tom Herberte6484932010-10-18 18:04:39 +00005994}
5995
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005996/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005997 * register_netdevice - register a network device
5998 * @dev: device to register
5999 *
6000 * Take a completed network device structure and add it to the kernel
6001 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6002 * chain. 0 is returned on success. A negative errno code is returned
6003 * on a failure to set up the device, or if the name is a duplicate.
6004 *
6005 * Callers must hold the rtnl semaphore. You may want
6006 * register_netdev() instead of this.
6007 *
6008 * BUGS:
6009 * The locking appears insufficient to guarantee two parallel registers
6010 * will not get the same name.
6011 */
6012
6013int register_netdevice(struct net_device *dev)
6014{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006015 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006016 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006017
6018 BUG_ON(dev_boot_phase);
6019 ASSERT_RTNL();
6020
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006021 might_sleep();
6022
Linus Torvalds1da177e2005-04-16 15:20:36 -07006023 /* When net_device's are persistent, this will be fatal. */
6024 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006025 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006026
David S. Millerf1f28aa2008-07-15 00:08:33 -07006027 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07006028 netdev_set_addr_lockdep_class(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006029
Linus Torvalds1da177e2005-04-16 15:20:36 -07006030 dev->iflink = -1;
6031
Gao feng828de4f2012-09-13 20:58:27 +00006032 ret = dev_get_valid_name(net, dev, dev->name);
Peter Pan(潘卫平)0696c3a2011-05-12 15:46:56 +00006033 if (ret < 0)
6034 goto out;
6035
Linus Torvalds1da177e2005-04-16 15:20:36 -07006036 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006037 if (dev->netdev_ops->ndo_init) {
6038 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006039 if (ret) {
6040 if (ret > 0)
6041 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08006042 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006043 }
6044 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006045
Patrick McHardyf6469682013-04-19 02:04:27 +00006046 if (((dev->hw_features | dev->features) &
6047 NETIF_F_HW_VLAN_CTAG_FILTER) &&
Michał Mirosławd2ed2732013-01-29 15:14:16 +00006048 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6049 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6050 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6051 ret = -EINVAL;
6052 goto err_uninit;
6053 }
6054
Pavel Emelyanov9c7dafb2012-08-08 21:52:46 +00006055 ret = -EBUSY;
6056 if (!dev->ifindex)
6057 dev->ifindex = dev_new_index(net);
6058 else if (__dev_get_by_index(net, dev->ifindex))
6059 goto err_uninit;
6060
Linus Torvalds1da177e2005-04-16 15:20:36 -07006061 if (dev->iflink == -1)
6062 dev->iflink = dev->ifindex;
6063
Michał Mirosław5455c692011-02-15 16:59:17 +00006064 /* Transfer changeable features to wanted_features and enable
6065 * software offloads (GSO and GRO).
6066 */
6067 dev->hw_features |= NETIF_F_SOFT_FEATURES;
Michał Mirosław14d12322011-02-22 16:52:28 +00006068 dev->features |= NETIF_F_SOFT_FEATURES;
6069 dev->wanted_features = dev->features & dev->hw_features;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006070
Michał Mirosław34324dc2011-11-15 15:29:55 +00006071 if (!(dev->flags & IFF_LOOPBACK)) {
6072 dev->hw_features |= NETIF_F_NOCACHE_COPY;
Tom Herbertc6e1a0d2011-04-04 22:30:30 -07006073 }
6074
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006075 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
Brandon Philips16c3ea72010-09-15 09:24:24 +00006076 */
Michał Mirosław1180e7d2011-07-14 14:41:11 -07006077 dev->vlan_features |= NETIF_F_HIGHDMA;
Brandon Philips16c3ea72010-09-15 09:24:24 +00006078
Pravin B Shelaree579672013-03-07 09:28:08 +00006079 /* Make NETIF_F_SG inheritable to tunnel devices.
6080 */
6081 dev->hw_enc_features |= NETIF_F_SG;
6082
Simon Horman0d89d202013-05-23 21:02:52 +00006083 /* Make NETIF_F_SG inheritable to MPLS.
6084 */
6085 dev->mpls_features |= NETIF_F_SG;
6086
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00006087 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6088 ret = notifier_to_errno(ret);
6089 if (ret)
6090 goto err_uninit;
6091
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006092 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006093 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006094 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006095 dev->reg_state = NETREG_REGISTERED;
6096
Michał Mirosław6cb6a272011-04-02 22:48:47 -07006097 __netdev_update_features(dev);
Michał Mirosław8e9b59b2011-02-22 16:52:28 +00006098
Linus Torvalds1da177e2005-04-16 15:20:36 -07006099 /*
6100 * Default initial state at registration is that the
6101 * device is present.
6102 */
6103
6104 set_bit(__LINK_STATE_PRESENT, &dev->state);
6105
Ben Hutchings8f4cccb2012-08-20 22:16:51 +01006106 linkwatch_init_dev(dev);
6107
Linus Torvalds1da177e2005-04-16 15:20:36 -07006108 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006109 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006110 list_netdevice(dev);
Theodore Ts'o7bf23572012-07-04 21:23:25 -04006111 add_device_randomness(dev->dev_addr, dev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006112
Jiri Pirko948b3372013-01-08 01:38:25 +00006113 /* If the device has permanent device address, driver should
6114 * set dev_addr and also addr_assign_type should be set to
6115 * NET_ADDR_PERM (default value).
6116 */
6117 if (dev->addr_assign_type == NET_ADDR_PERM)
6118 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6119
Linus Torvalds1da177e2005-04-16 15:20:36 -07006120 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006121 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07006122 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07006123 if (ret) {
6124 rollback_registered(dev);
6125 dev->reg_state = NETREG_UNREGISTERED;
6126 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006127 /*
6128 * Prevent userspace races by waiting until the network
6129 * device is fully setup before sending notifications.
6130 */
Patrick McHardya2835762010-02-26 06:34:51 +00006131 if (!dev->rtnl_link_ops ||
6132 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006133 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006134
6135out:
6136 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006137
6138err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08006139 if (dev->netdev_ops->ndo_uninit)
6140 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07006141 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006142}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006143EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006144
6145/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006146 * init_dummy_netdev - init a dummy network device for NAPI
6147 * @dev: device to init
6148 *
6149 * This takes a network device structure and initializes the minimum
6150 * number of fields so it can be used to schedule NAPI polls without
6151 * registering a full blown interface. This is to be used by drivers
6152 * that need to tie several hardware interfaces to a single NAPI
6153 * poll scheduler due to HW limitations.
6154 */
6155int init_dummy_netdev(struct net_device *dev)
6156{
6157 /* Clear everything. Note we don't initialize spinlocks
6158 * as they aren't supposed to be taken by any of the
6159 * NAPI code and this dummy netdev is supposed to be
6160 * only ever used for NAPI polls
6161 */
6162 memset(dev, 0, sizeof(struct net_device));
6163
6164 /* make sure we BUG if trying to hit standard
6165 * register/unregister code path
6166 */
6167 dev->reg_state = NETREG_DUMMY;
6168
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006169 /* NAPI wants this */
6170 INIT_LIST_HEAD(&dev->napi_list);
6171
6172 /* a dummy interface is started by default */
6173 set_bit(__LINK_STATE_PRESENT, &dev->state);
6174 set_bit(__LINK_STATE_START, &dev->state);
6175
Eric Dumazet29b44332010-10-11 10:22:12 +00006176	/* Note: We don't allocate pcpu_refcnt for dummy devices,
6177	 * because users of this 'device' don't need to change
6178 * its refcount.
6179 */
6180
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08006181 return 0;
6182}
6183EXPORT_SYMBOL_GPL(init_dummy_netdev);
6184
6185
6186/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006187 * register_netdev - register a network device
6188 * @dev: device to register
6189 *
6190 * Take a completed network device structure and add it to the kernel
6191 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6192 * chain. 0 is returned on success. A negative errno code is returned
6193 * on a failure to set up the device, or if the name is a duplicate.
6194 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07006195 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07006196 * and expands the device name if you passed a format string to
6197 * alloc_netdev.
6198 */
6199int register_netdev(struct net_device *dev)
6200{
6201 int err;
6202
6203 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204 err = register_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006205 rtnl_unlock();
6206 return err;
6207}
6208EXPORT_SYMBOL(register_netdev);
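
/*
 * Illustrative sketch (hypothetical names): the minimal register life
 * cycle of an Ethernet-style device. A real driver fills in ndo_open,
 * ndo_start_xmit and friends, and later pairs this with
 * unregister_netdev() + free_netdev().
 */
static const struct net_device_ops example_netdev_ops = {
	/* intentionally empty for this sketch */
};

static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_etherdev(0);	/* no private area in this sketch */
	if (!dev)
		return NULL;

	dev->netdev_ops = &example_netdev_ops;

	if (register_netdev(dev)) {	/* takes rtnl_lock() internally */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}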
6209
Eric Dumazet29b44332010-10-11 10:22:12 +00006210int netdev_refcnt_read(const struct net_device *dev)
6211{
6212 int i, refcnt = 0;
6213
6214 for_each_possible_cpu(i)
6215 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6216 return refcnt;
6217}
6218EXPORT_SYMBOL(netdev_refcnt_read);
6219
Ben Hutchings2c530402012-07-10 10:55:09 +00006220/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006221 * netdev_wait_allrefs - wait until all references are gone.
Randy Dunlap3de7a372012-08-18 14:36:44 +00006222 * @dev: target net_device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006223 *
6224 * This is called when unregistering network devices.
6225 *
6226 * Any protocol or device that holds a reference should register
6227 * for netdevice notification, and clean up and put back the
6228 * reference if they receive an UNREGISTER event.
6229 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006230 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006231 */
6232static void netdev_wait_allrefs(struct net_device *dev)
6233{
6234 unsigned long rebroadcast_time, warning_time;
Eric Dumazet29b44332010-10-11 10:22:12 +00006235 int refcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006236
Eric Dumazete014deb2009-11-17 05:59:21 +00006237 linkwatch_forget_dev(dev);
6238
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239 rebroadcast_time = warning_time = jiffies;
Eric Dumazet29b44332010-10-11 10:22:12 +00006240 refcnt = netdev_refcnt_read(dev);
6241
6242 while (refcnt != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006243 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006244 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006245
6246 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07006247 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006248
Eric Dumazet748e2d92012-08-22 21:50:59 +00006249 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006250 rcu_barrier();
Eric Dumazet748e2d92012-08-22 21:50:59 +00006251 rtnl_lock();
6252
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006253 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006254 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6255 &dev->state)) {
6256 /* We must not have linkwatch events
6257 * pending on unregister. If this
6258 * happens, we simply run the queue
6259 * unscheduled, resulting in a noop
6260 * for this device.
6261 */
6262 linkwatch_run_queue();
6263 }
6264
Stephen Hemminger6756ae42006-03-20 22:23:58 -08006265 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006266
6267 rebroadcast_time = jiffies;
6268 }
6269
6270 msleep(250);
6271
Eric Dumazet29b44332010-10-11 10:22:12 +00006272 refcnt = netdev_refcnt_read(dev);
6273
Linus Torvalds1da177e2005-04-16 15:20:36 -07006274 if (time_after(jiffies, warning_time + 10 * HZ)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006275 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6276 dev->name, refcnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006277 warning_time = jiffies;
6278 }
6279 }
6280}
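/* Sketch of the notifier pattern the comment above asks for: a
 * hypothetical subsystem caches a dev_hold()'d pointer and must drop
 * it on NETDEV_UNREGISTER so netdev_wait_allrefs() can finish.
 * Not compiled; foo_* names are illustrative.
 */
#if 0
static struct net_device *foo_cached_dev;

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER && dev == foo_cached_dev) {
		foo_cached_dev = NULL;
		dev_put(dev);	/* release our reference */
	}
	return NOTIFY_DONE;
}

static struct notifier_block foo_netdev_notifier = {
	.notifier_call = foo_netdev_event,
};
/* registered once with register_netdevice_notifier(&foo_netdev_notifier) */
#endif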
6281
6282/* The sequence is:
6283 *
6284 * rtnl_lock();
6285 * ...
6286 * register_netdevice(x1);
6287 * register_netdevice(x2);
6288 * ...
6289 * unregister_netdevice(y1);
6290 * unregister_netdevice(y2);
6291 * ...
6292 * rtnl_unlock();
6293 * free_netdev(y1);
6294 * free_netdev(y2);
6295 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07006296 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07006297 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006298 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07006299 * without deadlocking with linkwatch via keventd.
6300 * 2) Since we run with the RTNL semaphore not held, we can sleep
6301 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07006302 *
6303 * We must not return until all unregister events added during
6304 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006305 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006306void netdev_run_todo(void)
6307{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006308 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006309
Linus Torvalds1da177e2005-04-16 15:20:36 -07006310 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006311 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07006312
6313 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07006314
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006315
6316 /* Wait for rcu callbacks to finish before next phase */
Eric W. Biederman850a5452011-10-13 22:25:23 +00006317 if (!list_empty(&list))
6318 rcu_barrier();
6319
Linus Torvalds1da177e2005-04-16 15:20:36 -07006320 while (!list_empty(&list)) {
6321 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00006322 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006323 list_del(&dev->todo_list);
6324
Eric Dumazet748e2d92012-08-22 21:50:59 +00006325 rtnl_lock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006326 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Eric Dumazet748e2d92012-08-22 21:50:59 +00006327 __rtnl_unlock();
Eric Dumazet0115e8e2012-08-22 17:19:46 +00006328
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006329 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006330 pr_err("network todo '%s' but state %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006331 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006332 dump_stack();
6333 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006334 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006335
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006336 dev->reg_state = NETREG_UNREGISTERED;
6337
Changli Gao152102c2010-03-30 20:16:22 +00006338 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07006339
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006340 netdev_wait_allrefs(dev);
6341
6342 /* paranoia */
Eric Dumazet29b44332010-10-11 10:22:12 +00006343 BUG_ON(netdev_refcnt_read(dev));
Eric Dumazet33d480c2011-08-11 19:30:52 +00006344 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6345 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07006346 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006347
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07006348 if (dev->destructor)
6349 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006350
Eric W. Biederman50624c92013-09-23 21:19:49 -07006351 /* Report a network device has been unregistered */
6352 rtnl_lock();
6353 dev_net(dev)->dev_unreg_count--;
6354 __rtnl_unlock();
6355 wake_up(&netdev_unregistering_wq);
6356
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07006357 /* Free network device */
6358 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006359 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360}
6361
Ben Hutchings3cfde792010-07-09 09:11:52 +00006362/* Convert net_device_stats to rtnl_link_stats64. They have the same
6363 * fields in the same order, with only the type differing.
6364 */
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006365void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6366 const struct net_device_stats *netdev_stats)
Ben Hutchings3cfde792010-07-09 09:11:52 +00006367{
6368#if BITS_PER_LONG == 64
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006369 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6370 memcpy(stats64, netdev_stats, sizeof(*stats64));
Ben Hutchings3cfde792010-07-09 09:11:52 +00006371#else
6372 size_t i, n = sizeof(*stats64) / sizeof(u64);
6373 const unsigned long *src = (const unsigned long *)netdev_stats;
6374 u64 *dst = (u64 *)stats64;
6375
6376 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6377 sizeof(*stats64) / sizeof(u64));
6378 for (i = 0; i < n; i++)
6379 dst[i] = src[i];
6380#endif
6381}
Eric Dumazet77a1abf2012-03-05 04:50:09 +00006382EXPORT_SYMBOL(netdev_stats_to_stats64);
Ben Hutchings3cfde792010-07-09 09:11:52 +00006383
Eric Dumazetd83345a2009-11-16 03:36:51 +00006384/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006385 * dev_get_stats - get network device statistics
6386 * @dev: device to get statistics from
Eric Dumazet28172732010-07-07 14:58:56 -07006387 * @storage: place to store stats
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006388 *
Ben Hutchingsd7753512010-07-09 09:12:41 +00006389 * Get network statistics from device. Return @storage.
6390 * The device driver may provide its own method by setting
6391 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6392 * otherwise the internal statistics structure is used.
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006393 */
Ben Hutchingsd7753512010-07-09 09:12:41 +00006394struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6395 struct rtnl_link_stats64 *storage)
Eric Dumazet7004bf22009-05-18 00:34:33 +00006396{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006397 const struct net_device_ops *ops = dev->netdev_ops;
6398
Eric Dumazet28172732010-07-07 14:58:56 -07006399 if (ops->ndo_get_stats64) {
6400 memset(storage, 0, sizeof(*storage));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006401 ops->ndo_get_stats64(dev, storage);
6402 } else if (ops->ndo_get_stats) {
Ben Hutchings3cfde792010-07-09 09:11:52 +00006403 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006404 } else {
6405 netdev_stats_to_stats64(storage, &dev->stats);
Eric Dumazet28172732010-07-07 14:58:56 -07006406 }
Eric Dumazetcaf586e2010-09-30 21:06:55 +00006407 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
Eric Dumazet015f0682014-03-27 08:45:56 -07006408 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
Eric Dumazet28172732010-07-07 14:58:56 -07006409 return storage;
Rusty Russellc45d2862007-03-28 14:29:08 -07006410}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08006411EXPORT_SYMBOL(dev_get_stats);
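/* Driver-side counterpart, sketched: an ndo_get_stats64() that fills
 * the caller-provided storage (already zeroed by dev_get_stats())
 * from per-cpu counters. Hypothetical foo_* names; not compiled.
 */
#if 0
struct foo_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

struct foo_priv {
	struct foo_pcpu_stats __percpu *pcpu_stats;
};

static struct rtnl_link_stats64 *
foo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct foo_priv *priv = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct foo_pcpu_stats *p =
			per_cpu_ptr(priv->pcpu_stats, cpu);
		u64 packets, bytes;
		unsigned int start;

		do {	/* retry if a writer updated the counters */
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->rx_packets;
			bytes = p->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	return stats;
}
#endif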
Rusty Russellc45d2862007-03-28 14:29:08 -07006412
Eric Dumazet24824a02010-10-02 06:11:55 +00006413struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
David S. Millerdc2b4842008-07-08 17:18:23 -07006414{
Eric Dumazet24824a02010-10-02 06:11:55 +00006415 struct netdev_queue *queue = dev_ingress_queue(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07006416
Eric Dumazet24824a02010-10-02 06:11:55 +00006417#ifdef CONFIG_NET_CLS_ACT
6418 if (queue)
6419 return queue;
6420 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6421 if (!queue)
6422 return NULL;
6423 netdev_init_one_queue(dev, queue, NULL);
Eric Dumazet24824a02010-10-02 06:11:55 +00006424 queue->qdisc = &noop_qdisc;
6425 queue->qdisc_sleeping = &noop_qdisc;
6426 rcu_assign_pointer(dev->ingress_queue, queue);
6427#endif
6428 return queue;
David S. Millerbb949fb2008-07-08 16:55:56 -07006429}
6430
Eric Dumazet2c60db02012-09-16 09:17:26 +00006431static const struct ethtool_ops default_ethtool_ops;
6432
Stanislaw Gruszkad07d7502013-01-10 23:19:10 +00006433void netdev_set_default_ethtool_ops(struct net_device *dev,
6434 const struct ethtool_ops *ops)
6435{
6436 if (dev->ethtool_ops == &default_ethtool_ops)
6437 dev->ethtool_ops = ops;
6438}
6439EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
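/* Why this helper exists, sketched: a mid-layer framework can install
 * fallback ethtool ops without clobbering ops a driver already set in
 * its setup() callback. Hypothetical foo_* names; not compiled.
 */
#if 0
static const struct ethtool_ops foo_default_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
};

static void foo_framework_init(struct net_device *dev)
{
	/* no-op if the driver already assigned its own ethtool_ops */
	netdev_set_default_ethtool_ops(dev, &foo_default_ethtool_ops);
}
#endif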
6440
Eric Dumazet74d332c2013-10-30 13:10:44 -07006441void netdev_freemem(struct net_device *dev)
6442{
6443 char *addr = (char *)dev - dev->padded;
6444
WANG Cong4cb28972014-06-02 15:55:22 -07006445 kvfree(addr);
Eric Dumazet74d332c2013-10-30 13:10:44 -07006446}
6447
Linus Torvalds1da177e2005-04-16 15:20:36 -07006448/**
Tom Herbert36909ea2011-01-09 19:36:31 +00006449 * alloc_netdev_mqs - allocate network device
Tom Gundersenc835a672014-07-14 16:37:24 +02006450 * @sizeof_priv: size of private data to allocate space for
6451 * @name: device name format string
6452 * @name_assign_type: origin of device name
6453 * @setup: callback to initialize device
6454 * @txqs: the number of TX subqueues to allocate
6455 * @rxqs: the number of RX subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07006456 *
 6457 * Allocates a struct net_device with a private data area for driver use
Li Zhong90e51ad2013-11-22 15:04:46 +08006458 * and performs basic initialization. Also allocates subqueue structs
Tom Herbert36909ea2011-01-09 19:36:31 +00006459 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006460 */
Tom Herbert36909ea2011-01-09 19:36:31 +00006461struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
Tom Gundersenc835a672014-07-14 16:37:24 +02006462 unsigned char name_assign_type,
Tom Herbert36909ea2011-01-09 19:36:31 +00006463 void (*setup)(struct net_device *),
6464 unsigned int txqs, unsigned int rxqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006465{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006466 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07006467 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006468 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006469
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07006470 BUG_ON(strlen(name) >= sizeof(dev->name));
6471
Tom Herbert36909ea2011-01-09 19:36:31 +00006472 if (txqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006473 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
Tom Herbert55513fb2010-10-18 17:55:58 +00006474 return NULL;
6475 }
6476
Michael Daltona953be52014-01-16 22:23:28 -08006477#ifdef CONFIG_SYSFS
Tom Herbert36909ea2011-01-09 19:36:31 +00006478 if (rxqs < 1) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00006479 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
Tom Herbert36909ea2011-01-09 19:36:31 +00006480 return NULL;
6481 }
6482#endif
6483
David S. Millerfd2ea0a2008-07-17 01:56:23 -07006484 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006485 if (sizeof_priv) {
6486 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006487 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07006488 alloc_size += sizeof_priv;
6489 }
6490 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006491 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006492
Eric Dumazet74d332c2013-10-30 13:10:44 -07006493 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6494 if (!p)
6495 p = vzalloc(alloc_size);
Joe Perches62b59422013-02-04 16:48:16 +00006496 if (!p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006497 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006498
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00006499 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006500 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006501
Eric Dumazet29b44332010-10-11 10:22:12 +00006502 dev->pcpu_refcnt = alloc_percpu(int);
6503 if (!dev->pcpu_refcnt)
Eric Dumazet74d332c2013-10-30 13:10:44 -07006504 goto free_dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006505
Linus Torvalds1da177e2005-04-16 15:20:36 -07006506 if (dev_addr_init(dev))
Eric Dumazet29b44332010-10-11 10:22:12 +00006507 goto free_pcpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006508
Jiri Pirko22bedad32010-04-01 21:22:57 +00006509 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006510 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00006511
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006512 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006513
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07006514 dev->gso_max_size = GSO_MAX_SIZE;
Ben Hutchings30b678d2012-07-30 15:57:00 +00006515 dev->gso_max_segs = GSO_MAX_SEGS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006516
Herbert Xud565b0a2008-12-15 23:38:52 -08006517 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006518 INIT_LIST_HEAD(&dev->unreg_list);
Eric W. Biederman5cde2822013-10-05 19:26:05 -07006519 INIT_LIST_HEAD(&dev->close_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00006520 INIT_LIST_HEAD(&dev->link_watch_list);
Veaceslav Falico2f268f12013-09-25 09:20:07 +02006521 INIT_LIST_HEAD(&dev->adj_list.upper);
6522 INIT_LIST_HEAD(&dev->adj_list.lower);
6523 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6524 INIT_LIST_HEAD(&dev->all_adj_list.lower);
Eric Dumazet93f154b2009-05-18 22:19:19 -07006525 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006526 setup(dev);
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006527
6528 dev->num_tx_queues = txqs;
6529 dev->real_num_tx_queues = txqs;
6530 if (netif_alloc_netdev_queues(dev))
6531 goto free_all;
6532
Michael Daltona953be52014-01-16 22:23:28 -08006533#ifdef CONFIG_SYSFS
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006534 dev->num_rx_queues = rxqs;
6535 dev->real_num_rx_queues = rxqs;
6536 if (netif_alloc_rx_queues(dev))
6537 goto free_all;
6538#endif
6539
Linus Torvalds1da177e2005-04-16 15:20:36 -07006540 strcpy(dev->name, name);
Tom Gundersenc835a672014-07-14 16:37:24 +02006541 dev->name_assign_type = name_assign_type;
Vlad Dogarucbda10f2011-01-13 23:38:30 +00006542 dev->group = INIT_NETDEV_GROUP;
Eric Dumazet2c60db02012-09-16 09:17:26 +00006543 if (!dev->ethtool_ops)
6544 dev->ethtool_ops = &default_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006545 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006546
David S. Miller8d3bdbd2011-02-08 15:02:50 -08006547free_all:
6548 free_netdev(dev);
6549 return NULL;
6550
Eric Dumazet29b44332010-10-11 10:22:12 +00006551free_pcpu:
6552 free_percpu(dev->pcpu_refcnt);
Eric Dumazet74d332c2013-10-30 13:10:44 -07006553free_dev:
6554 netdev_freemem(dev);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00006555 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006556}
Tom Herbert36909ea2011-01-09 19:36:31 +00006557EXPORT_SYMBOL(alloc_netdev_mqs);
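/* Multiqueue allocation, sketched: eight TX and eight RX queues, with
 * the "foo%d" name expanded later at registration time. Hypothetical
 * foo_* names; not compiled.
 */
#if 0
struct foo_priv {
	int dummy;
};

static struct net_device *foo_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
				NET_NAME_UNKNOWN, ether_setup, 8, 8);
}
#endif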
Linus Torvalds1da177e2005-04-16 15:20:36 -07006558
6559/**
6560 * free_netdev - free network device
6561 * @dev: device
6562 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006563 * This function does the last stage of destroying an allocated device
6564 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006565 * If this is the last reference then it will be freed.
6566 */
6567void free_netdev(struct net_device *dev)
6568{
Herbert Xud565b0a2008-12-15 23:38:52 -08006569 struct napi_struct *p, *n;
6570
Denis V. Lunevf3005d72008-04-16 02:02:18 -07006571 release_net(dev_net(dev));
6572
Eric Dumazet60877a32013-06-20 01:15:51 -07006573 netif_free_tx_queues(dev);
Michael Daltona953be52014-01-16 22:23:28 -08006574#ifdef CONFIG_SYSFS
Tom Herbertfe822242010-11-09 10:47:38 +00006575 kfree(dev->_rx);
6576#endif
David S. Millere8a04642008-07-17 00:34:19 -07006577
Eric Dumazet33d480c2011-08-11 19:30:52 +00006578 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
Eric Dumazet24824a02010-10-02 06:11:55 +00006579
Jiri Pirkof001fde2009-05-05 02:48:28 +00006580 /* Flush device addresses */
6581 dev_addr_flush(dev);
6582
Herbert Xud565b0a2008-12-15 23:38:52 -08006583 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6584 netif_napi_del(p);
6585
Eric Dumazet29b44332010-10-11 10:22:12 +00006586 free_percpu(dev->pcpu_refcnt);
6587 dev->pcpu_refcnt = NULL;
6588
Stephen Hemminger3041a062006-05-26 13:25:24 -07006589 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006590 if (dev->reg_state == NETREG_UNINITIALIZED) {
Eric Dumazet74d332c2013-10-30 13:10:44 -07006591 netdev_freemem(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006592 return;
6593 }
6594
6595 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6596 dev->reg_state = NETREG_RELEASED;
6597
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07006598 /* will free via device release */
6599 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006600}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006601EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006602
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006603/**
6604 * synchronize_net - Synchronize with packet receive processing
6605 *
6606 * Wait for packets currently being received to be done.
6607 * Does not block later packets from starting.
6608 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09006609void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006610{
6611 might_sleep();
Eric Dumazetbe3fc412011-05-23 23:07:32 +00006612 if (rtnl_is_locked())
6613 synchronize_rcu_expedited();
6614 else
6615 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006616}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006617EXPORT_SYMBOL(synchronize_net);
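/* Classic use of synchronize_net(), sketched: unpublish a pointer seen
 * on the receive path, wait out in-flight readers, then free. Caller
 * is assumed to hold RTNL. Hypothetical foo_* names; not compiled.
 */
#if 0
struct foo_handler {
	void (*handle)(struct sk_buff *skb);
};

static struct foo_handler __rcu *foo_handler_ptr;

static void foo_remove_handler(void)
{
	struct foo_handler *h = rtnl_dereference(foo_handler_ptr);

	RCU_INIT_POINTER(foo_handler_ptr, NULL);	/* unpublish */
	synchronize_net();	/* wait for RX paths still using h */
	kfree(h);
}
#endif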
Linus Torvalds1da177e2005-04-16 15:20:36 -07006618
6619/**
Eric Dumazet44a08732009-10-27 07:03:04 +00006620 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00006622 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08006623 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07006624 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006625 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00006626 * If head is not NULL, device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006627 *
6628 * Callers must hold the rtnl semaphore. You may want
6629 * unregister_netdev() instead of this.
6630 */
6631
Eric Dumazet44a08732009-10-27 07:03:04 +00006632void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006633{
Herbert Xua6620712007-12-12 19:21:56 -08006634 ASSERT_RTNL();
6635
Eric Dumazet44a08732009-10-27 07:03:04 +00006636 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00006637 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00006638 } else {
6639 rollback_registered(dev);
6640 /* Finish processing unregister after unlock */
6641 net_set_todo(dev);
6642 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006643}
Eric Dumazet44a08732009-10-27 07:03:04 +00006644EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006645
6646/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006647 * unregister_netdevice_many - unregister many devices
6648 * @head: list of devices
Eric Dumazet87757a92014-06-06 06:44:03 -07006649 *
6650 * Note: As most callers use a stack allocated list_head,
 6651 * we force a list_del() to make sure the stack won't be corrupted later.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006652 */
6653void unregister_netdevice_many(struct list_head *head)
6654{
6655 struct net_device *dev;
6656
6657 if (!list_empty(head)) {
6658 rollback_registered_many(head);
6659 list_for_each_entry(dev, head, unreg_list)
6660 net_set_todo(dev);
Eric Dumazet87757a92014-06-06 06:44:03 -07006661 list_del(head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006662 }
6663}
Eric Dumazet63c80992009-10-27 07:06:49 +00006664EXPORT_SYMBOL(unregister_netdevice_many);
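/* The stack-allocated list pattern the note above refers to, sketched;
 * the final list_del() inside unregister_netdevice_many() is what lets
 * 'kill_list' safely live on the stack. foo_owns() is a hypothetical
 * ownership test; not compiled.
 */
#if 0
static bool foo_owns(const struct net_device *dev);

static void foo_destroy_all(struct net *net)
{
	struct net_device *dev, *aux;
	LIST_HEAD(kill_list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (foo_owns(dev))
			unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}
#endif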
Eric Dumazet9b5e3832009-10-27 07:04:19 +00006665
6666/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006667 * unregister_netdev - remove device from the kernel
6668 * @dev: device
6669 *
6670 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08006671 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006672 *
6673 * This is just a wrapper for unregister_netdevice that takes
6674 * the rtnl semaphore. In general you want to use this and not
6675 * unregister_netdevice.
6676 */
6677void unregister_netdev(struct net_device *dev)
6678{
6679 rtnl_lock();
6680 unregister_netdevice(dev);
6681 rtnl_unlock();
6682}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006683EXPORT_SYMBOL(unregister_netdev);
6684
Eric W. Biedermance286d32007-09-12 13:53:49 +02006685/**
 6686 * dev_change_net_namespace - move device to a different network namespace
6687 * @dev: device
6688 * @net: network namespace
 6689 * @pat: If not NULL, name pattern to try if the current device name
6690 * is already taken in the destination network namespace.
6691 *
6692 * This function shuts down a device interface and moves it
6693 * to a new network namespace. On success 0 is returned, on
 6694 * a failure a negative errno code is returned.
6695 *
6696 * Callers must hold the rtnl semaphore.
6697 */
6698
6699int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6700{
Eric W. Biedermance286d32007-09-12 13:53:49 +02006701 int err;
6702
6703 ASSERT_RTNL();
6704
6705 /* Don't allow namespace local devices to be moved. */
6706 err = -EINVAL;
6707 if (dev->features & NETIF_F_NETNS_LOCAL)
6708 goto out;
6709
 6710 /* Ensure the device has been registered */
Eric W. Biedermance286d32007-09-12 13:53:49 +02006711 if (dev->reg_state != NETREG_REGISTERED)
6712 goto out;
6713
 6714 /* Get out if there is nothing to do */
6715 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09006716 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02006717 goto out;
6718
6719 /* Pick the destination device name, and ensure
6720 * we can use it in the destination network namespace.
6721 */
6722 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00006723 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006724 /* We get here if we can't use the current device name */
6725 if (!pat)
6726 goto out;
Gao feng828de4f2012-09-13 20:58:27 +00006727 if (dev_get_valid_name(net, dev, pat) < 0)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006728 goto out;
6729 }
6730
6731 /*
 6732 * And now a mini version of register_netdevice and unregister_netdevice.
6733 */
6734
6735 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07006736 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006737
6738 /* And unlink it from device chain */
6739 err = -ENODEV;
6740 unlist_netdevice(dev);
6741
6742 synchronize_net();
6743
6744 /* Shutdown queueing discipline. */
6745 dev_shutdown(dev);
6746
 6747 /* Notify protocols that we are about to destroy
6748 this device. They should clean all the things.
David Lamparter3b27e102010-09-17 03:22:19 +00006749
6750 Note that dev->reg_state stays at NETREG_REGISTERED.
 6751 This is intentional: it lets 8021q and macvlan know that
 6752 the device is just moving and they can keep their slaves up.
Eric W. Biedermance286d32007-09-12 13:53:49 +02006753 */
6754 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Gao feng6549dd42012-08-23 15:36:55 +00006755 rcu_barrier();
6756 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006757 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006758
6759 /*
6760 * Flush the unicast and multicast chains
6761 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00006762 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00006763 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006764
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006765 /* Send a netdev-removed uevent to the old namespace */
6766 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6767
Eric W. Biedermance286d32007-09-12 13:53:49 +02006768 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006769 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006770
Eric W. Biedermance286d32007-09-12 13:53:49 +02006771 /* If there is an ifindex conflict assign a new one */
6772 if (__dev_get_by_index(net, dev->ifindex)) {
6773 int iflink = (dev->iflink == dev->ifindex);
6774 dev->ifindex = dev_new_index(net);
6775 if (iflink)
6776 dev->iflink = dev->ifindex;
6777 }
6778
Serge Hallyn4e66ae22012-12-03 16:17:12 +00006779 /* Send a netdev-add uevent to the new namespace */
6780 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6781
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006782 /* Fixup kobjects */
Eric W. Biedermana1b3f592010-05-04 17:36:49 -07006783 err = device_rename(&dev->dev, dev->name);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006784 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006785
6786 /* Add the device back in the hashes */
6787 list_netdevice(dev);
6788
6789 /* Notify protocols, that a new device appeared. */
6790 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6791
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006792 /*
6793 * Prevent userspace races by waiting until the network
6794 * device is fully setup before sending notifications.
6795 */
Alexei Starovoitov7f294052013-10-23 16:02:42 -07006796 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
Eric W. Biedermand90a9092009-12-12 22:11:15 +00006797
Eric W. Biedermance286d32007-09-12 13:53:49 +02006798 synchronize_net();
6799 err = 0;
6800out:
6801 return err;
6802}
Johannes Berg463d0182009-07-14 00:33:35 +02006803EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006804
Linus Torvalds1da177e2005-04-16 15:20:36 -07006805static int dev_cpu_callback(struct notifier_block *nfb,
6806 unsigned long action,
6807 void *ocpu)
6808{
6809 struct sk_buff **list_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006810 struct sk_buff *skb;
6811 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6812 struct softnet_data *sd, *oldsd;
6813
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07006814 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006815 return NOTIFY_OK;
6816
6817 local_irq_disable();
6818 cpu = smp_processor_id();
6819 sd = &per_cpu(softnet_data, cpu);
6820 oldsd = &per_cpu(softnet_data, oldcpu);
6821
6822 /* Find end of our completion_queue. */
6823 list_skb = &sd->completion_queue;
6824 while (*list_skb)
6825 list_skb = &(*list_skb)->next;
6826 /* Append completion queue from offline CPU. */
6827 *list_skb = oldsd->completion_queue;
6828 oldsd->completion_queue = NULL;
6829
Linus Torvalds1da177e2005-04-16 15:20:36 -07006830 /* Append output queue from offline CPU. */
Changli Gaoa9cbd582010-04-26 23:06:24 +00006831 if (oldsd->output_queue) {
6832 *sd->output_queue_tailp = oldsd->output_queue;
6833 sd->output_queue_tailp = oldsd->output_queue_tailp;
6834 oldsd->output_queue = NULL;
6835 oldsd->output_queue_tailp = &oldsd->output_queue;
6836 }
Heiko Carstens264524d2011-06-06 20:50:03 +00006837 /* Append NAPI poll list from offline CPU. */
6838 if (!list_empty(&oldsd->poll_list)) {
6839 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6840 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6841 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006842
6843 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6844 local_irq_enable();
6845
6846 /* Process offline CPU's input_pkt_queue */
Tom Herbert76cc8b12010-05-20 18:37:59 +00006847 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00006848 netif_rx_internal(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006849 input_queue_head_incr(oldsd);
6850 }
Tom Herbertfec5e652010-04-16 16:01:27 -07006851 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Ben Hutchingsae78dbf2014-01-10 22:17:24 +00006852 netif_rx_internal(skb);
Tom Herbert76cc8b12010-05-20 18:37:59 +00006853 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07006854 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006855
6856 return NOTIFY_OK;
6857}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006858
6859
Herbert Xu7f353bf2007-08-10 15:47:58 -07006860/**
Herbert Xub63365a2008-10-23 01:11:29 -07006861 * netdev_increment_features - increment feature set by one
6862 * @all: current feature set
6863 * @one: new feature set
6864 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006865 *
6866 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006867 * @one to the master device with current feature set @all. Will not
6868 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006869 */
Michał Mirosławc8f44af2011-11-15 15:29:55 +00006870netdev_features_t netdev_increment_features(netdev_features_t all,
6871 netdev_features_t one, netdev_features_t mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006872{
Michał Mirosław1742f182011-04-22 06:31:16 +00006873 if (mask & NETIF_F_GEN_CSUM)
6874 mask |= NETIF_F_ALL_CSUM;
6875 mask |= NETIF_F_VLAN_CHALLENGED;
6876
6877 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6878 all &= one | ~NETIF_F_ALL_FOR_ALL;
6879
Michał Mirosław1742f182011-04-22 06:31:16 +00006880 /* If one device supports hw checksumming, set for all. */
6881 if (all & NETIF_F_GEN_CSUM)
6882 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006883
6884 return all;
6885}
Herbert Xub63365a2008-10-23 01:11:29 -07006886EXPORT_SYMBOL(netdev_increment_features);
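/* How a master device might fold slave feature sets together with
 * netdev_increment_features(), sketched with a flat slave array (real
 * bonding/team code keeps richer state and masks the no-slave case
 * further). Hypothetical foo_* names; not compiled.
 */
#if 0
struct foo_master_priv {
	struct net_device *slaves[8];
	int num_slaves;
};

static netdev_features_t foo_recompute_features(struct net_device *master,
						netdev_features_t mask)
{
	struct foo_master_priv *priv = netdev_priv(master);
	netdev_features_t features = mask;
	int i;

	for (i = 0; i < priv->num_slaves; i++)
		features = netdev_increment_features(features,
						     priv->slaves[i]->features,
						     mask);
	return features;
}
#endif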
Herbert Xu7f353bf2007-08-10 15:47:58 -07006887
Baruch Siach430f03c2013-06-02 20:43:55 +00006888static struct hlist_head * __net_init netdev_create_hash(void)
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006889{
6890 int i;
6891 struct hlist_head *hash;
6892
6893 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6894 if (hash != NULL)
6895 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6896 INIT_HLIST_HEAD(&hash[i]);
6897
6898 return hash;
6899}
6900
Eric W. Biederman881d9662007-09-17 11:56:21 -07006901/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006902static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006903{
Rustad, Mark D734b6542012-07-18 09:06:07 +00006904 if (net != &init_net)
6905 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006906
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006907 net->dev_name_head = netdev_create_hash();
6908 if (net->dev_name_head == NULL)
6909 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006910
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006911 net->dev_index_head = netdev_create_hash();
6912 if (net->dev_index_head == NULL)
6913 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006914
6915 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006916
6917err_idx:
6918 kfree(net->dev_name_head);
6919err_name:
6920 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006921}
6922
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006923/**
6924 * netdev_drivername - network driver for the device
6925 * @dev: network device
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006926 *
6927 * Determine network driver for device.
6928 */
David S. Miller3019de12011-06-06 16:41:33 -07006929const char *netdev_drivername(const struct net_device *dev)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006930{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006931 const struct device_driver *driver;
6932 const struct device *parent;
David S. Miller3019de12011-06-06 16:41:33 -07006933 const char *empty = "";
Arjan van de Ven6579e572008-07-21 13:31:48 -07006934
6935 parent = dev->dev.parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006936 if (!parent)
David S. Miller3019de12011-06-06 16:41:33 -07006937 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006938
6939 driver = parent->driver;
6940 if (driver && driver->name)
David S. Miller3019de12011-06-06 16:41:33 -07006941 return driver->name;
6942 return empty;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006943}
6944
Joe Perchesb004ff42012-09-12 20:12:19 -07006945static int __netdev_printk(const char *level, const struct net_device *dev,
Joe Perches256df2f2010-06-27 01:02:35 +00006946 struct va_format *vaf)
6947{
6948 int r;
6949
Joe Perchesb004ff42012-09-12 20:12:19 -07006950 if (dev && dev->dev.parent) {
Joe Perches666f3552012-09-12 20:14:11 -07006951 r = dev_printk_emit(level[1] - '0',
6952 dev->dev.parent,
Veaceslav Falicoccc7f492014-07-17 19:46:10 +02006953 "%s %s %s%s: %pV",
Joe Perches666f3552012-09-12 20:14:11 -07006954 dev_driver_string(dev->dev.parent),
6955 dev_name(dev->dev.parent),
Veaceslav Falicoccc7f492014-07-17 19:46:10 +02006956 netdev_name(dev), netdev_reg_state(dev),
6957 vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006958 } else if (dev) {
Veaceslav Falicoccc7f492014-07-17 19:46:10 +02006959 r = printk("%s%s%s: %pV", level, netdev_name(dev),
6960 netdev_reg_state(dev), vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006961 } else {
Joe Perches256df2f2010-06-27 01:02:35 +00006962 r = printk("%s(NULL net_device): %pV", level, vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006963 }
Joe Perches256df2f2010-06-27 01:02:35 +00006964
6965 return r;
6966}
6967
6968int netdev_printk(const char *level, const struct net_device *dev,
6969 const char *format, ...)
6970{
6971 struct va_format vaf;
6972 va_list args;
6973 int r;
6974
6975 va_start(args, format);
6976
6977 vaf.fmt = format;
6978 vaf.va = &args;
6979
6980 r = __netdev_printk(level, dev, &vaf);
Joe Perchesb004ff42012-09-12 20:12:19 -07006981
Joe Perches256df2f2010-06-27 01:02:35 +00006982 va_end(args);
6983
6984 return r;
6985}
6986EXPORT_SYMBOL(netdev_printk);
6987
6988#define define_netdev_printk_level(func, level) \
6989int func(const struct net_device *dev, const char *fmt, ...) \
6990{ \
6991 int r; \
6992 struct va_format vaf; \
6993 va_list args; \
6994 \
6995 va_start(args, fmt); \
6996 \
6997 vaf.fmt = fmt; \
6998 vaf.va = &args; \
6999 \
7000 r = __netdev_printk(level, dev, &vaf); \
Joe Perchesb004ff42012-09-12 20:12:19 -07007001 \
Joe Perches256df2f2010-06-27 01:02:35 +00007002 va_end(args); \
7003 \
7004 return r; \
7005} \
7006EXPORT_SYMBOL(func);
7007
7008define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7009define_netdev_printk_level(netdev_alert, KERN_ALERT);
7010define_netdev_printk_level(netdev_crit, KERN_CRIT);
7011define_netdev_printk_level(netdev_err, KERN_ERR);
7012define_netdev_printk_level(netdev_warn, KERN_WARNING);
7013define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7014define_netdev_printk_level(netdev_info, KERN_INFO);
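/* Usage sketch for the helpers generated above: same calling
 * convention as printk(), with the driver, bus id and netdev
 * name/state prefixed automatically. Hypothetical function;
 * not compiled.
 */
#if 0
static void foo_link_change(struct net_device *dev, bool up, int speed)
{
	if (up)
		netdev_info(dev, "link up, %d Mbps\n", speed);
	else
		netdev_warn(dev, "link down\n");
}
#endif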
7015
Pavel Emelyanov46650792007-10-08 20:38:39 -07007016static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07007017{
7018 kfree(net->dev_name_head);
7019 kfree(net->dev_index_head);
7020}
7021
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007022static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07007023 .init = netdev_init,
7024 .exit = netdev_exit,
7025};
7026
Pavel Emelyanov46650792007-10-08 20:38:39 -07007027static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02007028{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007029 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02007030 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007031 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02007032 * initial network namespace
7033 */
7034 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007035 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007036 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007037 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02007038
7039 /* Ignore unmoveable devices (i.e. loopback) */
7040 if (dev->features & NETIF_F_NETNS_LOCAL)
7041 continue;
7042
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00007043 /* Leave virtual devices for the generic cleanup */
7044 if (dev->rtnl_link_ops)
7045 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08007046
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007047 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007048 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7049 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02007050 if (err) {
Joe Perches7b6cd1c2012-02-01 10:54:43 +00007051 pr_emerg("%s: failed to move %s to init_net: %d\n",
7052 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07007053 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02007054 }
7055 }
7056 rtnl_unlock();
7057}
7058
Eric W. Biederman50624c92013-09-23 21:19:49 -07007059static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7060{
7061 /* Return with the rtnl_lock held when there are no network
7062 * devices unregistering in any network namespace in net_list.
7063 */
7064 struct net *net;
7065 bool unregistering;
7066 DEFINE_WAIT(wait);
7067
7068 for (;;) {
7069 prepare_to_wait(&netdev_unregistering_wq, &wait,
7070 TASK_UNINTERRUPTIBLE);
7071 unregistering = false;
7072 rtnl_lock();
7073 list_for_each_entry(net, net_list, exit_list) {
7074 if (net->dev_unreg_count > 0) {
7075 unregistering = true;
7076 break;
7077 }
7078 }
7079 if (!unregistering)
7080 break;
7081 __rtnl_unlock();
7082 schedule();
7083 }
7084 finish_wait(&netdev_unregistering_wq, &wait);
7085}
7086
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007087static void __net_exit default_device_exit_batch(struct list_head *net_list)
7088{
 7089 /* At exit all network devices must be removed from a network
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04007090 * namespace. Do this in the reverse order of registration.
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007091 * Do this across as many network namespaces as possible to
7092 * improve batching efficiency.
7093 */
7094 struct net_device *dev;
7095 struct net *net;
7096 LIST_HEAD(dev_kill_list);
7097
Eric W. Biederman50624c92013-09-23 21:19:49 -07007098 /* To prevent network device cleanup code from dereferencing
 7099 * loopback devices or network devices that have been freed,
7100 * wait here for all pending unregistrations to complete,
 7101 * before unregistering the loopback device and allowing the
 7102 * network namespace to be freed.
7103 *
7104 * The netdev todo list containing all network devices
7105 * unregistrations that happen in default_device_exit_batch
7106 * will run in the rtnl_unlock() at the end of
7107 * default_device_exit_batch.
7108 */
7109 rtnl_lock_unregistering(net_list);
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007110 list_for_each_entry(net, net_list, exit_list) {
7111 for_each_netdev_reverse(net, dev) {
Jiri Pirkob0ab2fa2014-06-26 09:58:25 +02007112 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007113 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7114 else
7115 unregister_netdevice_queue(dev, &dev_kill_list);
7116 }
7117 }
7118 unregister_netdevice_many(&dev_kill_list);
7119 rtnl_unlock();
7120}
7121
Denis V. Lunev022cbae2007-11-13 03:23:50 -08007122static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02007123 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00007124 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02007125};
7126
Linus Torvalds1da177e2005-04-16 15:20:36 -07007127/*
7128 * Initialize the DEV module. At boot time this walks the device list and
7129 * unhooks any devices that fail to initialise (normally hardware not
7130 * present) and leaves us with a valid list of present and active devices.
7131 *
7132 */
7133
7134/*
7135 * This is called single threaded during boot, so no need
7136 * to take the rtnl semaphore.
7137 */
7138static int __init net_dev_init(void)
7139{
7140 int i, rc = -ENOMEM;
7141
7142 BUG_ON(!dev_boot_phase);
7143
Linus Torvalds1da177e2005-04-16 15:20:36 -07007144 if (dev_proc_init())
7145 goto out;
7146
Eric W. Biederman8b41d182007-09-26 22:02:53 -07007147 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07007148 goto out;
7149
7150 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08007151 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007152 INIT_LIST_HEAD(&ptype_base[i]);
7153
Vlad Yasevich62532da2012-11-15 08:49:10 +00007154 INIT_LIST_HEAD(&offload_base);
7155
Eric W. Biederman881d9662007-09-17 11:56:21 -07007156 if (register_pernet_subsys(&netdev_net_ops))
7157 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007158
7159 /*
7160 * Initialise the packet receive queues.
7161 */
7162
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07007163 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007164 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007165
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007166 skb_queue_head_init(&sd->input_pkt_queue);
Changli Gao6e7676c2010-04-27 15:07:33 -07007167 skb_queue_head_init(&sd->process_queue);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007168 INIT_LIST_HEAD(&sd->poll_list);
Changli Gaoa9cbd582010-04-26 23:06:24 +00007169 sd->output_queue_tailp = &sd->output_queue;
Eric Dumazetdf334542010-03-24 19:13:54 +00007170#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007171 sd->csd.func = rps_trigger_softirq;
7172 sd->csd.info = sd;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007173 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07007174#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00007175
Eric Dumazete36fa2f2010-04-19 21:17:14 +00007176 sd->backlog.poll = process_backlog;
7177 sd->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007178 }
7179
Linus Torvalds1da177e2005-04-16 15:20:36 -07007180 dev_boot_phase = 0;
7181
Eric W. Biederman505d4f72008-11-07 22:54:20 -08007182 /* The loopback device is special: if any other network device
 7183 * is present in a network namespace, the loopback device must
 7184 * be present. Since we now dynamically allocate and free the
 7185 * loopback device, ensure this invariant is maintained by
 7186 * keeping the loopback device as the first device on the
 7187 * list of network devices, ensuring the loopback device
7188 * is the first device that appears and the last network device
7189 * that disappears.
7190 */
7191 if (register_pernet_device(&loopback_net_ops))
7192 goto out;
7193
7194 if (register_pernet_device(&default_device_ops))
7195 goto out;
7196
Carlos R. Mafra962cf362008-05-15 11:15:37 -03007197 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7198 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007199
7200 hotcpu_notifier(dev_cpu_callback, 0);
7201 dst_init();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202 rc = 0;
7203out:
7204 return rc;
7205}
7206
7207subsys_initcall(net_dev_init);