blob: 7f5755b0a57cba4f099e413966f933ba7eee87aa [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
Jesper Juhl02c30a82005-05-05 16:16:16 -070010 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
74
75#include <asm/uaccess.h>
76#include <asm/system.h>
77#include <linux/bitops.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080078#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070079#include <linux/cpu.h>
80#include <linux/types.h>
81#include <linux/kernel.h>
stephen hemminger08e98972009-11-10 07:20:34 +000082#include <linux/hash.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090083#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070084#include <linux/sched.h>
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -080085#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070086#include <linux/string.h>
87#include <linux/mm.h>
88#include <linux/socket.h>
89#include <linux/sockios.h>
90#include <linux/errno.h>
91#include <linux/interrupt.h>
92#include <linux/if_ether.h>
93#include <linux/netdevice.h>
94#include <linux/etherdevice.h>
Ben Hutchings0187bdf2008-06-19 16:15:47 -070095#include <linux/ethtool.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070096#include <linux/notifier.h>
97#include <linux/skbuff.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020098#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070099#include <net/sock.h>
100#include <linux/rtnetlink.h>
101#include <linux/proc_fs.h>
102#include <linux/seq_file.h>
103#include <linux/stat.h>
104#include <linux/if_bridge.h>
Patrick McHardyb863ceb2007-07-14 18:55:06 -0700105#include <linux/if_macvlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106#include <net/dst.h>
107#include <net/pkt_sched.h>
108#include <net/checksum.h>
Arnd Bergmann44540962009-11-26 06:07:08 +0000109#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110#include <linux/highmem.h>
111#include <linux/init.h>
112#include <linux/kmod.h>
113#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114#include <linux/netpoll.h>
115#include <linux/rcupdate.h>
116#include <linux/delay.h>
Johannes Berg295f4a12007-04-26 20:43:56 -0700117#include <net/wext.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118#include <net/iw_handler.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119#include <asm/current.h>
Steve Grubb5bdb9882005-12-03 08:39:35 -0500120#include <linux/audit.h>
Chris Leechdb217332006-06-17 21:24:58 -0700121#include <linux/dmaengine.h>
Herbert Xuf6a78bf2006-06-22 02:57:17 -0700122#include <linux/err.h>
David S. Millerc7fa9d12006-08-15 16:34:13 -0700123#include <linux/ctype.h>
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700124#include <linux/if_arp.h>
Ben Hutchings6de329e2008-06-16 17:02:28 -0700125#include <linux/if_vlan.h>
David S. Miller8f0f2222008-07-15 03:47:03 -0700126#include <linux/ip.h>
Alexander Duyckad55dca2008-09-20 22:05:50 -0700127#include <net/ip.h>
David S. Miller8f0f2222008-07-15 03:47:03 -0700128#include <linux/ipv6.h>
129#include <linux/in.h>
David S. Millerb6b2fed2008-07-21 09:48:06 -0700130#include <linux/jhash.h>
131#include <linux/random.h>
David S. Miller9cbc1cb2009-06-15 03:02:23 -0700132#include <trace/events/napi.h>
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +0000133#include <linux/pci.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134
Pavel Emelyanov342709e2007-10-23 21:14:45 -0700135#include "net-sysfs.h"
136
Herbert Xud565b0a2008-12-15 23:38:52 -0800137/* Instead of increasing this, you should create a hash table. */
138#define MAX_GRO_SKBS 8
139
Herbert Xu5d38a072009-01-04 16:13:40 -0800140/* This should be increased if a protocol with a bigger head is added. */
141#define GRO_MAX_HEAD (MAX_HEADER + 128)
142
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143/*
144 * The list of packet types we will receive (as opposed to discard)
145 * and the routines to invoke.
146 *
147 * Why 16. Because with 16 the only overlap we get on a hash of the
148 * low nibble of the protocol value is RARP/SNAP/X.25.
149 *
150 * NOTE: That is no longer true with the addition of VLAN tags. Not
151 * sure which should go first, but I bet it won't make much
152 * difference if we are running VLANs. The good news is that
153 * this protocol won't be in the list unless compiled in, so
Stephen Hemminger3041a062006-05-26 13:25:24 -0700154 * the average user (w/out VLANs) will not be adversely affected.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155 * --BLG
156 *
157 * 0800 IP
158 * 8100 802.1Q VLAN
159 * 0001 802.3
160 * 0002 AX.25
161 * 0004 802.2
162 * 8035 RARP
163 * 0005 SNAP
164 * 0805 X.25
165 * 0806 ARP
166 * 8137 IPX
167 * 0009 Localtalk
168 * 86DD IPv6
169 */
170
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800171#define PTYPE_HASH_SIZE (16)
172#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
173
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174static DEFINE_SPINLOCK(ptype_lock);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800175static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -0700176static struct list_head ptype_all __read_mostly; /* Taps */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178/*
Pavel Emelianov7562f872007-05-03 15:13:45 -0700179 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180 * semaphore.
181 *
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800182 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183 *
184 * Writers must hold the rtnl semaphore while they loop through the
Pavel Emelianov7562f872007-05-03 15:13:45 -0700185 * dev_base_head list, and hold dev_base_lock for writing when they do the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 * actual updates. This allows pure readers to access the list even
187 * while a writer is preparing to update it.
188 *
189 * To put it another way, dev_base_lock is held for writing only to
190 * protect against pure readers; the rtnl semaphore provides the
191 * protection against other writers.
192 *
193 * See, for example usages, register_netdevice() and
194 * unregister_netdevice(), which must be called with the rtnl
195 * semaphore held.
196 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197DEFINE_RWLOCK(dev_base_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198EXPORT_SYMBOL(dev_base_lock);
199
Eric W. Biederman881d9662007-09-17 11:56:21 -0700200static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201{
202 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
stephen hemminger08e98972009-11-10 07:20:34 +0000203 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204}
205
Eric W. Biederman881d9662007-09-17 11:56:21 -0700206static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207{
Eric Dumazet7c28bd02009-10-24 06:13:17 -0700208 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209}
210
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000211static inline void rps_lock(struct softnet_data *sd)
Changli Gao152102c2010-03-30 20:16:22 +0000212{
213#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000214 spin_lock(&sd->input_pkt_queue.lock);
Changli Gao152102c2010-03-30 20:16:22 +0000215#endif
216}
217
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000218static inline void rps_unlock(struct softnet_data *sd)
Changli Gao152102c2010-03-30 20:16:22 +0000219{
220#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +0000221 spin_unlock(&sd->input_pkt_queue.lock);
Changli Gao152102c2010-03-30 20:16:22 +0000222#endif
223}
224
Eric W. Biedermance286d32007-09-12 13:53:49 +0200225/* Device list insertion */
226static int list_netdevice(struct net_device *dev)
227{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900228 struct net *net = dev_net(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +0200229
230 ASSERT_RTNL();
231
232 write_lock_bh(&dev_base_lock);
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800233 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
Eric Dumazet72c95282009-10-30 07:11:27 +0000234 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000235 hlist_add_head_rcu(&dev->index_hlist,
236 dev_index_hash(net, dev->ifindex));
Eric W. Biedermance286d32007-09-12 13:53:49 +0200237 write_unlock_bh(&dev_base_lock);
238 return 0;
239}
240
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000241/* Device list removal
242 * caller must respect a RCU grace period before freeing/reusing dev
243 */
Eric W. Biedermance286d32007-09-12 13:53:49 +0200244static void unlist_netdevice(struct net_device *dev)
245{
246 ASSERT_RTNL();
247
248 /* Unlink dev from the device chain */
249 write_lock_bh(&dev_base_lock);
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800250 list_del_rcu(&dev->dev_list);
Eric Dumazet72c95282009-10-30 07:11:27 +0000251 hlist_del_rcu(&dev->name_hlist);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000252 hlist_del_rcu(&dev->index_hlist);
Eric W. Biedermance286d32007-09-12 13:53:49 +0200253 write_unlock_bh(&dev_base_lock);
254}
255
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256/*
257 * Our notifier list
258 */
259
Alan Sternf07d5b92006-05-09 15:23:03 -0700260static RAW_NOTIFIER_HEAD(netdev_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261
262/*
263 * Device drivers call our routines to queue packets here. We empty the
264 * queue in the local softnet handler.
265 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700266
Eric Dumazet9958da02010-04-17 04:17:02 +0000267DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700268EXPORT_PER_CPU_SYMBOL(softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269
David S. Millercf508b12008-07-22 14:16:42 -0700270#ifdef CONFIG_LOCKDEP
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700271/*
David S. Millerc773e842008-07-08 23:13:53 -0700272 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700273 * according to dev->type
274 */
275static const unsigned short netdev_lock_type[] =
276 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
277 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
278 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
279 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
280 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
281 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
282 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
283 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
284 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
285 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
286 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
287 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
288 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
Rémi Denis-Courmont2d91d782008-12-17 15:47:29 -0800289 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
Dmitry Eremin-Solenikov929122cd2009-08-14 20:00:20 +0400290 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
Sergey Lapinfcb94e42009-06-08 12:18:47 +0000291 ARPHRD_VOID, ARPHRD_NONE};
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700292
Jan Engelhardt36cbd3d2009-08-05 10:42:58 -0700293static const char *const netdev_lock_name[] =
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700294 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
295 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
296 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
297 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
298 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
299 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
300 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
301 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
302 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
303 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
304 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
305 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
306 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
Rémi Denis-Courmont2d91d782008-12-17 15:47:29 -0800307 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
Dmitry Eremin-Solenikov929122cd2009-08-14 20:00:20 +0400308 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
Sergey Lapinfcb94e42009-06-08 12:18:47 +0000309 "_xmit_VOID", "_xmit_NONE"};
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700310
311static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
David S. Millercf508b12008-07-22 14:16:42 -0700312static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700313
314static inline unsigned short netdev_lock_pos(unsigned short dev_type)
315{
316 int i;
317
318 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
319 if (netdev_lock_type[i] == dev_type)
320 return i;
321 /* the last key is used by default */
322 return ARRAY_SIZE(netdev_lock_type) - 1;
323}
324
David S. Millercf508b12008-07-22 14:16:42 -0700325static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
326 unsigned short dev_type)
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700327{
328 int i;
329
330 i = netdev_lock_pos(dev_type);
331 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
332 netdev_lock_name[i]);
333}
David S. Millercf508b12008-07-22 14:16:42 -0700334
335static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
336{
337 int i;
338
339 i = netdev_lock_pos(dev->type);
340 lockdep_set_class_and_name(&dev->addr_list_lock,
341 &netdev_addr_lock_key[i],
342 netdev_lock_name[i]);
343}
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700344#else
David S. Millercf508b12008-07-22 14:16:42 -0700345static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
346 unsigned short dev_type)
347{
348}
349static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700350{
351}
352#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353
354/*******************************************************************************
355
356 Protocol management and registration routines
357
358*******************************************************************************/
359
360/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361 * Add a protocol ID to the list. Now that the input handler is
362 * smarter we can dispense with all the messy stuff that used to be
363 * here.
364 *
365 * BEWARE!!! Protocol handlers, mangling input packets,
366 * MUST BE last in hash buckets and checking protocol handlers
367 * MUST start from promiscuous ptype_all chain in net_bh.
368 * It is true now, do not change it.
369 * Explanation follows: if protocol handler, mangling packet, will
370 * be the first on list, it is not able to sense, that packet
371 * is cloned and should be copied-on-write, so that it will
372 * change it and subsequent readers will get broken packet.
373 * --ANK (980803)
374 */
375
376/**
377 * dev_add_pack - add packet handler
378 * @pt: packet type declaration
379 *
380 * Add a protocol handler to the networking stack. The passed &packet_type
381 * is linked into kernel lists and may not be freed until it has been
382 * removed from the kernel lists.
383 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900384 * This call does not sleep therefore it can not
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385 * guarantee all CPU's that are in middle of receiving packets
386 * will see the new packet type (until the next received packet).
387 */
388
389void dev_add_pack(struct packet_type *pt)
390{
391 int hash;
392
393 spin_lock_bh(&ptype_lock);
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -0700394 if (pt->type == htons(ETH_P_ALL))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395 list_add_rcu(&pt->list, &ptype_all);
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -0700396 else {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800397 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398 list_add_rcu(&pt->list, &ptype_base[hash]);
399 }
400 spin_unlock_bh(&ptype_lock);
401}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700402EXPORT_SYMBOL(dev_add_pack);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404/**
405 * __dev_remove_pack - remove packet handler
406 * @pt: packet type declaration
407 *
408 * Remove a protocol handler that was previously added to the kernel
409 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
410 * from the kernel lists and can be freed or reused once this function
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900411 * returns.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 *
413 * The packet type might still be in use by receivers
414 * and must not be freed until after all the CPU's have gone
415 * through a quiescent state.
416 */
417void __dev_remove_pack(struct packet_type *pt)
418{
419 struct list_head *head;
420 struct packet_type *pt1;
421
422 spin_lock_bh(&ptype_lock);
423
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -0700424 if (pt->type == htons(ETH_P_ALL))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425 head = &ptype_all;
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -0700426 else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800427 head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428
429 list_for_each_entry(pt1, head, list) {
430 if (pt == pt1) {
431 list_del_rcu(&pt->list);
432 goto out;
433 }
434 }
435
436 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
437out:
438 spin_unlock_bh(&ptype_lock);
439}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700440EXPORT_SYMBOL(__dev_remove_pack);
441
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442/**
443 * dev_remove_pack - remove packet handler
444 * @pt: packet type declaration
445 *
446 * Remove a protocol handler that was previously added to the kernel
447 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
448 * from the kernel lists and can be freed or reused once this function
449 * returns.
450 *
451 * This call sleeps to guarantee that no CPU is looking at the packet
452 * type after return.
453 */
454void dev_remove_pack(struct packet_type *pt)
455{
456 __dev_remove_pack(pt);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900457
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 synchronize_net();
459}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700460EXPORT_SYMBOL(dev_remove_pack);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461
462/******************************************************************************
463
464 Device Boot-time Settings Routines
465
466*******************************************************************************/
467
468/* Boot time configuration table */
469static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
470
471/**
472 * netdev_boot_setup_add - add new setup entry
473 * @name: name of the device
474 * @map: configured settings for the device
475 *
476 * Adds new setup entry to the dev_boot_setup list. The function
477 * returns 0 on error and 1 on success. This is a generic routine to
478 * all netdevices.
479 */
480static int netdev_boot_setup_add(char *name, struct ifmap *map)
481{
482 struct netdev_boot_setup *s;
483 int i;
484
485 s = dev_boot_setup;
486 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
487 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
488 memset(s[i].name, 0, sizeof(s[i].name));
Wang Chen93b3cff2008-07-01 19:57:19 -0700489 strlcpy(s[i].name, name, IFNAMSIZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490 memcpy(&s[i].map, map, sizeof(s[i].map));
491 break;
492 }
493 }
494
495 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
496}
497
498/**
499 * netdev_boot_setup_check - check boot time settings
500 * @dev: the netdevice
501 *
502 * Check boot time settings for the device.
503 * The found settings are set for the device to be used
504 * later in the device probing.
505 * Returns 0 if no settings found, 1 if they are.
506 */
507int netdev_boot_setup_check(struct net_device *dev)
508{
509 struct netdev_boot_setup *s = dev_boot_setup;
510 int i;
511
512 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
513 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
Wang Chen93b3cff2008-07-01 19:57:19 -0700514 !strcmp(dev->name, s[i].name)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700515 dev->irq = s[i].map.irq;
516 dev->base_addr = s[i].map.base_addr;
517 dev->mem_start = s[i].map.mem_start;
518 dev->mem_end = s[i].map.mem_end;
519 return 1;
520 }
521 }
522 return 0;
523}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700524EXPORT_SYMBOL(netdev_boot_setup_check);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700525
526
527/**
528 * netdev_boot_base - get address from boot time settings
529 * @prefix: prefix for network device
530 * @unit: id for network device
531 *
532 * Check boot time settings for the base address of device.
533 * The found settings are set for the device to be used
534 * later in the device probing.
535 * Returns 0 if no settings found.
536 */
537unsigned long netdev_boot_base(const char *prefix, int unit)
538{
539 const struct netdev_boot_setup *s = dev_boot_setup;
540 char name[IFNAMSIZ];
541 int i;
542
543 sprintf(name, "%s%d", prefix, unit);
544
545 /*
546 * If device already registered then return base of 1
547 * to indicate not to probe for this interface
548 */
Eric W. Biederman881d9662007-09-17 11:56:21 -0700549 if (__dev_get_by_name(&init_net, name))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 return 1;
551
552 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
553 if (!strcmp(name, s[i].name))
554 return s[i].map.base_addr;
555 return 0;
556}
557
558/*
559 * Saves at boot time configured settings for any netdevice.
560 */
561int __init netdev_boot_setup(char *str)
562{
563 int ints[5];
564 struct ifmap map;
565
566 str = get_options(str, ARRAY_SIZE(ints), ints);
567 if (!str || !*str)
568 return 0;
569
570 /* Save settings */
571 memset(&map, 0, sizeof(map));
572 if (ints[0] > 0)
573 map.irq = ints[1];
574 if (ints[0] > 1)
575 map.base_addr = ints[2];
576 if (ints[0] > 2)
577 map.mem_start = ints[3];
578 if (ints[0] > 3)
579 map.mem_end = ints[4];
580
581 /* Add new entry to the list */
582 return netdev_boot_setup_add(str, &map);
583}
584
585__setup("netdev=", netdev_boot_setup);
586
587/*******************************************************************************
588
589 Device Interface Subroutines
590
591*******************************************************************************/
592
593/**
594 * __dev_get_by_name - find a device by its name
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700595 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 * @name: name to find
597 *
598 * Find an interface by name. Must be called under RTNL semaphore
599 * or @dev_base_lock. If the name is found a pointer to the device
600 * is returned. If the name is not found then %NULL is returned. The
601 * reference counters are not incremented so the caller must be
602 * careful with locks.
603 */
604
Eric W. Biederman881d9662007-09-17 11:56:21 -0700605struct net_device *__dev_get_by_name(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606{
607 struct hlist_node *p;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700608 struct net_device *dev;
609 struct hlist_head *head = dev_name_hash(net, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700611 hlist_for_each_entry(dev, p, head, name_hlist)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612 if (!strncmp(dev->name, name, IFNAMSIZ))
613 return dev;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700614
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 return NULL;
616}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700617EXPORT_SYMBOL(__dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700618
619/**
Eric Dumazet72c95282009-10-30 07:11:27 +0000620 * dev_get_by_name_rcu - find a device by its name
621 * @net: the applicable net namespace
622 * @name: name to find
623 *
624 * Find an interface by name.
625 * If the name is found a pointer to the device is returned.
626 * If the name is not found then %NULL is returned.
627 * The reference counters are not incremented so the caller must be
628 * careful with locks. The caller must hold RCU lock.
629 */
630
631struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
632{
633 struct hlist_node *p;
634 struct net_device *dev;
635 struct hlist_head *head = dev_name_hash(net, name);
636
637 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
638 if (!strncmp(dev->name, name, IFNAMSIZ))
639 return dev;
640
641 return NULL;
642}
643EXPORT_SYMBOL(dev_get_by_name_rcu);
644
645/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646 * dev_get_by_name - find a device by its name
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700647 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700648 * @name: name to find
649 *
650 * Find an interface by name. This can be called from any
651 * context and does its own locking. The returned handle has
652 * the usage count incremented and the caller must use dev_put() to
653 * release it when it is no longer needed. %NULL is returned if no
654 * matching device is found.
655 */
656
Eric W. Biederman881d9662007-09-17 11:56:21 -0700657struct net_device *dev_get_by_name(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658{
659 struct net_device *dev;
660
Eric Dumazet72c95282009-10-30 07:11:27 +0000661 rcu_read_lock();
662 dev = dev_get_by_name_rcu(net, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663 if (dev)
664 dev_hold(dev);
Eric Dumazet72c95282009-10-30 07:11:27 +0000665 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 return dev;
667}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700668EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669
670/**
671 * __dev_get_by_index - find a device by its ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700672 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 * @ifindex: index of device
674 *
675 * Search for an interface by index. Returns %NULL if the device
676 * is not found or a pointer to the device. The device has not
677 * had its reference counter increased so the caller must be careful
678 * about locking. The caller must hold either the RTNL semaphore
679 * or @dev_base_lock.
680 */
681
Eric W. Biederman881d9662007-09-17 11:56:21 -0700682struct net_device *__dev_get_by_index(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683{
684 struct hlist_node *p;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700685 struct net_device *dev;
686 struct hlist_head *head = dev_index_hash(net, ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700688 hlist_for_each_entry(dev, p, head, index_hlist)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 if (dev->ifindex == ifindex)
690 return dev;
Eric Dumazet0bd8d532009-10-30 01:40:11 -0700691
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 return NULL;
693}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700694EXPORT_SYMBOL(__dev_get_by_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000696/**
697 * dev_get_by_index_rcu - find a device by its ifindex
698 * @net: the applicable net namespace
699 * @ifindex: index of device
700 *
701 * Search for an interface by index. Returns %NULL if the device
702 * is not found or a pointer to the device. The device has not
703 * had its reference counter increased so the caller must be careful
704 * about locking. The caller must hold RCU lock.
705 */
706
707struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
708{
709 struct hlist_node *p;
710 struct net_device *dev;
711 struct hlist_head *head = dev_index_hash(net, ifindex);
712
713 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
714 if (dev->ifindex == ifindex)
715 return dev;
716
717 return NULL;
718}
719EXPORT_SYMBOL(dev_get_by_index_rcu);
720
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721
722/**
723 * dev_get_by_index - find a device by its ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700724 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725 * @ifindex: index of device
726 *
727 * Search for an interface by index. Returns NULL if the device
728 * is not found or a pointer to the device. The device returned has
729 * had a reference added and the pointer is safe until the user calls
730 * dev_put to indicate they have finished with it.
731 */
732
Eric W. Biederman881d9662007-09-17 11:56:21 -0700733struct net_device *dev_get_by_index(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734{
735 struct net_device *dev;
736
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000737 rcu_read_lock();
738 dev = dev_get_by_index_rcu(net, ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 if (dev)
740 dev_hold(dev);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +0000741 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742 return dev;
743}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700744EXPORT_SYMBOL(dev_get_by_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745
746/**
747 * dev_getbyhwaddr - find a device by its hardware address
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700748 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749 * @type: media type of device
750 * @ha: hardware address
751 *
752 * Search for an interface by MAC address. Returns NULL if the device
753 * is not found or a pointer to the device. The caller must hold the
754 * rtnl semaphore. The returned device has not had its ref count increased
755 * and the caller must therefore be careful about locking
756 *
757 * BUGS:
758 * If the API was consistent this would be __dev_get_by_hwaddr
759 */
760
Eric W. Biederman881d9662007-09-17 11:56:21 -0700761struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762{
763 struct net_device *dev;
764
765 ASSERT_RTNL();
766
Denis V. Lunev81103a52007-12-12 10:47:38 -0800767 for_each_netdev(net, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 if (dev->type == type &&
769 !memcmp(dev->dev_addr, ha, dev->addr_len))
Pavel Emelianov7562f872007-05-03 15:13:45 -0700770 return dev;
771
772 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773}
Jochen Friedrichcf309e32005-09-22 04:44:55 -0300774EXPORT_SYMBOL(dev_getbyhwaddr);
775
Eric W. Biederman881d9662007-09-17 11:56:21 -0700776struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700777{
778 struct net_device *dev;
779
780 ASSERT_RTNL();
Eric W. Biederman881d9662007-09-17 11:56:21 -0700781 for_each_netdev(net, dev)
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700782 if (dev->type == type)
Pavel Emelianov7562f872007-05-03 15:13:45 -0700783 return dev;
784
785 return NULL;
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700786}
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700787EXPORT_SYMBOL(__dev_getfirstbyhwtype);
788
Eric W. Biederman881d9662007-09-17 11:56:21 -0700789struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790{
Eric Dumazet99fe3c32010-03-18 11:27:25 +0000791 struct net_device *dev, *ret = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792
Eric Dumazet99fe3c32010-03-18 11:27:25 +0000793 rcu_read_lock();
794 for_each_netdev_rcu(net, dev)
795 if (dev->type == type) {
796 dev_hold(dev);
797 ret = dev;
798 break;
799 }
800 rcu_read_unlock();
801 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803EXPORT_SYMBOL(dev_getfirstbyhwtype);
804
805/**
806 * dev_get_by_flags - find any device with given flags
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700807 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808 * @if_flags: IFF_* values
809 * @mask: bitmask of bits in if_flags to check
810 *
811 * Search for any interface with the given flags. Returns NULL if a device
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900812 * is not found or a pointer to the device. The device returned has
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 * had a reference added and the pointer is safe until the user calls
814 * dev_put to indicate they have finished with it.
815 */
816
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700817struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
818 unsigned short mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819{
Pavel Emelianov7562f872007-05-03 15:13:45 -0700820 struct net_device *dev, *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700821
Pavel Emelianov7562f872007-05-03 15:13:45 -0700822 ret = NULL;
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800823 rcu_read_lock();
824 for_each_netdev_rcu(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825 if (((dev->flags ^ if_flags) & mask) == 0) {
826 dev_hold(dev);
Pavel Emelianov7562f872007-05-03 15:13:45 -0700827 ret = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700828 break;
829 }
830 }
Eric Dumazetc6d14c82009-11-04 05:43:23 -0800831 rcu_read_unlock();
Pavel Emelianov7562f872007-05-03 15:13:45 -0700832 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700834EXPORT_SYMBOL(dev_get_by_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835
836/**
837 * dev_valid_name - check if name is okay for network device
838 * @name: name string
839 *
840 * Network device names need to be valid file names to
David S. Millerc7fa9d12006-08-15 16:34:13 -0700841 * to allow sysfs to work. We also disallow any kind of
842 * whitespace.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 */
Mitch Williamsc2373ee2005-11-09 10:34:45 -0800844int dev_valid_name(const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845{
David S. Millerc7fa9d12006-08-15 16:34:13 -0700846 if (*name == '\0')
847 return 0;
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -0700848 if (strlen(name) >= IFNAMSIZ)
849 return 0;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700850 if (!strcmp(name, ".") || !strcmp(name, ".."))
851 return 0;
852
853 while (*name) {
854 if (*name == '/' || isspace(*name))
855 return 0;
856 name++;
857 }
858 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700860EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861
862/**
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200863 * __dev_alloc_name - allocate a name for a device
864 * @net: network namespace to allocate the device name in
Linus Torvalds1da177e2005-04-16 15:20:36 -0700865 * @name: name format string
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200866 * @buf: scratch buffer and result name string
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867 *
868 * Passed a format string - eg "lt%d" it will try and find a suitable
Stephen Hemminger3041a062006-05-26 13:25:24 -0700869 * id. It scans list of devices to build up a free map, then chooses
870 * the first empty slot. The caller must hold the dev_base or rtnl lock
871 * while allocating the name and adding the device in order to avoid
872 * duplicates.
873 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
874 * Returns the number of the unit assigned or a negative errno code.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 */
876
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200877static int __dev_alloc_name(struct net *net, const char *name, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878{
879 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 const char *p;
881 const int max_netdevices = 8*PAGE_SIZE;
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -0700882 unsigned long *inuse;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883 struct net_device *d;
884
885 p = strnchr(name, IFNAMSIZ-1, '%');
886 if (p) {
887 /*
888 * Verify the string as this thing may have come from
889 * the user. There must be either one "%d" and no other "%"
890 * characters.
891 */
892 if (p[1] != 'd' || strchr(p + 2, '%'))
893 return -EINVAL;
894
895 /* Use one page as a bit array of possible slots */
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -0700896 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897 if (!inuse)
898 return -ENOMEM;
899
Eric W. Biederman881d9662007-09-17 11:56:21 -0700900 for_each_netdev(net, d) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700901 if (!sscanf(d->name, name, &i))
902 continue;
903 if (i < 0 || i >= max_netdevices)
904 continue;
905
906 /* avoid cases where sscanf is not exact inverse of printf */
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200907 snprintf(buf, IFNAMSIZ, name, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908 if (!strncmp(buf, d->name, IFNAMSIZ))
909 set_bit(i, inuse);
910 }
911
912 i = find_first_zero_bit(inuse, max_netdevices);
913 free_page((unsigned long) inuse);
914 }
915
Octavian Purdilad9031022009-11-18 02:36:59 +0000916 if (buf != name)
917 snprintf(buf, IFNAMSIZ, name, i);
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200918 if (!__dev_get_by_name(net, buf))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920
921 /* It is possible to run out of possible slots
922 * when the name is long and there isn't enough space left
923 * for the digits, or if all bits are used.
924 */
925 return -ENFILE;
926}
927
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200928/**
929 * dev_alloc_name - allocate a name for a device
930 * @dev: device
931 * @name: name format string
932 *
933 * Passed a format string - eg "lt%d" it will try and find a suitable
934 * id. It scans list of devices to build up a free map, then chooses
935 * the first empty slot. The caller must hold the dev_base or rtnl lock
936 * while allocating the name and adding the device in order to avoid
937 * duplicates.
938 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
939 * Returns the number of the unit assigned or a negative errno code.
940 */
941
942int dev_alloc_name(struct net_device *dev, const char *name)
943{
944 char buf[IFNAMSIZ];
945 struct net *net;
946 int ret;
947
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900948 BUG_ON(!dev_net(dev));
949 net = dev_net(dev);
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200950 ret = __dev_alloc_name(net, name, buf);
951 if (ret >= 0)
952 strlcpy(dev->name, buf, IFNAMSIZ);
953 return ret;
954}
Eric Dumazetd1b19df2009-09-03 01:29:39 -0700955EXPORT_SYMBOL(dev_alloc_name);
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200956
Octavian Purdilad9031022009-11-18 02:36:59 +0000957static int dev_get_valid_name(struct net *net, const char *name, char *buf,
958 bool fmt)
959{
960 if (!dev_valid_name(name))
961 return -EINVAL;
962
963 if (fmt && strchr(name, '%'))
964 return __dev_alloc_name(net, name, buf);
965 else if (__dev_get_by_name(net, name))
966 return -EEXIST;
967 else if (buf != name)
968 strlcpy(buf, name, IFNAMSIZ);
969
970 return 0;
971}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972
973/**
974 * dev_change_name - change name of a device
975 * @dev: device
976 * @newname: name (or format string) must be at least IFNAMSIZ
977 *
978 * Change name of a device, can pass format strings "eth%d".
979 * for wildcarding.
980 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -0700981int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982{
Herbert Xufcc5a032007-07-30 17:03:38 -0700983 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -0700985 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -0700986 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987
988 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900989 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900991 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992 if (dev->flags & IFF_UP)
993 return -EBUSY;
994
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -0700995 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
996 return 0;
997
Herbert Xufcc5a032007-07-30 17:03:38 -0700998 memcpy(oldname, dev->name, IFNAMSIZ);
999
Octavian Purdilad9031022009-11-18 02:36:59 +00001000 err = dev_get_valid_name(net, newname, dev->name, 1);
1001 if (err < 0)
1002 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003
Herbert Xufcc5a032007-07-30 17:03:38 -07001004rollback:
Eric W. Biederman38918452008-10-27 17:51:47 -07001005 /* For now only devices in the initial network namespace
1006 * are in sysfs.
1007 */
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001008 if (net_eq(net, &init_net)) {
Eric W. Biederman38918452008-10-27 17:51:47 -07001009 ret = device_rename(&dev->dev, dev->name);
1010 if (ret) {
1011 memcpy(dev->name, oldname, IFNAMSIZ);
1012 return ret;
1013 }
Stephen Hemmingerdcc99772008-05-14 22:33:38 -07001014 }
Herbert Xu7f988ea2007-07-30 16:35:46 -07001015
1016 write_lock_bh(&dev_base_lock);
Eric W. Biederman92749822007-04-03 00:07:30 -06001017 hlist_del(&dev->name_hlist);
Eric Dumazet72c95282009-10-30 07:11:27 +00001018 write_unlock_bh(&dev_base_lock);
1019
1020 synchronize_rcu();
1021
1022 write_lock_bh(&dev_base_lock);
1023 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
Herbert Xu7f988ea2007-07-30 16:35:46 -07001024 write_unlock_bh(&dev_base_lock);
1025
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001026 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001027 ret = notifier_to_errno(ret);
1028
1029 if (ret) {
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001030 /* err >= 0 after dev_alloc_name() or stores the first errno */
1031 if (err >= 0) {
Herbert Xufcc5a032007-07-30 17:03:38 -07001032 err = ret;
1033 memcpy(dev->name, oldname, IFNAMSIZ);
1034 goto rollback;
Eric Dumazet91e9c07b2009-11-15 23:30:24 +00001035 } else {
1036 printk(KERN_ERR
1037 "%s: name change rollback failed: %d.\n",
1038 dev->name, ret);
Herbert Xufcc5a032007-07-30 17:03:38 -07001039 }
1040 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041
1042 return err;
1043}
1044
1045/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001046 * dev_set_alias - change ifalias of a device
1047 * @dev: device
1048 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07001049 * @len: limit of bytes to copy from info
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001050 *
1051 * Set ifalias for a device,
1052 */
1053int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1054{
1055 ASSERT_RTNL();
1056
1057 if (len >= IFALIASZ)
1058 return -EINVAL;
1059
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -07001060 if (!len) {
1061 if (dev->ifalias) {
1062 kfree(dev->ifalias);
1063 dev->ifalias = NULL;
1064 }
1065 return 0;
1066 }
1067
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001068 dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
Stephen Hemminger0b815a12008-09-22 21:28:11 -07001069 if (!dev->ifalias)
1070 return -ENOMEM;
1071
1072 strlcpy(dev->ifalias, alias, len+1);
1073 return len;
1074}
1075
1076
1077/**
Stephen Hemminger3041a062006-05-26 13:25:24 -07001078 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001079 * @dev: device to cause notification
1080 *
1081 * Called to indicate a device has changed features.
1082 */
1083void netdev_features_change(struct net_device *dev)
1084{
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001085 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -07001086}
1087EXPORT_SYMBOL(netdev_features_change);
1088
1089/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090 * netdev_state_change - device changes state
1091 * @dev: device to cause notification
1092 *
1093 * Called to indicate a device has changed state. This function calls
1094 * the notifier chains for netdev_chain and sends a NEWLINK message
1095 * to the routing socket.
1096 */
1097void netdev_state_change(struct net_device *dev)
1098{
1099 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001100 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1102 }
1103}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001104EXPORT_SYMBOL(netdev_state_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105
Jiri Pirko3ca5b402010-03-10 10:29:35 +00001106int netdev_bonding_change(struct net_device *dev, unsigned long event)
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001107{
Jiri Pirko3ca5b402010-03-10 10:29:35 +00001108 return call_netdevice_notifiers(event, dev);
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001109}
1110EXPORT_SYMBOL(netdev_bonding_change);
1111
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112/**
1113 * dev_load - load a network module
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001114 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115 * @name: name of interface
1116 *
1117 * If a network interface is not present and the process has suitable
1118 * privileges this function loads the module. If module loading is not
1119 * available in this kernel then it becomes a nop.
1120 */
1121
Eric W. Biederman881d9662007-09-17 11:56:21 -07001122void dev_load(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123{
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001124 struct net_device *dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125
Eric Dumazet72c95282009-10-30 07:11:27 +00001126 rcu_read_lock();
1127 dev = dev_get_by_name_rcu(net, name);
1128 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129
Eric Parisa8f80e82009-08-13 09:44:51 -04001130 if (!dev && capable(CAP_NET_ADMIN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 request_module("%s", name);
1132}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001133EXPORT_SYMBOL(dev_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134
Patrick McHardybd380812010-02-26 06:34:53 +00001135static int __dev_open(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001137 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001138 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001140 ASSERT_RTNL();
1141
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 * Is it even present?
1144 */
1145 if (!netif_device_present(dev))
1146 return -ENODEV;
1147
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001148 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1149 ret = notifier_to_errno(ret);
1150 if (ret)
1151 return ret;
1152
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 /*
1154 * Call device private open method
1155 */
1156 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001157
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001158 if (ops->ndo_validate_addr)
1159 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001160
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001161 if (!ret && ops->ndo_open)
1162 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001164 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 * If it went open OK then:
1166 */
1167
Jeff Garzikbada3392007-10-23 20:19:37 -07001168 if (ret)
1169 clear_bit(__LINK_STATE_START, &dev->state);
1170 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 /*
1172 * Set the flags.
1173 */
1174 dev->flags |= IFF_UP;
1175
1176 /*
Dan Williams649274d2009-01-11 00:20:39 -08001177 * Enable NET_DMA
1178 */
David S. Millerb4bd07c2009-02-06 22:06:43 -08001179 net_dmaengine_get();
Dan Williams649274d2009-01-11 00:20:39 -08001180
1181 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 * Initialize multicasting status
1183 */
Patrick McHardy4417da62007-06-27 01:28:10 -07001184 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185
1186 /*
1187 * Wakeup transmit queue engine
1188 */
1189 dev_activate(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001191
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 return ret;
1193}
Patrick McHardybd380812010-02-26 06:34:53 +00001194
1195/**
1196 * dev_open - prepare an interface for use.
1197 * @dev: device to open
1198 *
1199 * Takes a device from down to up state. The device's private open
1200 * function is invoked and then the multicast lists are loaded. Finally
1201 * the device is moved into the up state and a %NETDEV_UP message is
1202 * sent to the netdev notifier chain.
1203 *
1204 * Calling this function on an active interface is a nop. On a failure
1205 * a negative errno code is returned.
1206 */
1207int dev_open(struct net_device *dev)
1208{
1209 int ret;
1210
1211 /*
1212 * Is it already up?
1213 */
1214 if (dev->flags & IFF_UP)
1215 return 0;
1216
1217 /*
1218 * Open device
1219 */
1220 ret = __dev_open(dev);
1221 if (ret < 0)
1222 return ret;
1223
1224 /*
1225 * ... and announce new interface.
1226 */
1227 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1228 call_netdevice_notifiers(NETDEV_UP, dev);
1229
1230 return ret;
1231}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001232EXPORT_SYMBOL(dev_open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
Patrick McHardybd380812010-02-26 06:34:53 +00001234static int __dev_close(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001236 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardybd380812010-02-26 06:34:53 +00001237
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001238 ASSERT_RTNL();
David S. Miller9d5010d2007-09-12 14:33:25 +02001239 might_sleep();
1240
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 /*
1242 * Tell people we are going down, so that they can
1243 * prepare to death, when device is still operating.
1244 */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001245 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 clear_bit(__LINK_STATE_START, &dev->state);
1248
1249 /* Synchronize to scheduled poll. We cannot touch poll list,
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001250 * it can be even on different cpu. So just clear netif_running().
1251 *
1252 * dev->stop() will invoke napi_disable() on all of it's
1253 * napi_struct instances on this device.
1254 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 smp_mb__after_clear_bit(); /* Commit netif_running(). */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001257 dev_deactivate(dev);
1258
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 /*
1260 * Call the device specific close. This cannot fail.
1261 * Only if device is UP
1262 *
1263 * We allow it to be called even after a DETACH hot-plug
1264 * event.
1265 */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001266 if (ops->ndo_stop)
1267 ops->ndo_stop(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
1269 /*
1270 * Device is now down.
1271 */
1272
1273 dev->flags &= ~IFF_UP;
1274
1275 /*
Dan Williams649274d2009-01-11 00:20:39 -08001276 * Shutdown NET_DMA
1277 */
David S. Millerb4bd07c2009-02-06 22:06:43 -08001278 net_dmaengine_put();
Dan Williams649274d2009-01-11 00:20:39 -08001279
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 return 0;
1281}
Patrick McHardybd380812010-02-26 06:34:53 +00001282
1283/**
1284 * dev_close - shutdown an interface.
1285 * @dev: device to shutdown
1286 *
1287 * This function moves an active device into down state. A
1288 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1289 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1290 * chain.
1291 */
1292int dev_close(struct net_device *dev)
1293{
1294 if (!(dev->flags & IFF_UP))
1295 return 0;
1296
1297 __dev_close(dev);
1298
1299 /*
1300 * Tell people we are down
1301 */
1302 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1303 call_netdevice_notifiers(NETDEV_DOWN, dev);
1304
1305 return 0;
1306}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001307EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308
1309
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001310/**
1311 * dev_disable_lro - disable Large Receive Offload on a device
1312 * @dev: device
1313 *
1314 * Disable Large Receive Offload (LRO) on a net device. Must be
1315 * called under RTNL. This is needed if received packets may be
1316 * forwarded to another interface.
1317 */
1318void dev_disable_lro(struct net_device *dev)
1319{
1320 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1321 dev->ethtool_ops->set_flags) {
1322 u32 flags = dev->ethtool_ops->get_flags(dev);
1323 if (flags & ETH_FLAG_LRO) {
1324 flags &= ~ETH_FLAG_LRO;
1325 dev->ethtool_ops->set_flags(dev, flags);
1326 }
1327 }
1328 WARN_ON(dev->features & NETIF_F_LRO);
1329}
1330EXPORT_SYMBOL(dev_disable_lro);
1331
1332
Eric W. Biederman881d9662007-09-17 11:56:21 -07001333static int dev_boot_phase = 1;
1334
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335/*
1336 * Device change register/unregister. These are not inline or static
1337 * as we export them to the world.
1338 */
1339
1340/**
1341 * register_netdevice_notifier - register a network notifier block
1342 * @nb: notifier
1343 *
1344 * Register a notifier to be called when network device events occur.
1345 * The notifier passed is linked into the kernel structures and must
1346 * not be reused until it has been unregistered. A negative errno code
1347 * is returned on a failure.
1348 *
1349 * When registered all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001350 * to the new notifier to allow device to have a race free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 * view of the network device list.
1352 */
1353
1354int register_netdevice_notifier(struct notifier_block *nb)
1355{
1356 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001357 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001358 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 int err;
1360
1361 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001362 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001363 if (err)
1364 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001365 if (dev_boot_phase)
1366 goto unlock;
1367 for_each_net(net) {
1368 for_each_netdev(net, dev) {
1369 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1370 err = notifier_to_errno(err);
1371 if (err)
1372 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
Eric W. Biederman881d9662007-09-17 11:56:21 -07001374 if (!(dev->flags & IFF_UP))
1375 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001376
Eric W. Biederman881d9662007-09-17 11:56:21 -07001377 nb->notifier_call(nb, NETDEV_UP, dev);
1378 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001380
1381unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 rtnl_unlock();
1383 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001384
1385rollback:
1386 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001387 for_each_net(net) {
1388 for_each_netdev(net, dev) {
1389 if (dev == last)
1390 break;
Herbert Xufcc5a032007-07-30 17:03:38 -07001391
Eric W. Biederman881d9662007-09-17 11:56:21 -07001392 if (dev->flags & IFF_UP) {
1393 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1394 nb->notifier_call(nb, NETDEV_DOWN, dev);
1395 }
1396 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00001397 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001398 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001399 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001400
1401 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001402 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001404EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
1406/**
1407 * unregister_netdevice_notifier - unregister a network notifier block
1408 * @nb: notifier
1409 *
1410 * Unregister a notifier previously registered by
1411 * register_netdevice_notifier(). The notifier is unlinked into the
1412 * kernel structures and may then be reused. A negative errno code
1413 * is returned on a failure.
1414 */
1415
1416int unregister_netdevice_notifier(struct notifier_block *nb)
1417{
Herbert Xu9f514952006-03-25 01:24:25 -08001418 int err;
1419
1420 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001421 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001422 rtnl_unlock();
1423 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001425EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
1427/**
1428 * call_netdevice_notifiers - call all network notifier blocks
1429 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001430 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 *
1432 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001433 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 */
1435
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001436int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001438 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439}
1440
1441/* When > 0 there are consumers of rx skb time stamps */
1442static atomic_t netstamp_needed = ATOMIC_INIT(0);
1443
1444void net_enable_timestamp(void)
1445{
1446 atomic_inc(&netstamp_needed);
1447}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001448EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
1450void net_disable_timestamp(void)
1451{
1452 atomic_dec(&netstamp_needed);
1453}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001454EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001456static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457{
1458 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001459 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001460 else
1461 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462}
1463
Arnd Bergmann44540962009-11-26 06:07:08 +00001464/**
1465 * dev_forward_skb - loopback an skb to another netif
1466 *
1467 * @dev: destination network device
1468 * @skb: buffer to forward
1469 *
1470 * return values:
1471 * NET_RX_SUCCESS (no congestion)
1472 * NET_RX_DROP (packet was dropped)
1473 *
1474 * dev_forward_skb can be used for injecting an skb from the
1475 * start_xmit function of one device into the receive queue
1476 * of another device.
1477 *
1478 * The receiving device may be in another namespace, so
1479 * we have to clear all information in the skb that could
1480 * impact namespace isolation.
1481 */
1482int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1483{
1484 skb_orphan(skb);
1485
1486 if (!(dev->flags & IFF_UP))
1487 return NET_RX_DROP;
1488
1489 if (skb->len > (dev->mtu + dev->hard_header_len))
1490 return NET_RX_DROP;
1491
Arnd Bergmann8a83a002010-01-30 12:23:03 +00001492 skb_set_dev(skb, dev);
Arnd Bergmann44540962009-11-26 06:07:08 +00001493 skb->tstamp.tv64 = 0;
1494 skb->pkt_type = PACKET_HOST;
1495 skb->protocol = eth_type_trans(skb, dev);
Arnd Bergmann44540962009-11-26 06:07:08 +00001496 return netif_rx(skb);
1497}
1498EXPORT_SYMBOL_GPL(dev_forward_skb);
1499
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500/*
1501 * Support routine. Sends outgoing frames to any network
1502 * taps currently in use.
1503 */
1504
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001505static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506{
1507 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001508
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001509#ifdef CONFIG_NET_CLS_ACT
1510 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1511 net_timestamp(skb);
1512#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001513 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001514#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515
1516 rcu_read_lock();
1517 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1518 /* Never send packets back to the socket
1519 * they originated from - MvS (miquels@drinkel.ow.org)
1520 */
1521 if ((ptype->dev == dev || !ptype->dev) &&
1522 (ptype->af_packet_priv == NULL ||
1523 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001524 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 if (!skb2)
1526 break;
1527
1528 /* skb->nh should be correctly
1529 set by sender, so that the second statement is
1530 just protection against buggy protocols.
1531 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001532 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001534 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001535 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 if (net_ratelimit())
1537 printk(KERN_CRIT "protocol %04x is "
1538 "buggy, dev %s\n",
1539 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001540 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 }
1542
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001543 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001545 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 }
1547 }
1548 rcu_read_unlock();
1549}
1550
Denis Vlasenko56079432006-03-29 15:57:29 -08001551
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001552static inline void __netif_reschedule(struct Qdisc *q)
1553{
1554 struct softnet_data *sd;
1555 unsigned long flags;
1556
1557 local_irq_save(flags);
1558 sd = &__get_cpu_var(softnet_data);
1559 q->next_sched = sd->output_queue;
1560 sd->output_queue = q;
1561 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1562 local_irq_restore(flags);
1563}
1564
David S. Miller37437bb2008-07-16 02:15:04 -07001565void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001566{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001567 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1568 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001569}
1570EXPORT_SYMBOL(__netif_schedule);
1571
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001572void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001573{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001574 if (atomic_dec_and_test(&skb->users)) {
1575 struct softnet_data *sd;
1576 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001577
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001578 local_irq_save(flags);
1579 sd = &__get_cpu_var(softnet_data);
1580 skb->next = sd->completion_queue;
1581 sd->completion_queue = skb;
1582 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1583 local_irq_restore(flags);
1584 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001585}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001586EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001587
1588void dev_kfree_skb_any(struct sk_buff *skb)
1589{
1590 if (in_irq() || irqs_disabled())
1591 dev_kfree_skb_irq(skb);
1592 else
1593 dev_kfree_skb(skb);
1594}
1595EXPORT_SYMBOL(dev_kfree_skb_any);
1596
1597
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001598/**
1599 * netif_device_detach - mark device as removed
1600 * @dev: network device
1601 *
1602 * Mark device as removed from system and therefore no longer available.
1603 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001604void netif_device_detach(struct net_device *dev)
1605{
1606 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1607 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001608 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001609 }
1610}
1611EXPORT_SYMBOL(netif_device_detach);
1612
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001613/**
1614 * netif_device_attach - mark device as attached
1615 * @dev: network device
1616 *
1617 * Mark device as attached from system and restart if needed.
1618 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001619void netif_device_attach(struct net_device *dev)
1620{
1621 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1622 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001623 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001624 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001625 }
1626}
1627EXPORT_SYMBOL(netif_device_attach);
1628
Ben Hutchings6de329e2008-06-16 17:02:28 -07001629static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1630{
1631 return ((features & NETIF_F_GEN_CSUM) ||
1632 ((features & NETIF_F_IP_CSUM) &&
1633 protocol == htons(ETH_P_IP)) ||
1634 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001635 protocol == htons(ETH_P_IPV6)) ||
1636 ((features & NETIF_F_FCOE_CRC) &&
1637 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001638}
1639
1640static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1641{
1642 if (can_checksum_protocol(dev->features, skb->protocol))
1643 return true;
1644
1645 if (skb->protocol == htons(ETH_P_8021Q)) {
1646 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1647 if (can_checksum_protocol(dev->features & dev->vlan_features,
1648 veh->h_vlan_encapsulated_proto))
1649 return true;
1650 }
1651
1652 return false;
1653}
Denis Vlasenko56079432006-03-29 15:57:29 -08001654
Arnd Bergmann8a83a002010-01-30 12:23:03 +00001655/**
1656 * skb_dev_set -- assign a new device to a buffer
1657 * @skb: buffer for the new device
1658 * @dev: network device
1659 *
1660 * If an skb is owned by a device already, we have to reset
1661 * all data private to the namespace a device belongs to
1662 * before assigning it a new device.
1663 */
1664#ifdef CONFIG_NET_NS
1665void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1666{
1667 skb_dst_drop(skb);
1668 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1669 secpath_reset(skb);
1670 nf_reset(skb);
1671 skb_init_secmark(skb);
1672 skb->mark = 0;
1673 skb->priority = 0;
1674 skb->nf_trace = 0;
1675 skb->ipvs_property = 0;
1676#ifdef CONFIG_NET_SCHED
1677 skb->tc_index = 0;
1678#endif
1679 }
1680 skb->dev = dev;
1681}
1682EXPORT_SYMBOL(skb_set_dev);
1683#endif /* CONFIG_NET_NS */
1684
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685/*
1686 * Invalidate hardware checksum when packet is to be mangled, and
1687 * complete checksum manually on outgoing path.
1688 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001689int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690{
Al Virod3bc23e2006-11-14 21:24:49 -08001691 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001692 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
Patrick McHardy84fa7932006-08-29 16:44:56 -07001694 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001695 goto out_set_summed;
1696
1697 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001698 /* Let GSO fix up the checksum. */
1699 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 }
1701
Herbert Xua0308472007-10-15 01:47:15 -07001702 offset = skb->csum_start - skb_headroom(skb);
1703 BUG_ON(offset >= skb_headlen(skb));
1704 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1705
1706 offset += skb->csum_offset;
1707 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1708
1709 if (skb_cloned(skb) &&
1710 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1712 if (ret)
1713 goto out;
1714 }
1715
Herbert Xua0308472007-10-15 01:47:15 -07001716 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001717out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001719out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 return ret;
1721}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001722EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001724/**
1725 * skb_gso_segment - Perform segmentation on skb.
1726 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001727 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001728 *
1729 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001730 *
1731 * It may return NULL if the skb requires no segmentation. This is
1732 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001733 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001734struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001735{
1736 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1737 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001738 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001739 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001740
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001741 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001742 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001743 __skb_pull(skb, skb->mac_len);
1744
Herbert Xu67fd1a72009-01-19 16:26:44 -08001745 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1746 struct net_device *dev = skb->dev;
1747 struct ethtool_drvinfo info = {};
1748
1749 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1750 dev->ethtool_ops->get_drvinfo(dev, &info);
1751
1752 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1753 "ip_summed=%d",
1754 info.driver, dev ? dev->features : 0L,
1755 skb->sk ? skb->sk->sk_route_caps : 0L,
1756 skb->len, skb->data_len, skb->ip_summed);
1757
Herbert Xua430a432006-07-08 13:34:56 -07001758 if (skb_header_cloned(skb) &&
1759 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1760 return ERR_PTR(err);
1761 }
1762
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001763 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001764 list_for_each_entry_rcu(ptype,
1765 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001766 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001767 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001768 err = ptype->gso_send_check(skb);
1769 segs = ERR_PTR(err);
1770 if (err || skb_gso_ok(skb, features))
1771 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001772 __skb_push(skb, (skb->data -
1773 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001774 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001775 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001776 break;
1777 }
1778 }
1779 rcu_read_unlock();
1780
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001781 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001782
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001783 return segs;
1784}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001785EXPORT_SYMBOL(skb_gso_segment);
1786
Herbert Xufb286bb2005-11-10 13:01:24 -08001787/* Take action when hardware reception checksum errors are detected. */
1788#ifdef CONFIG_BUG
1789void netdev_rx_csum_fault(struct net_device *dev)
1790{
1791 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001792 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001793 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001794 dump_stack();
1795 }
1796}
1797EXPORT_SYMBOL(netdev_rx_csum_fault);
1798#endif
1799
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800/* Actually, we should eliminate this check as soon as we know, that:
1801 * 1. IOMMU is present and allows to map all the memory.
1802 * 2. No high memory really exists on this machine.
1803 */
1804
Eric Dumazet9092c652010-04-02 13:34:49 -07001805static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001807#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 int i;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00001809 if (!(dev->features & NETIF_F_HIGHDMA)) {
1810 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1811 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1812 return 1;
1813 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00001815 if (PCI_DMA_BUS_IS_PHYS) {
1816 struct device *pdev = dev->dev.parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817
Eric Dumazet9092c652010-04-02 13:34:49 -07001818 if (!pdev)
1819 return 0;
FUJITA Tomonori5acbbd42010-03-30 22:35:50 +00001820 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1821 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1822 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1823 return 1;
1824 }
1825 }
Herbert Xu3d3a8532006-06-27 13:33:10 -07001826#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 return 0;
1828}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001830struct dev_gso_cb {
1831 void (*destructor)(struct sk_buff *skb);
1832};
1833
1834#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1835
1836static void dev_gso_skb_destructor(struct sk_buff *skb)
1837{
1838 struct dev_gso_cb *cb;
1839
1840 do {
1841 struct sk_buff *nskb = skb->next;
1842
1843 skb->next = nskb->next;
1844 nskb->next = NULL;
1845 kfree_skb(nskb);
1846 } while (skb->next);
1847
1848 cb = DEV_GSO_CB(skb);
1849 if (cb->destructor)
1850 cb->destructor(skb);
1851}
1852
1853/**
1854 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1855 * @skb: buffer to segment
1856 *
1857 * This function segments the given skb and stores the list of segments
1858 * in skb->next.
1859 */
1860static int dev_gso_segment(struct sk_buff *skb)
1861{
1862 struct net_device *dev = skb->dev;
1863 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001864 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1865 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001866
Herbert Xu576a30e2006-06-27 13:22:38 -07001867 segs = skb_gso_segment(skb, features);
1868
1869 /* Verifying header integrity only. */
1870 if (!segs)
1871 return 0;
1872
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001873 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001874 return PTR_ERR(segs);
1875
1876 skb->next = segs;
1877 DEV_GSO_CB(skb)->destructor = skb->destructor;
1878 skb->destructor = dev_gso_skb_destructor;
1879
1880 return 0;
1881}
1882
Eric Dumazetfc6055a2010-04-16 12:18:22 +00001883/*
1884 * Try to orphan skb early, right before transmission by the device.
1885 * We cannot orphan skb if tx timestamp is requested, since
1886 * drivers need to call skb_tstamp_tx() to send the timestamp.
1887 */
1888static inline void skb_orphan_try(struct sk_buff *skb)
1889{
1890 if (!skb_tx(skb)->flags)
1891 skb_orphan(skb);
1892}
1893
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001894int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1895 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001896{
Stephen Hemminger00829822008-11-20 20:14:53 -08001897 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00001898 int rc = NETDEV_TX_OK;
Stephen Hemminger00829822008-11-20 20:14:53 -08001899
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001900 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001901 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001902 dev_queue_xmit_nit(skb, dev);
1903
Herbert Xu576a30e2006-06-27 13:22:38 -07001904 if (netif_needs_gso(dev, skb)) {
1905 if (unlikely(dev_gso_segment(skb)))
1906 goto out_kfree_skb;
1907 if (skb->next)
1908 goto gso;
1909 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001910
Eric Dumazet93f154b2009-05-18 22:19:19 -07001911 /*
1912 * If device doesnt need skb->dst, release it right now while
1913 * its hot in this cpu cache
1914 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001915 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1916 skb_dst_drop(skb);
1917
Eric Dumazetfc6055a2010-04-16 12:18:22 +00001918 skb_orphan_try(skb);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001919 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001920 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001921 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001922 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001923 }
1924
Herbert Xu576a30e2006-06-27 13:22:38 -07001925gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001926 do {
1927 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001928
1929 skb->next = nskb->next;
1930 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00001931
1932 /*
1933 * If device doesnt need nskb->dst, release it right now while
1934 * its hot in this cpu cache
1935 */
1936 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1937 skb_dst_drop(nskb);
1938
Eric Dumazetfc6055a2010-04-16 12:18:22 +00001939 skb_orphan_try(nskb);
Stephen Hemminger00829822008-11-20 20:14:53 -08001940 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001941 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00001942 if (rc & ~NETDEV_TX_MASK)
1943 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07001944 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001945 skb->next = nskb;
1946 return rc;
1947 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001948 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001949 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001950 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001951 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001952
Patrick McHardy572a9d72009-11-10 06:14:14 +00001953out_kfree_gso_skb:
1954 if (likely(skb->next == NULL))
1955 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001956out_kfree_skb:
1957 kfree_skb(skb);
Patrick McHardy572a9d72009-11-10 06:14:14 +00001958 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001959}
1960
Tom Herbert0a9627f2010-03-16 08:03:29 +00001961static u32 hashrnd __read_mostly;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001962
Stephen Hemminger92477442009-03-21 13:39:26 -07001963u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001964{
David S. Miller70192982009-01-27 16:34:47 -08001965 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001966
David S. Miller513de112009-05-03 14:43:10 -07001967 if (skb_rx_queue_recorded(skb)) {
1968 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001969 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001970 hash -= dev->real_num_tx_queues;
1971 return hash;
1972 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001973
1974 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001975 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001976 else
David S. Miller70192982009-01-27 16:34:47 -08001977 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001978
Tom Herbert0a9627f2010-03-16 08:03:29 +00001979 hash = jhash_1word(hash, hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001980
David S. Millerb6b2fed2008-07-21 09:48:06 -07001981 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001982}
Stephen Hemminger92477442009-03-21 13:39:26 -07001983EXPORT_SYMBOL(skb_tx_hash);
David S. Miller8f0f2222008-07-15 03:47:03 -07001984
Eric Dumazeted046422009-11-13 21:54:04 +00001985static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1986{
1987 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1988 if (net_ratelimit()) {
Eric Dumazet7a161ea2010-04-08 21:26:13 +00001989 pr_warning("%s selects TX queue %d, but "
1990 "real number of TX queues is %d\n",
1991 dev->name, queue_index, dev->real_num_tx_queues);
Eric Dumazeted046422009-11-13 21:54:04 +00001992 }
1993 return 0;
1994 }
1995 return queue_index;
1996}
1997
David S. Millere8a04642008-07-17 00:34:19 -07001998static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1999 struct sk_buff *skb)
2000{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00002001 u16 queue_index;
2002 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002003
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00002004 if (sk_tx_queue_recorded(sk)) {
2005 queue_index = sk_tx_queue_get(sk);
2006 } else {
2007 const struct net_device_ops *ops = dev->netdev_ops;
2008
2009 if (ops->ndo_select_queue) {
2010 queue_index = ops->ndo_select_queue(dev, skb);
Eric Dumazeted046422009-11-13 21:54:04 +00002011 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00002012 } else {
2013 queue_index = 0;
2014 if (dev->real_num_tx_queues > 1)
2015 queue_index = skb_tx_hash(dev, skb);
2016
Eric Dumazetb6c67122010-04-08 23:03:29 +00002017 if (sk && rcu_dereference_check(sk->sk_dst_cache, 1))
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00002018 sk_tx_queue_set(sk, queue_index);
2019 }
2020 }
David S. Millereae792b2008-07-15 03:03:33 -07002021
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002022 skb_set_queue_mapping(skb, queue_index);
2023 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07002024}
2025
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002026static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2027 struct net_device *dev,
2028 struct netdev_queue *txq)
2029{
2030 spinlock_t *root_lock = qdisc_lock(q);
2031 int rc;
2032
2033 spin_lock(root_lock);
2034 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2035 kfree_skb(skb);
2036 rc = NET_XMIT_DROP;
2037 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2038 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
2039 /*
2040 * This is a work-conserving queue; there are no old skbs
2041 * waiting to be sent out; and the qdisc is not running -
2042 * xmit the skb directly.
2043 */
2044 __qdisc_update_bstats(q, skb->len);
2045 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
2046 __qdisc_run(q);
2047 else
2048 clear_bit(__QDISC_STATE_RUNNING, &q->state);
2049
2050 rc = NET_XMIT_SUCCESS;
2051 } else {
2052 rc = qdisc_enqueue_root(skb, q);
2053 qdisc_run(q);
2054 }
2055 spin_unlock(root_lock);
2056
2057 return rc;
2058}
2059
Krishna Kumar4b258462010-01-21 01:26:29 -08002060/*
2061 * Returns true if either:
2062 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2063 * 2. skb is fragmented and the device does not support SG, or if
2064 * at least one of fragments is in highmem and device does not
2065 * support DMA from it.
2066 */
2067static inline int skb_needs_linearize(struct sk_buff *skb,
2068 struct net_device *dev)
2069{
2070 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2071 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2072 illegal_highdma(dev, skb)));
2073}
2074
Dave Jonesd29f7492008-07-22 14:09:06 -07002075/**
2076 * dev_queue_xmit - transmit a buffer
2077 * @skb: buffer to transmit
2078 *
2079 * Queue a buffer for transmission to a network device. The caller must
2080 * have set the device and priority and built the buffer before calling
2081 * this function. The function can be called from an interrupt.
2082 *
2083 * A negative errno code is returned on a failure. A success does not
2084 * guarantee the frame will be transmitted as it may be dropped due
2085 * to congestion or traffic shaping.
2086 *
2087 * -----------------------------------------------------------------------------------
2088 * I notice this method can also return errors from the queue disciplines,
2089 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2090 * be positive.
2091 *
2092 * Regardless of the return value, the skb is consumed, so it is currently
2093 * difficult to retry a send to this method. (You can bump the ref count
2094 * before sending to hold a reference for retry if you are careful.)
2095 *
2096 * When calling this method, interrupts MUST be enabled. This is because
2097 * the BH enable code must have IRQs enabled so that it will not deadlock.
2098 * --BLG
2099 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100int dev_queue_xmit(struct sk_buff *skb)
2101{
2102 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002103 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 struct Qdisc *q;
2105 int rc = -ENOMEM;
2106
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002107 /* GSO will handle the following emulations directly. */
2108 if (netif_needs_gso(dev, skb))
2109 goto gso;
2110
Krishna Kumar4b258462010-01-21 01:26:29 -08002111 /* Convert a paged skb to linear, if required */
2112 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 goto out_kfree_skb;
2114
2115 /* If packet is not checksummed and device does not support
2116 * checksumming for this protocol, complete checksumming here.
2117 */
Herbert Xu663ead32007-04-09 11:59:07 -07002118 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2119 skb_set_transport_header(skb, skb->csum_start -
2120 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07002121 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2122 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07002123 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002125gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002126 /* Disable soft irqs for various locks below. Also
2127 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002129 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
David S. Millereae792b2008-07-15 03:03:33 -07002131 txq = dev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002132 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002133
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002135 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136#endif
2137 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002138 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002139 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 }
2141
2142 /* The device has no queue. Common case for software devices:
2143 loopback, all the sorts of tunnels...
2144
Herbert Xu932ff272006-06-09 12:20:56 -07002145 Really, it is unlikely that netif_tx_lock protection is necessary
2146 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 counters.)
2148 However, it is possible, that they rely on protection
2149 made by us here.
2150
2151 Check this and shot the lock. It is not prone from deadlocks.
2152 Either shot noqueue qdisc, it is even simpler 8)
2153 */
2154 if (dev->flags & IFF_UP) {
2155 int cpu = smp_processor_id(); /* ok because BHs are off */
2156
David S. Millerc773e842008-07-08 23:13:53 -07002157 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
David S. Millerc773e842008-07-08 23:13:53 -07002159 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002161 if (!netif_tx_queue_stopped(txq)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002162 rc = dev_hard_start_xmit(skb, dev, txq);
2163 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002164 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 goto out;
2166 }
2167 }
David S. Millerc773e842008-07-08 23:13:53 -07002168 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 if (net_ratelimit())
2170 printk(KERN_CRIT "Virtual device %s asks to "
2171 "queue packet!\n", dev->name);
2172 } else {
2173 /* Recursion is detected! It is possible,
2174 * unfortunately */
2175 if (net_ratelimit())
2176 printk(KERN_CRIT "Dead loop on virtual device "
2177 "%s, fix it urgently!\n", dev->name);
2178 }
2179 }
2180
2181 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002182 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
2184out_kfree_skb:
2185 kfree_skb(skb);
2186 return rc;
2187out:
Herbert Xud4828d82006-06-22 02:28:18 -07002188 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 return rc;
2190}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002191EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
2193
2194/*=======================================================================
2195 Receiver routines
2196 =======================================================================*/
2197
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002198int netdev_max_backlog __read_mostly = 1000;
2199int netdev_budget __read_mostly = 300;
2200int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201
2202DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2203
Eric Dumazetdf334542010-03-24 19:13:54 +00002204#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002205
2206/* One global table that all flow-based protocols share. */
Eric Dumazet8770acf2010-04-17 00:54:36 -07002207struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
Tom Herbertfec5e652010-04-16 16:01:27 -07002208EXPORT_SYMBOL(rps_sock_flow_table);
2209
Tom Herbert0a9627f2010-03-16 08:03:29 +00002210/*
2211 * get_rps_cpu is called from netif_receive_skb and returns the target
2212 * CPU from the RPS map of the receiving queue for a given skb.
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002213 * rcu_read_lock must be held on entry.
Tom Herbert0a9627f2010-03-16 08:03:29 +00002214 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002215static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2216 struct rps_dev_flow **rflowp)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002217{
2218 struct ipv6hdr *ip6;
2219 struct iphdr *ip;
2220 struct netdev_rx_queue *rxqueue;
2221 struct rps_map *map;
Tom Herbertfec5e652010-04-16 16:01:27 -07002222 struct rps_dev_flow_table *flow_table;
2223 struct rps_sock_flow_table *sock_flow_table;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002224 int cpu = -1;
2225 u8 ip_proto;
Tom Herbertfec5e652010-04-16 16:01:27 -07002226 u16 tcpu;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002227 u32 addr1, addr2, ports, ihl;
2228
Tom Herbert0a9627f2010-03-16 08:03:29 +00002229 if (skb_rx_queue_recorded(skb)) {
2230 u16 index = skb_get_rx_queue(skb);
2231 if (unlikely(index >= dev->num_rx_queues)) {
2232 if (net_ratelimit()) {
Eric Dumazet7a161ea2010-04-08 21:26:13 +00002233 pr_warning("%s received packet on queue "
2234 "%u, but number of RX queues is %u\n",
2235 dev->name, index, dev->num_rx_queues);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002236 }
2237 goto done;
2238 }
2239 rxqueue = dev->_rx + index;
2240 } else
2241 rxqueue = dev->_rx;
2242
Tom Herbertfec5e652010-04-16 16:01:27 -07002243 if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002244 goto done;
2245
2246 if (skb->rxhash)
2247 goto got_hash; /* Skip hash computation on packet header */
2248
2249 switch (skb->protocol) {
2250 case __constant_htons(ETH_P_IP):
2251 if (!pskb_may_pull(skb, sizeof(*ip)))
2252 goto done;
2253
2254 ip = (struct iphdr *) skb->data;
2255 ip_proto = ip->protocol;
2256 addr1 = ip->saddr;
2257 addr2 = ip->daddr;
2258 ihl = ip->ihl;
2259 break;
2260 case __constant_htons(ETH_P_IPV6):
2261 if (!pskb_may_pull(skb, sizeof(*ip6)))
2262 goto done;
2263
2264 ip6 = (struct ipv6hdr *) skb->data;
2265 ip_proto = ip6->nexthdr;
2266 addr1 = ip6->saddr.s6_addr32[3];
2267 addr2 = ip6->daddr.s6_addr32[3];
2268 ihl = (40 >> 2);
2269 break;
2270 default:
2271 goto done;
2272 }
2273 ports = 0;
2274 switch (ip_proto) {
2275 case IPPROTO_TCP:
2276 case IPPROTO_UDP:
2277 case IPPROTO_DCCP:
2278 case IPPROTO_ESP:
2279 case IPPROTO_AH:
2280 case IPPROTO_SCTP:
2281 case IPPROTO_UDPLITE:
2282 if (pskb_may_pull(skb, (ihl * 4) + 4))
2283 ports = *((u32 *) (skb->data + (ihl * 4)));
2284 break;
2285
2286 default:
2287 break;
2288 }
2289
2290 skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
2291 if (!skb->rxhash)
2292 skb->rxhash = 1;
2293
2294got_hash:
Tom Herbertfec5e652010-04-16 16:01:27 -07002295 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2296 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2297 if (flow_table && sock_flow_table) {
2298 u16 next_cpu;
2299 struct rps_dev_flow *rflow;
2300
2301 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2302 tcpu = rflow->cpu;
2303
2304 next_cpu = sock_flow_table->ents[skb->rxhash &
2305 sock_flow_table->mask];
2306
2307 /*
2308 * If the desired CPU (where last recvmsg was done) is
2309 * different from current CPU (one in the rx-queue flow
2310 * table entry), switch if one of the following holds:
2311 * - Current CPU is unset (equal to RPS_NO_CPU).
2312 * - Current CPU is offline.
2313 * - The current CPU's queue tail has advanced beyond the
2314 * last packet that was enqueued using this table entry.
2315 * This guarantees that all previous packets for the flow
2316 * have been dequeued, thus preserving in order delivery.
2317 */
2318 if (unlikely(tcpu != next_cpu) &&
2319 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2320 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2321 rflow->last_qtail)) >= 0)) {
2322 tcpu = rflow->cpu = next_cpu;
2323 if (tcpu != RPS_NO_CPU)
2324 rflow->last_qtail = per_cpu(softnet_data,
2325 tcpu).input_queue_head;
2326 }
2327 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2328 *rflowp = rflow;
2329 cpu = tcpu;
2330 goto done;
2331 }
2332 }
2333
Tom Herbert0a9627f2010-03-16 08:03:29 +00002334 map = rcu_dereference(rxqueue->rps_map);
2335 if (map) {
Tom Herbertfec5e652010-04-16 16:01:27 -07002336 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
Tom Herbert0a9627f2010-03-16 08:03:29 +00002337
2338 if (cpu_online(tcpu)) {
2339 cpu = tcpu;
2340 goto done;
2341 }
2342 }
2343
2344done:
Tom Herbert0a9627f2010-03-16 08:03:29 +00002345 return cpu;
2346}
2347
Tom Herbert0a9627f2010-03-16 08:03:29 +00002348/* Called from hardirq (IPI) context */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002349static void rps_trigger_softirq(void *data)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002350{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002351 struct softnet_data *sd = data;
2352
2353 __napi_schedule(&sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002354 __get_cpu_var(netdev_rx_stat).received_rps++;
2355}
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002356
Tom Herbertfec5e652010-04-16 16:01:27 -07002357#endif /* CONFIG_RPS */
Tom Herbert0a9627f2010-03-16 08:03:29 +00002358
2359/*
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002360 * Check if this softnet_data structure is another cpu one
2361 * If yes, queue it to our IPI list and return 1
2362 * If no, return 0
2363 */
2364static int rps_ipi_queued(struct softnet_data *sd)
2365{
2366#ifdef CONFIG_RPS
2367 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2368
2369 if (sd != mysd) {
2370 sd->rps_ipi_next = mysd->rps_ipi_list;
2371 mysd->rps_ipi_list = sd;
2372
2373 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2374 return 1;
2375 }
2376#endif /* CONFIG_RPS */
2377 return 0;
2378}
2379
2380/*
Tom Herbert0a9627f2010-03-16 08:03:29 +00002381 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2382 * queue (may be a remote CPU queue).
2383 */
Tom Herbertfec5e652010-04-16 16:01:27 -07002384static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2385 unsigned int *qtail)
Tom Herbert0a9627f2010-03-16 08:03:29 +00002386{
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002387 struct softnet_data *sd;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002388 unsigned long flags;
2389
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002390 sd = &per_cpu(softnet_data, cpu);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002391
2392 local_irq_save(flags);
2393 __get_cpu_var(netdev_rx_stat).total++;
2394
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002395 rps_lock(sd);
2396 if (sd->input_pkt_queue.qlen <= netdev_max_backlog) {
2397 if (sd->input_pkt_queue.qlen) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00002398enqueue:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002399 __skb_queue_tail(&sd->input_pkt_queue, skb);
Tom Herbertfec5e652010-04-16 16:01:27 -07002400#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002401 *qtail = sd->input_queue_head + sd->input_pkt_queue.qlen;
Tom Herbertfec5e652010-04-16 16:01:27 -07002402#endif
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002403 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00002404 local_irq_restore(flags);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002405 return NET_RX_SUCCESS;
2406 }
2407
2408 /* Schedule NAPI for backlog device */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002409 if (napi_schedule_prep(&sd->backlog)) {
2410 if (!rps_ipi_queued(sd))
2411 __napi_schedule(&sd->backlog);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002412 }
2413 goto enqueue;
2414 }
2415
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002416 rps_unlock(sd);
Tom Herbert0a9627f2010-03-16 08:03:29 +00002417
2418 __get_cpu_var(netdev_rx_stat).dropped++;
2419 local_irq_restore(flags);
2420
2421 kfree_skb(skb);
2422 return NET_RX_DROP;
2423}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425/**
2426 * netif_rx - post buffer to the network code
2427 * @skb: buffer to post
2428 *
2429 * This function receives a packet from a device driver and queues it for
2430 * the upper (protocol) levels to process. It always succeeds. The buffer
2431 * may be dropped during processing for congestion control or by the
2432 * protocol layers.
2433 *
2434 * return values:
2435 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 * NET_RX_DROP (packet was dropped)
2437 *
2438 */
2439
2440int netif_rx(struct sk_buff *skb)
2441{
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002442 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
2444 /* if netpoll wants it, pretend we never saw it */
2445 if (netpoll_rx(skb))
2446 return NET_RX_DROP;
2447
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002448 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002449 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450
Eric Dumazetdf334542010-03-24 19:13:54 +00002451#ifdef CONFIG_RPS
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002452 {
Tom Herbertfec5e652010-04-16 16:01:27 -07002453 struct rps_dev_flow voidflow, *rflow = &voidflow;
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002454 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002456 rcu_read_lock();
Tom Herbertfec5e652010-04-16 16:01:27 -07002457
2458 cpu = get_rps_cpu(skb->dev, skb, &rflow);
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002459 if (cpu < 0)
2460 cpu = smp_processor_id();
Tom Herbertfec5e652010-04-16 16:01:27 -07002461
2462 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2463
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002464 rcu_read_unlock();
2465 }
2466#else
Tom Herbertfec5e652010-04-16 16:01:27 -07002467 {
2468 unsigned int qtail;
2469 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2470 put_cpu();
2471 }
Eric Dumazetb0e28f12010-04-15 00:14:07 -07002472#endif
2473 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002475EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476
2477int netif_rx_ni(struct sk_buff *skb)
2478{
2479 int err;
2480
2481 preempt_disable();
2482 err = netif_rx(skb);
2483 if (local_softirq_pending())
2484 do_softirq();
2485 preempt_enable();
2486
2487 return err;
2488}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489EXPORT_SYMBOL(netif_rx_ni);
2490
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491static void net_tx_action(struct softirq_action *h)
2492{
2493 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2494
2495 if (sd->completion_queue) {
2496 struct sk_buff *clist;
2497
2498 local_irq_disable();
2499 clist = sd->completion_queue;
2500 sd->completion_queue = NULL;
2501 local_irq_enable();
2502
2503 while (clist) {
2504 struct sk_buff *skb = clist;
2505 clist = clist->next;
2506
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002507 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 __kfree_skb(skb);
2509 }
2510 }
2511
2512 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002513 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514
2515 local_irq_disable();
2516 head = sd->output_queue;
2517 sd->output_queue = NULL;
2518 local_irq_enable();
2519
2520 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002521 struct Qdisc *q = head;
2522 spinlock_t *root_lock;
2523
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 head = head->next_sched;
2525
David S. Miller5fb66222008-08-02 20:02:43 -07002526 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002527 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002528 smp_mb__before_clear_bit();
2529 clear_bit(__QDISC_STATE_SCHED,
2530 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002531 qdisc_run(q);
2532 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002534 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002535 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002536 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002537 } else {
2538 smp_mb__before_clear_bit();
2539 clear_bit(__QDISC_STATE_SCHED,
2540 &q->state);
2541 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 }
2543 }
2544 }
2545}
2546
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002547static inline int deliver_skb(struct sk_buff *skb,
2548 struct packet_type *pt_prev,
2549 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550{
2551 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002552 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553}
2554
2555#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002556
2557#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2558/* This hook is defined here for ATM LANE */
2559int (*br_fdb_test_addr_hook)(struct net_device *dev,
2560 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002561EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002562#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563
Stephen Hemminger6229e362007-03-21 13:38:47 -07002564/*
2565 * If bridge module is loaded call bridging hook.
2566 * returns NULL if packet was consumed.
2567 */
2568struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2569 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002570EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002571
Stephen Hemminger6229e362007-03-21 13:38:47 -07002572static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2573 struct packet_type **pt_prev, int *ret,
2574 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575{
2576 struct net_bridge_port *port;
2577
Stephen Hemminger6229e362007-03-21 13:38:47 -07002578 if (skb->pkt_type == PACKET_LOOPBACK ||
2579 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2580 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581
2582 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002583 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002585 }
2586
Stephen Hemminger6229e362007-03-21 13:38:47 -07002587 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588}
2589#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002590#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591#endif
2592
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002593#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2594struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2595EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2596
2597static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2598 struct packet_type **pt_prev,
2599 int *ret,
2600 struct net_device *orig_dev)
2601{
2602 if (skb->dev->macvlan_port == NULL)
2603 return skb;
2604
2605 if (*pt_prev) {
2606 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2607 *pt_prev = NULL;
2608 }
2609 return macvlan_handle_frame_hook(skb);
2610}
2611#else
2612#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2613#endif
2614
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615#ifdef CONFIG_NET_CLS_ACT
2616/* TODO: Maybe we should just force sch_ingress to be compiled in
2617 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
2618 * a compare and 2 stores extra right now if we dont have it on
2619 * but have CONFIG_NET_CLS_ACT
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002620 * NOTE: This doesnt stop any functionality; if you dont have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 * the ingress scheduler, you just cant add policies on ingress.
2622 *
2623 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002624static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002627 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002628 struct netdev_queue *rxq;
2629 int result = TC_ACT_OK;
2630 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002631
Herbert Xuf697c3e2007-10-14 00:38:47 -07002632 if (MAX_RED_LOOP < ttl++) {
2633 printk(KERN_WARNING
2634 "Redir loop detected Dropping packet (%d->%d)\n",
Eric Dumazet8964be42009-11-20 15:35:04 -08002635 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002636 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 }
2638
Herbert Xuf697c3e2007-10-14 00:38:47 -07002639 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2640 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2641
David S. Miller555353c2008-07-08 17:33:13 -07002642 rxq = &dev->rx_queue;
2643
David S. Miller83874002008-07-17 00:53:03 -07002644 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002645 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002646 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002647 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2648 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002649 spin_unlock(qdisc_lock(q));
2650 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002651
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 return result;
2653}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002654
2655static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2656 struct packet_type **pt_prev,
2657 int *ret, struct net_device *orig_dev)
2658{
David S. Miller8d50b532008-07-30 02:37:46 -07002659 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002660 goto out;
2661
2662 if (*pt_prev) {
2663 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2664 *pt_prev = NULL;
2665 } else {
2666 /* Huh? Why does turning on AF_PACKET affect this? */
2667 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2668 }
2669
2670 switch (ing_filter(skb)) {
2671 case TC_ACT_SHOT:
2672 case TC_ACT_STOLEN:
2673 kfree_skb(skb);
2674 return NULL;
2675 }
2676
2677out:
2678 skb->tc_verd = 0;
2679 return skb;
2680}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681#endif
2682
Patrick McHardybc1d0412008-07-14 22:49:30 -07002683/*
2684 * netif_nit_deliver - deliver received packets to network taps
2685 * @skb: buffer
2686 *
2687 * This function is used to deliver incoming packets to network
2688 * taps. It should be used when the normal netif_receive_skb path
2689 * is bypassed, for example because of VLAN acceleration.
2690 */
2691void netif_nit_deliver(struct sk_buff *skb)
2692{
2693 struct packet_type *ptype;
2694
2695 if (list_empty(&ptype_all))
2696 return;
2697
2698 skb_reset_network_header(skb);
2699 skb_reset_transport_header(skb);
2700 skb->mac_len = skb->network_header - skb->mac_header;
2701
2702 rcu_read_lock();
2703 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2704 if (!ptype->dev || ptype->dev == skb->dev)
2705 deliver_skb(skb, ptype, skb->dev);
2706 }
2707 rcu_read_unlock();
2708}
2709
Eric Dumazetacbbc072010-04-11 06:56:11 +00002710static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2711 struct net_device *master)
2712{
2713 if (skb->pkt_type == PACKET_HOST) {
2714 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2715
2716 memcpy(dest, master->dev_addr, ETH_ALEN);
2717 }
2718}
2719
2720/* On bonding slaves other than the currently active slave, suppress
2721 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2722 * ARP on active-backup slaves with arp_validate enabled.
2723 */
2724int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2725{
2726 struct net_device *dev = skb->dev;
2727
2728 if (master->priv_flags & IFF_MASTER_ARPMON)
2729 dev->last_rx = jiffies;
2730
2731 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
2732 /* Do address unmangle. The local destination address
2733 * will be always the one master has. Provides the right
2734 * functionality in a bridge.
2735 */
2736 skb_bond_set_mac_by_master(skb, master);
2737 }
2738
2739 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2740 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2741 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2742 return 0;
2743
2744 if (master->priv_flags & IFF_MASTER_ALB) {
2745 if (skb->pkt_type != PACKET_BROADCAST &&
2746 skb->pkt_type != PACKET_MULTICAST)
2747 return 0;
2748 }
2749 if (master->priv_flags & IFF_MASTER_8023AD &&
2750 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2751 return 0;
2752
2753 return 1;
2754 }
2755 return 0;
2756}
2757EXPORT_SYMBOL(__skb_bond_should_drop);
2758
Eric Dumazet10f744d2010-03-28 23:07:20 -07002759static int __netif_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760{
2761 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002762 struct net_device *orig_dev;
Eric Dumazet0641e4f2010-03-18 21:16:45 -07002763 struct net_device *master;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002764 struct net_device *null_or_orig;
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002765 struct net_device *null_or_bond;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002767 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002769 if (!skb->tstamp.tv64)
2770 net_timestamp(skb);
2771
Eric Dumazet05423b22009-10-26 18:40:35 -07002772 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002773 return NET_RX_SUCCESS;
2774
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002776 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777 return NET_RX_DROP;
2778
Eric Dumazet8964be42009-11-20 15:35:04 -08002779 if (!skb->skb_iif)
2780 skb->skb_iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002781
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002782 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002783 orig_dev = skb->dev;
Eric Dumazet0641e4f2010-03-18 21:16:45 -07002784 master = ACCESS_ONCE(orig_dev->master);
2785 if (master) {
2786 if (skb_bond_should_drop(skb, master))
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002787 null_or_orig = orig_dev; /* deliver only exact match */
2788 else
Eric Dumazet0641e4f2010-03-18 21:16:45 -07002789 skb->dev = master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002790 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002791
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 __get_cpu_var(netdev_rx_stat).total++;
2793
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002794 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002795 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002796 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797
2798 pt_prev = NULL;
2799
2800 rcu_read_lock();
2801
2802#ifdef CONFIG_NET_CLS_ACT
2803 if (skb->tc_verd & TC_NCLS) {
2804 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2805 goto ncls;
2806 }
2807#endif
2808
2809 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002810 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2811 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002812 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002813 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 pt_prev = ptype;
2815 }
2816 }
2817
2818#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002819 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2820 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822ncls:
2823#endif
2824
Stephen Hemminger6229e362007-03-21 13:38:47 -07002825 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2826 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002828 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2829 if (!skb)
2830 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002832 /*
2833 * Make sure frames received on VLAN interfaces stacked on
2834 * bonding interfaces still make their way to any base bonding
2835 * device that may have registered for a specific ptype. The
2836 * handler may have to adjust skb->dev and orig_dev.
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002837 */
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002838 null_or_bond = NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002839 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2840 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002841 null_or_bond = vlan_dev_real_dev(skb->dev);
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002842 }
2843
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002845 list_for_each_entry_rcu(ptype,
2846 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002847 if (ptype->type == type && (ptype->dev == null_or_orig ||
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002848 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2849 ptype->dev == null_or_bond)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002850 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002851 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 pt_prev = ptype;
2853 }
2854 }
2855
2856 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002857 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 } else {
2859 kfree_skb(skb);
2860 /* Jamal, now you will not able to escape explaining
2861 * me how you were going to use this. :-)
2862 */
2863 ret = NET_RX_DROP;
2864 }
2865
2866out:
2867 rcu_read_unlock();
2868 return ret;
2869}
Tom Herbert0a9627f2010-03-16 08:03:29 +00002870
2871/**
2872 * netif_receive_skb - process receive buffer from network
2873 * @skb: buffer to process
2874 *
2875 * netif_receive_skb() is the main receive data processing function.
2876 * It always succeeds. The buffer may be dropped during processing
2877 * for congestion control or by the protocol layers.
2878 *
2879 * This function may only be called from softirq context and interrupts
2880 * should be enabled.
2881 *
2882 * Return values (usually ignored):
2883 * NET_RX_SUCCESS: no congestion
2884 * NET_RX_DROP: packet was dropped
2885 */
2886int netif_receive_skb(struct sk_buff *skb)
2887{
Eric Dumazetdf334542010-03-24 19:13:54 +00002888#ifdef CONFIG_RPS
Tom Herbertfec5e652010-04-16 16:01:27 -07002889 struct rps_dev_flow voidflow, *rflow = &voidflow;
2890 int cpu, ret;
Tom Herbert0a9627f2010-03-16 08:03:29 +00002891
Tom Herbertfec5e652010-04-16 16:01:27 -07002892 rcu_read_lock();
Tom Herbert0a9627f2010-03-16 08:03:29 +00002893
Tom Herbertfec5e652010-04-16 16:01:27 -07002894 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2895
2896 if (cpu >= 0) {
2897 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2898 rcu_read_unlock();
2899 } else {
2900 rcu_read_unlock();
2901 ret = __netif_receive_skb(skb);
2902 }
2903
2904 return ret;
Tom Herbert1e94d722010-03-18 17:45:44 -07002905#else
2906 return __netif_receive_skb(skb);
2907#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00002908}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002909EXPORT_SYMBOL(netif_receive_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910
Eric Dumazet88751272010-04-19 05:07:33 +00002911/* Network device is going away, flush any packets still pending
2912 * Called with irqs disabled.
2913 */
Changli Gao152102c2010-03-30 20:16:22 +00002914static void flush_backlog(void *arg)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002915{
Changli Gao152102c2010-03-30 20:16:22 +00002916 struct net_device *dev = arg;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002917 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002918 struct sk_buff *skb, *tmp;
2919
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002920 rps_lock(sd);
2921 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp)
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002922 if (skb->dev == dev) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002923 __skb_unlink(skb, &sd->input_pkt_queue);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002924 kfree_skb(skb);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002925 input_queue_head_incr(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002926 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00002927 rps_unlock(sd);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002928}
2929
Herbert Xud565b0a2008-12-15 23:38:52 -08002930static int napi_gro_complete(struct sk_buff *skb)
2931{
2932 struct packet_type *ptype;
2933 __be16 type = skb->protocol;
2934 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2935 int err = -ENOENT;
2936
Herbert Xufc59f9a2009-04-14 15:11:06 -07002937 if (NAPI_GRO_CB(skb)->count == 1) {
2938 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002939 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002940 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002941
2942 rcu_read_lock();
2943 list_for_each_entry_rcu(ptype, head, list) {
2944 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2945 continue;
2946
2947 err = ptype->gro_complete(skb);
2948 break;
2949 }
2950 rcu_read_unlock();
2951
2952 if (err) {
2953 WARN_ON(&ptype->list == head);
2954 kfree_skb(skb);
2955 return NET_RX_SUCCESS;
2956 }
2957
2958out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002959 return netif_receive_skb(skb);
2960}
2961
David S. Miller11380a42010-01-19 13:46:10 -08002962static void napi_gro_flush(struct napi_struct *napi)
Herbert Xud565b0a2008-12-15 23:38:52 -08002963{
2964 struct sk_buff *skb, *next;
2965
2966 for (skb = napi->gro_list; skb; skb = next) {
2967 next = skb->next;
2968 skb->next = NULL;
2969 napi_gro_complete(skb);
2970 }
2971
Herbert Xu4ae55442009-02-08 18:00:36 +00002972 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002973 napi->gro_list = NULL;
2974}
Herbert Xud565b0a2008-12-15 23:38:52 -08002975
Ben Hutchings5b252f02009-10-29 07:17:09 +00002976enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002977{
2978 struct sk_buff **pp = NULL;
2979 struct packet_type *ptype;
2980 __be16 type = skb->protocol;
2981 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002982 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002983 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002984 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002985
2986 if (!(skb->dev->features & NETIF_F_GRO))
2987 goto normal;
2988
David S. Miller4cf704f2009-06-09 00:18:51 -07002989 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002990 goto normal;
2991
Herbert Xud565b0a2008-12-15 23:38:52 -08002992 rcu_read_lock();
2993 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002994 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2995 continue;
2996
Herbert Xu86911732009-01-29 14:19:50 +00002997 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002998 mac_len = skb->network_header - skb->mac_header;
2999 skb->mac_len = mac_len;
3000 NAPI_GRO_CB(skb)->same_flow = 0;
3001 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08003002 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003003
Herbert Xud565b0a2008-12-15 23:38:52 -08003004 pp = ptype->gro_receive(&napi->gro_list, skb);
3005 break;
3006 }
3007 rcu_read_unlock();
3008
3009 if (&ptype->list == head)
3010 goto normal;
3011
Herbert Xu0da2afd52008-12-26 14:57:42 -08003012 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003013 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08003014
Herbert Xud565b0a2008-12-15 23:38:52 -08003015 if (pp) {
3016 struct sk_buff *nskb = *pp;
3017
3018 *pp = nskb->next;
3019 nskb->next = NULL;
3020 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00003021 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08003022 }
3023
Herbert Xu0da2afd52008-12-26 14:57:42 -08003024 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08003025 goto ok;
3026
Herbert Xu4ae55442009-02-08 18:00:36 +00003027 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08003028 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08003029
Herbert Xu4ae55442009-02-08 18:00:36 +00003030 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08003031 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00003032 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003033 skb->next = napi->gro_list;
3034 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003035 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08003036
Herbert Xuad0f9902009-02-01 01:24:55 -08003037pull:
Herbert Xucb189782009-05-26 18:50:31 +00003038 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3039 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3040
3041 BUG_ON(skb->end - skb->tail < grow);
3042
3043 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3044
3045 skb->tail += grow;
3046 skb->data_len -= grow;
3047
3048 skb_shinfo(skb)->frags[0].page_offset += grow;
3049 skb_shinfo(skb)->frags[0].size -= grow;
3050
3051 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3052 put_page(skb_shinfo(skb)->frags[0].page);
3053 memmove(skb_shinfo(skb)->frags,
3054 skb_shinfo(skb)->frags + 1,
3055 --skb_shinfo(skb)->nr_frags);
3056 }
Herbert Xuad0f9902009-02-01 01:24:55 -08003057 }
3058
Herbert Xud565b0a2008-12-15 23:38:52 -08003059ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003060 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08003061
3062normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08003063 ret = GRO_NORMAL;
3064 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08003065}
Herbert Xu96e93ea2009-01-06 10:49:34 -08003066EXPORT_SYMBOL(dev_gro_receive);
3067
Ben Hutchings5b252f02009-10-29 07:17:09 +00003068static gro_result_t
3069__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003070{
3071 struct sk_buff *p;
3072
Herbert Xud1c76af2009-03-16 10:50:02 -07003073 if (netpoll_rx_on(skb))
3074 return GRO_NORMAL;
3075
Herbert Xu96e93ea2009-01-06 10:49:34 -08003076 for (p = napi->gro_list; p; p = p->next) {
Joe Perchesf64f9e72009-11-29 16:55:45 -08003077 NAPI_GRO_CB(p)->same_flow =
3078 (p->dev == skb->dev) &&
3079 !compare_ether_header(skb_mac_header(p),
3080 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08003081 NAPI_GRO_CB(p)->flush = 0;
3082 }
3083
3084 return dev_gro_receive(napi, skb);
3085}
Herbert Xu5d38a072009-01-04 16:13:40 -08003086
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003087gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08003088{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003089 switch (ret) {
3090 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003091 if (netif_receive_skb(skb))
3092 ret = GRO_DROP;
3093 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003094
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003095 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003096 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08003097 kfree_skb(skb);
3098 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003099
3100 case GRO_HELD:
3101 case GRO_MERGED:
3102 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08003103 }
3104
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003105 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003106}
3107EXPORT_SYMBOL(napi_skb_finish);
3108
Herbert Xu78a478d2009-05-26 18:50:21 +00003109void skb_gro_reset_offset(struct sk_buff *skb)
3110{
3111 NAPI_GRO_CB(skb)->data_offset = 0;
3112 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00003113 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00003114
Herbert Xu78d3fd02009-05-26 18:50:23 +00003115 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00003116 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00003117 NAPI_GRO_CB(skb)->frag0 =
3118 page_address(skb_shinfo(skb)->frags[0].page) +
3119 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00003120 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3121 }
Herbert Xu78a478d2009-05-26 18:50:21 +00003122}
3123EXPORT_SYMBOL(skb_gro_reset_offset);
3124
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003125gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003126{
Herbert Xu86911732009-01-29 14:19:50 +00003127 skb_gro_reset_offset(skb);
3128
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003129 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08003130}
3131EXPORT_SYMBOL(napi_gro_receive);
3132
Herbert Xu96e93ea2009-01-06 10:49:34 -08003133void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3134{
Herbert Xu96e93ea2009-01-06 10:49:34 -08003135 __skb_pull(skb, skb_headlen(skb));
3136 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3137
3138 napi->skb = skb;
3139}
3140EXPORT_SYMBOL(napi_reuse_skb);
3141
Herbert Xu76620aa2009-04-16 02:02:07 -07003142struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08003143{
Herbert Xu5d38a072009-01-04 16:13:40 -08003144 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003145
3146 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00003147 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3148 if (skb)
3149 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08003150 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08003151 return skb;
3152}
Herbert Xu76620aa2009-04-16 02:02:07 -07003153EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003154
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003155gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3156 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003157{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003158 switch (ret) {
3159 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003160 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003161 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003162
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003163 if (ret == GRO_HELD)
3164 skb_gro_pull(skb, -ETH_HLEN);
3165 else if (netif_receive_skb(skb))
3166 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003167 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003168
3169 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003170 case GRO_MERGED_FREE:
3171 napi_reuse_skb(napi, skb);
3172 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003173
3174 case GRO_MERGED:
3175 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003176 }
3177
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003178 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003179}
3180EXPORT_SYMBOL(napi_frags_finish);
3181
Herbert Xu76620aa2009-04-16 02:02:07 -07003182struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003183{
Herbert Xu76620aa2009-04-16 02:02:07 -07003184 struct sk_buff *skb = napi->skb;
3185 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00003186 unsigned int hlen;
3187 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07003188
3189 napi->skb = NULL;
3190
3191 skb_reset_mac_header(skb);
3192 skb_gro_reset_offset(skb);
3193
Herbert Xua5b1cf22009-05-26 18:50:28 +00003194 off = skb_gro_offset(skb);
3195 hlen = off + sizeof(*eth);
3196 eth = skb_gro_header_fast(skb, off);
3197 if (skb_gro_header_hard(skb, hlen)) {
3198 eth = skb_gro_header_slow(skb, hlen, off);
3199 if (unlikely(!eth)) {
3200 napi_reuse_skb(napi, skb);
3201 skb = NULL;
3202 goto out;
3203 }
Herbert Xu76620aa2009-04-16 02:02:07 -07003204 }
3205
3206 skb_gro_pull(skb, sizeof(*eth));
3207
3208 /*
3209 * This works because the only protocols we care about don't require
3210 * special handling. We'll fix it up properly at the end.
3211 */
3212 skb->protocol = eth->h_proto;
3213
3214out:
3215 return skb;
3216}
3217EXPORT_SYMBOL(napi_frags_skb);
3218
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003219gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07003220{
3221 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003222
3223 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003224 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003225
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003226 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08003227}
3228EXPORT_SYMBOL(napi_gro_frags);
3229
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003230static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231{
3232 int work = 0;
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003233 struct softnet_data *sd = &__get_cpu_var(softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003235 napi->weight = weight_p;
3236 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238
Changli Gao152102c2010-03-30 20:16:22 +00003239 local_irq_disable();
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003240 rps_lock(sd);
3241 skb = __skb_dequeue(&sd->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003242 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07003243 __napi_complete(napi);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003244 rps_unlock(sd);
Eric Dumazete4008272010-04-05 15:42:39 -07003245 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07003246 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003247 }
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003248 input_queue_head_incr(sd);
3249 rps_unlock(sd);
Changli Gao152102c2010-03-30 20:16:22 +00003250 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251
Tom Herbert0a9627f2010-03-16 08:03:29 +00003252 __netif_receive_skb(skb);
Eric Dumazet9958da02010-04-17 04:17:02 +00003253 } while (++work < quota);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003255 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256}
3257
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003258/**
3259 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003260 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003261 *
3262 * The entry's receive function will be scheduled to run
3263 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08003264void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003265{
3266 unsigned long flags;
3267
3268 local_irq_save(flags);
3269 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
3270 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3271 local_irq_restore(flags);
3272}
3273EXPORT_SYMBOL(__napi_schedule);
3274
Herbert Xud565b0a2008-12-15 23:38:52 -08003275void __napi_complete(struct napi_struct *n)
3276{
3277 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3278 BUG_ON(n->gro_list);
3279
3280 list_del(&n->poll_list);
3281 smp_mb__before_clear_bit();
3282 clear_bit(NAPI_STATE_SCHED, &n->state);
3283}
3284EXPORT_SYMBOL(__napi_complete);
3285
3286void napi_complete(struct napi_struct *n)
3287{
3288 unsigned long flags;
3289
3290 /*
3291 * don't let napi dequeue from the cpu poll list
3292 * just in case its running on a different cpu
3293 */
3294 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3295 return;
3296
3297 napi_gro_flush(n);
3298 local_irq_save(flags);
3299 __napi_complete(n);
3300 local_irq_restore(flags);
3301}
3302EXPORT_SYMBOL(napi_complete);
3303
3304void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3305 int (*poll)(struct napi_struct *, int), int weight)
3306{
3307 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00003308 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003309 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08003310 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003311 napi->poll = poll;
3312 napi->weight = weight;
3313 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08003314 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08003315#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08003316 spin_lock_init(&napi->poll_lock);
3317 napi->poll_owner = -1;
3318#endif
3319 set_bit(NAPI_STATE_SCHED, &napi->state);
3320}
3321EXPORT_SYMBOL(netif_napi_add);
3322
3323void netif_napi_del(struct napi_struct *napi)
3324{
3325 struct sk_buff *skb, *next;
3326
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08003327 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07003328 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08003329
3330 for (skb = napi->gro_list; skb; skb = next) {
3331 next = skb->next;
3332 skb->next = NULL;
3333 kfree_skb(skb);
3334 }
3335
3336 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00003337 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003338}
3339EXPORT_SYMBOL(netif_napi_del);
3340
Tom Herbert0a9627f2010-03-16 08:03:29 +00003341/*
Eric Dumazet88751272010-04-19 05:07:33 +00003342 * net_rps_action sends any pending IPI's for rps.
3343 * Note: called with local irq disabled, but exits with local irq enabled.
Tom Herbert0a9627f2010-03-16 08:03:29 +00003344 */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003345static void net_rps_action_and_irq_disable(void)
Tom Herbert0a9627f2010-03-16 08:03:29 +00003346{
Eric Dumazet88751272010-04-19 05:07:33 +00003347#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003348 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3349 struct softnet_data *remsd = sd->rps_ipi_list;
Tom Herbert0a9627f2010-03-16 08:03:29 +00003350
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003351 if (remsd) {
3352 sd->rps_ipi_list = NULL;
Eric Dumazet88751272010-04-19 05:07:33 +00003353
3354 local_irq_enable();
3355
3356 /* Send pending IPI's to kick RPS processing on remote cpus. */
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003357 while (remsd) {
3358 struct softnet_data *next = remsd->rps_ipi_next;
3359
3360 if (cpu_online(remsd->cpu))
3361 __smp_call_function_single(remsd->cpu,
3362 &remsd->csd, 0);
3363 remsd = next;
Eric Dumazet88751272010-04-19 05:07:33 +00003364 }
3365 } else
Tom Herbert1e94d722010-03-18 17:45:44 -07003366#endif
Eric Dumazet88751272010-04-19 05:07:33 +00003367 local_irq_enable();
3368}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003369
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370static void net_rx_action(struct softirq_action *h)
3371{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003372 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003373 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07003374 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07003375 void *have;
3376
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 local_irq_disable();
3378
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003379 while (!list_empty(list)) {
3380 struct napi_struct *n;
3381 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003383 /* If softirq window is exhuasted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003384 * Allow this to run for 2 jiffies since which will allow
3385 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003386 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003387 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003388 goto softnet_break;
3389
3390 local_irq_enable();
3391
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003392 /* Even though interrupts have been re-enabled, this
3393 * access is safe because interrupts can only add new
3394 * entries to the tail of this list, and only ->poll()
3395 * calls can remove this head entry from the list.
3396 */
stephen hemmingere5e26d72010-02-24 14:01:38 +00003397 n = list_first_entry(list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003399 have = netpoll_poll_lock(n);
3400
3401 weight = n->weight;
3402
David S. Miller0a7606c2007-10-29 21:28:47 -07003403 /* This NAPI_STATE_SCHED test is for avoiding a race
3404 * with netpoll's poll_napi(). Only the entity which
3405 * obtains the lock and sees NAPI_STATE_SCHED set will
3406 * actually make the ->poll() call. Therefore we avoid
3407 * accidently calling ->poll() when NAPI is not scheduled.
3408 */
3409 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00003410 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07003411 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00003412 trace_napi_poll(n);
3413 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003414
3415 WARN_ON_ONCE(work > weight);
3416
3417 budget -= work;
3418
3419 local_irq_disable();
3420
3421 /* Drivers must not modify the NAPI state if they
3422 * consume the entire weight. In such cases this code
3423 * still "owns" the NAPI instance and therefore can
3424 * move the instance around on the list at-will.
3425 */
David S. Millerfed17f32008-01-07 21:00:40 -08003426 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07003427 if (unlikely(napi_disable_pending(n))) {
3428 local_irq_enable();
3429 napi_complete(n);
3430 local_irq_disable();
3431 } else
David S. Millerfed17f32008-01-07 21:00:40 -08003432 list_move_tail(&n->poll_list, list);
3433 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003434
3435 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 }
3437out:
Eric Dumazete36fa2f2010-04-19 21:17:14 +00003438 net_rps_action_and_irq_disable();
Tom Herbert0a9627f2010-03-16 08:03:29 +00003439
Chris Leechdb217332006-06-17 21:24:58 -07003440#ifdef CONFIG_NET_DMA
3441 /*
3442 * There may not be any more sk_buffs coming right now, so push
3443 * any pending DMA copies to hardware
3444 */
Dan Williams2ba05622009-01-06 11:38:14 -07003445 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07003446#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003447
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 return;
3449
3450softnet_break:
3451 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3452 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3453 goto out;
3454}
3455
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003456static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457
3458/**
3459 * register_gifconf - register a SIOCGIF handler
3460 * @family: Address family
3461 * @gifconf: Function handler
3462 *
3463 * Register protocol dependent address dumping routines. The handler
3464 * that is passed must not be freed or reused until it has been replaced
3465 * by another handler.
3466 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003467int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468{
3469 if (family >= NPROTO)
3470 return -EINVAL;
3471 gifconf_list[family] = gifconf;
3472 return 0;
3473}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003474EXPORT_SYMBOL(register_gifconf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475
3476
3477/*
3478 * Map an interface index to its name (SIOCGIFNAME)
3479 */
3480
3481/*
3482 * We need this ioctl for efficient implementation of the
3483 * if_indextoname() function required by the IPv6 API. Without
3484 * it, we would have to search all the interfaces to find a
3485 * match. --pb
3486 */
3487
Eric W. Biederman881d9662007-09-17 11:56:21 -07003488static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489{
3490 struct net_device *dev;
3491 struct ifreq ifr;
3492
3493 /*
3494 * Fetch the caller's info block.
3495 */
3496
3497 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3498 return -EFAULT;
3499
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003500 rcu_read_lock();
3501 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003503 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 return -ENODEV;
3505 }
3506
3507 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003508 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509
3510 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3511 return -EFAULT;
3512 return 0;
3513}
3514
3515/*
3516 * Perform a SIOCGIFCONF call. This structure will change
3517 * size eventually, and there is nothing I can do about it.
3518 * Thus we will need a 'compatibility mode'.
3519 */
3520
Eric W. Biederman881d9662007-09-17 11:56:21 -07003521static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522{
3523 struct ifconf ifc;
3524 struct net_device *dev;
3525 char __user *pos;
3526 int len;
3527 int total;
3528 int i;
3529
3530 /*
3531 * Fetch the caller's info block.
3532 */
3533
3534 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3535 return -EFAULT;
3536
3537 pos = ifc.ifc_buf;
3538 len = ifc.ifc_len;
3539
3540 /*
3541 * Loop over the interfaces, and write an info block for each.
3542 */
3543
3544 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003545 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 for (i = 0; i < NPROTO; i++) {
3547 if (gifconf_list[i]) {
3548 int done;
3549 if (!pos)
3550 done = gifconf_list[i](dev, NULL, 0);
3551 else
3552 done = gifconf_list[i](dev, pos + total,
3553 len - total);
3554 if (done < 0)
3555 return -EFAULT;
3556 total += done;
3557 }
3558 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003559 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560
3561 /*
3562 * All done. Write the updated control block back to the caller.
3563 */
3564 ifc.ifc_len = total;
3565
3566 /*
3567 * Both BSD and Solaris return 0 here, so we do too.
3568 */
3569 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3570}
3571
3572#ifdef CONFIG_PROC_FS
3573/*
3574 * This is invoked by the /proc filesystem handler to display a device
3575 * in detail.
3576 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003578 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579{
Denis V. Luneve372c412007-11-19 22:31:54 -08003580 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003581 loff_t off;
3582 struct net_device *dev;
3583
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003584 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07003585 if (!*pos)
3586 return SEQ_START_TOKEN;
3587
3588 off = 1;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003589 for_each_netdev_rcu(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003590 if (off++ == *pos)
3591 return dev;
3592
3593 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594}
3595
3596void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3597{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003598 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3599 first_net_device(seq_file_net(seq)) :
3600 next_net_device((struct net_device *)v);
3601
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 ++*pos;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003603 return rcu_dereference(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604}
3605
3606void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003607 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003609 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610}
3611
3612static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3613{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003614 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615
Jesper Dangaard Brouer2d13baf2010-01-05 05:50:52 +00003616 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
Rusty Russell5a1b5892007-04-28 21:04:03 -07003617 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3618 dev->name, stats->rx_bytes, stats->rx_packets,
3619 stats->rx_errors,
3620 stats->rx_dropped + stats->rx_missed_errors,
3621 stats->rx_fifo_errors,
3622 stats->rx_length_errors + stats->rx_over_errors +
3623 stats->rx_crc_errors + stats->rx_frame_errors,
3624 stats->rx_compressed, stats->multicast,
3625 stats->tx_bytes, stats->tx_packets,
3626 stats->tx_errors, stats->tx_dropped,
3627 stats->tx_fifo_errors, stats->collisions,
3628 stats->tx_carrier_errors +
3629 stats->tx_aborted_errors +
3630 stats->tx_window_errors +
3631 stats->tx_heartbeat_errors,
3632 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633}
3634
3635/*
3636 * Called from the PROCfs module. This now uses the new arbitrary sized
3637 * /proc/net interface to create /proc/net/dev
3638 */
3639static int dev_seq_show(struct seq_file *seq, void *v)
3640{
3641 if (v == SEQ_START_TOKEN)
3642 seq_puts(seq, "Inter-| Receive "
3643 " | Transmit\n"
3644 " face |bytes packets errs drop fifo frame "
3645 "compressed multicast|bytes packets errs "
3646 "drop fifo colls carrier compressed\n");
3647 else
3648 dev_seq_printf_stats(seq, v);
3649 return 0;
3650}
3651
3652static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3653{
3654 struct netif_rx_stats *rc = NULL;
3655
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003656 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003657 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658 rc = &per_cpu(netdev_rx_stat, *pos);
3659 break;
3660 } else
3661 ++*pos;
3662 return rc;
3663}
3664
3665static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3666{
3667 return softnet_get_online(pos);
3668}
3669
3670static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3671{
3672 ++*pos;
3673 return softnet_get_online(pos);
3674}
3675
3676static void softnet_seq_stop(struct seq_file *seq, void *v)
3677{
3678}
3679
3680static int softnet_seq_show(struct seq_file *seq, void *v)
3681{
3682 struct netif_rx_stats *s = v;
3683
Tom Herbert0a9627f2010-03-16 08:03:29 +00003684 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003685 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003686 0, 0, 0, 0, /* was fastroute */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003687 s->cpu_collision, s->received_rps);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 return 0;
3689}
3690
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003691static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692 .start = dev_seq_start,
3693 .next = dev_seq_next,
3694 .stop = dev_seq_stop,
3695 .show = dev_seq_show,
3696};
3697
3698static int dev_seq_open(struct inode *inode, struct file *file)
3699{
Denis V. Luneve372c412007-11-19 22:31:54 -08003700 return seq_open_net(inode, file, &dev_seq_ops,
3701 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702}
3703
Arjan van de Ven9a321442007-02-12 00:55:35 -08003704static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 .owner = THIS_MODULE,
3706 .open = dev_seq_open,
3707 .read = seq_read,
3708 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003709 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710};
3711
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003712static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713 .start = softnet_seq_start,
3714 .next = softnet_seq_next,
3715 .stop = softnet_seq_stop,
3716 .show = softnet_seq_show,
3717};
3718
3719static int softnet_seq_open(struct inode *inode, struct file *file)
3720{
3721 return seq_open(file, &softnet_seq_ops);
3722}
3723
Arjan van de Ven9a321442007-02-12 00:55:35 -08003724static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725 .owner = THIS_MODULE,
3726 .open = softnet_seq_open,
3727 .read = seq_read,
3728 .llseek = seq_lseek,
3729 .release = seq_release,
3730};
3731
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003732static void *ptype_get_idx(loff_t pos)
3733{
3734 struct packet_type *pt = NULL;
3735 loff_t i = 0;
3736 int t;
3737
3738 list_for_each_entry_rcu(pt, &ptype_all, list) {
3739 if (i == pos)
3740 return pt;
3741 ++i;
3742 }
3743
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003744 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003745 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3746 if (i == pos)
3747 return pt;
3748 ++i;
3749 }
3750 }
3751 return NULL;
3752}
3753
3754static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003755 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003756{
3757 rcu_read_lock();
3758 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3759}
3760
3761static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3762{
3763 struct packet_type *pt;
3764 struct list_head *nxt;
3765 int hash;
3766
3767 ++*pos;
3768 if (v == SEQ_START_TOKEN)
3769 return ptype_get_idx(0);
3770
3771 pt = v;
3772 nxt = pt->list.next;
3773 if (pt->type == htons(ETH_P_ALL)) {
3774 if (nxt != &ptype_all)
3775 goto found;
3776 hash = 0;
3777 nxt = ptype_base[0].next;
3778 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003779 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003780
3781 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003782 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003783 return NULL;
3784 nxt = ptype_base[hash].next;
3785 }
3786found:
3787 return list_entry(nxt, struct packet_type, list);
3788}
3789
3790static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003791 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003792{
3793 rcu_read_unlock();
3794}
3795
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003796static int ptype_seq_show(struct seq_file *seq, void *v)
3797{
3798 struct packet_type *pt = v;
3799
3800 if (v == SEQ_START_TOKEN)
3801 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003802 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003803 if (pt->type == htons(ETH_P_ALL))
3804 seq_puts(seq, "ALL ");
3805 else
3806 seq_printf(seq, "%04x", ntohs(pt->type));
3807
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003808 seq_printf(seq, " %-8s %pF\n",
3809 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003810 }
3811
3812 return 0;
3813}
3814
3815static const struct seq_operations ptype_seq_ops = {
3816 .start = ptype_seq_start,
3817 .next = ptype_seq_next,
3818 .stop = ptype_seq_stop,
3819 .show = ptype_seq_show,
3820};
3821
3822static int ptype_seq_open(struct inode *inode, struct file *file)
3823{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003824 return seq_open_net(inode, file, &ptype_seq_ops,
3825 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003826}
3827
3828static const struct file_operations ptype_seq_fops = {
3829 .owner = THIS_MODULE,
3830 .open = ptype_seq_open,
3831 .read = seq_read,
3832 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003833 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003834};
3835
3836
Pavel Emelyanov46650792007-10-08 20:38:39 -07003837static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838{
3839 int rc = -ENOMEM;
3840
Eric W. Biederman881d9662007-09-17 11:56:21 -07003841 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003843 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003845 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003846 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003847
Eric W. Biederman881d9662007-09-17 11:56:21 -07003848 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003849 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003850 rc = 0;
3851out:
3852 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003853out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003854 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003856 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003858 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 goto out;
3860}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003861
Pavel Emelyanov46650792007-10-08 20:38:39 -07003862static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003863{
3864 wext_proc_exit(net);
3865
3866 proc_net_remove(net, "ptype");
3867 proc_net_remove(net, "softnet_stat");
3868 proc_net_remove(net, "dev");
3869}
3870
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003871static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003872 .init = dev_proc_net_init,
3873 .exit = dev_proc_net_exit,
3874};
3875
3876static int __init dev_proc_init(void)
3877{
3878 return register_pernet_subsys(&dev_proc_ops);
3879}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880#else
3881#define dev_proc_init() 0
3882#endif /* CONFIG_PROC_FS */
3883
3884
3885/**
3886 * netdev_set_master - set up master/slave pair
3887 * @slave: slave device
3888 * @master: new master device
3889 *
3890 * Changes the master device of the slave. Pass %NULL to break the
3891 * bonding. The caller must hold the RTNL semaphore. On a failure
3892 * a negative errno code is returned. On success the reference counts
3893 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3894 * function returns zero.
3895 */
3896int netdev_set_master(struct net_device *slave, struct net_device *master)
3897{
3898 struct net_device *old = slave->master;
3899
3900 ASSERT_RTNL();
3901
3902 if (master) {
3903 if (old)
3904 return -EBUSY;
3905 dev_hold(master);
3906 }
3907
3908 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003909
Eric Dumazet283f2fe2010-03-18 13:37:40 +00003910 if (old) {
3911 synchronize_net();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912 dev_put(old);
Eric Dumazet283f2fe2010-03-18 13:37:40 +00003913 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914 if (master)
3915 slave->flags |= IFF_SLAVE;
3916 else
3917 slave->flags &= ~IFF_SLAVE;
3918
3919 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3920 return 0;
3921}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003922EXPORT_SYMBOL(netdev_set_master);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003924static void dev_change_rx_flags(struct net_device *dev, int flags)
3925{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003926 const struct net_device_ops *ops = dev->netdev_ops;
3927
3928 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3929 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003930}
3931
Wang Chendad9b332008-06-18 01:48:28 -07003932static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003933{
3934 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003935 uid_t uid;
3936 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003937
Patrick McHardy24023452007-07-14 18:51:31 -07003938 ASSERT_RTNL();
3939
Wang Chendad9b332008-06-18 01:48:28 -07003940 dev->flags |= IFF_PROMISC;
3941 dev->promiscuity += inc;
3942 if (dev->promiscuity == 0) {
3943 /*
3944 * Avoid overflow.
3945 * If inc causes overflow, untouch promisc and return error.
3946 */
3947 if (inc < 0)
3948 dev->flags &= ~IFF_PROMISC;
3949 else {
3950 dev->promiscuity -= inc;
3951 printk(KERN_WARNING "%s: promiscuity touches roof, "
3952 "set promiscuity failed, promiscuity feature "
3953 "of device might be broken.\n", dev->name);
3954 return -EOVERFLOW;
3955 }
3956 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003957 if (dev->flags != old_flags) {
3958 printk(KERN_INFO "device %s %s promiscuous mode\n",
3959 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3960 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003961 if (audit_enabled) {
3962 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003963 audit_log(current->audit_context, GFP_ATOMIC,
3964 AUDIT_ANOM_PROMISCUOUS,
3965 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3966 dev->name, (dev->flags & IFF_PROMISC),
3967 (old_flags & IFF_PROMISC),
3968 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003969 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003970 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003971 }
Patrick McHardy24023452007-07-14 18:51:31 -07003972
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003973 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003974 }
Wang Chendad9b332008-06-18 01:48:28 -07003975 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003976}
3977
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978/**
3979 * dev_set_promiscuity - update promiscuity count on a device
3980 * @dev: device
3981 * @inc: modifier
3982 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003983 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 * remains above zero the interface remains promiscuous. Once it hits zero
3985 * the device reverts back to normal filtering operation. A negative inc
3986 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003987 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988 */
Wang Chendad9b332008-06-18 01:48:28 -07003989int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990{
3991 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003992 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
Wang Chendad9b332008-06-18 01:48:28 -07003994 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003995 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003996 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003997 if (dev->flags != old_flags)
3998 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003999 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004001EXPORT_SYMBOL(dev_set_promiscuity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004002
4003/**
4004 * dev_set_allmulti - update allmulti count on a device
4005 * @dev: device
4006 * @inc: modifier
4007 *
4008 * Add or remove reception of all multicast frames to a device. While the
4009 * count in the device remains above zero the interface remains listening
4010 * to all interfaces. Once it hits zero the device reverts back to normal
4011 * filtering operation. A negative @inc value is used to drop the counter
4012 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07004013 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 */
4015
Wang Chendad9b332008-06-18 01:48:28 -07004016int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017{
4018 unsigned short old_flags = dev->flags;
4019
Patrick McHardy24023452007-07-14 18:51:31 -07004020 ASSERT_RTNL();
4021
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07004023 dev->allmulti += inc;
4024 if (dev->allmulti == 0) {
4025 /*
4026 * Avoid overflow.
4027 * If inc causes overflow, untouch allmulti and return error.
4028 */
4029 if (inc < 0)
4030 dev->flags &= ~IFF_ALLMULTI;
4031 else {
4032 dev->allmulti -= inc;
4033 printk(KERN_WARNING "%s: allmulti touches roof, "
4034 "set allmulti failed, allmulti feature of "
4035 "device might be broken.\n", dev->name);
4036 return -EOVERFLOW;
4037 }
4038 }
Patrick McHardy24023452007-07-14 18:51:31 -07004039 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004040 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07004041 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07004042 }
Wang Chendad9b332008-06-18 01:48:28 -07004043 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07004044}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004045EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07004046
4047/*
4048 * Upload unicast and multicast address lists to device and
4049 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08004050 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07004051 * are present.
4052 */
4053void __dev_set_rx_mode(struct net_device *dev)
4054{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004055 const struct net_device_ops *ops = dev->netdev_ops;
4056
Patrick McHardy4417da62007-06-27 01:28:10 -07004057 /* dev_open will call this function so the list will stay sane. */
4058 if (!(dev->flags&IFF_UP))
4059 return;
4060
4061 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09004062 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07004063
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004064 if (ops->ndo_set_rx_mode)
4065 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004066 else {
4067 /* Unicast addresses changes may only happen under the rtnl,
4068 * therefore calling __dev_set_promiscuity here is safe.
4069 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004070 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004071 __dev_set_promiscuity(dev, 1);
4072 dev->uc_promisc = 1;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08004073 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07004074 __dev_set_promiscuity(dev, -1);
4075 dev->uc_promisc = 0;
4076 }
4077
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004078 if (ops->ndo_set_multicast_list)
4079 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004080 }
4081}
4082
4083void dev_set_rx_mode(struct net_device *dev)
4084{
David S. Millerb9e40852008-07-15 00:15:08 -07004085 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004086 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07004087 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088}
4089
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004090/**
4091 * dev_get_flags - get flags reported to userspace
4092 * @dev: device
4093 *
4094 * Get the combination of flag bits exported through APIs to userspace.
4095 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096unsigned dev_get_flags(const struct net_device *dev)
4097{
4098 unsigned flags;
4099
4100 flags = (dev->flags & ~(IFF_PROMISC |
4101 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004102 IFF_RUNNING |
4103 IFF_LOWER_UP |
4104 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105 (dev->gflags & (IFF_PROMISC |
4106 IFF_ALLMULTI));
4107
Stefan Rompfb00055a2006-03-20 17:09:11 -08004108 if (netif_running(dev)) {
4109 if (netif_oper_up(dev))
4110 flags |= IFF_RUNNING;
4111 if (netif_carrier_ok(dev))
4112 flags |= IFF_LOWER_UP;
4113 if (netif_dormant(dev))
4114 flags |= IFF_DORMANT;
4115 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116
4117 return flags;
4118}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004119EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120
Patrick McHardybd380812010-02-26 06:34:53 +00004121int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123 int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004124 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125
Patrick McHardy24023452007-07-14 18:51:31 -07004126 ASSERT_RTNL();
4127
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 /*
4129 * Set the flags on our device.
4130 */
4131
4132 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4133 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4134 IFF_AUTOMEDIA)) |
4135 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4136 IFF_ALLMULTI));
4137
4138 /*
4139 * Load in the correct multicast list now the flags have changed.
4140 */
4141
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004142 if ((old_flags ^ flags) & IFF_MULTICAST)
4143 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004144
Patrick McHardy4417da62007-06-27 01:28:10 -07004145 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146
4147 /*
4148 * Have we downed the interface. We handle IFF_UP ourselves
4149 * according to user attempts to set it, rather than blindly
4150 * setting it.
4151 */
4152
4153 ret = 0;
4154 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00004155 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156
4157 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004158 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159 }
4160
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004162 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4163
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 dev->gflags ^= IFF_PROMISC;
4165 dev_set_promiscuity(dev, inc);
4166 }
4167
4168 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4169 is important. Some (broken) drivers set IFF_PROMISC, when
4170 IFF_ALLMULTI is requested not asking us and not reporting.
4171 */
4172 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004173 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4174
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175 dev->gflags ^= IFF_ALLMULTI;
4176 dev_set_allmulti(dev, inc);
4177 }
4178
Patrick McHardybd380812010-02-26 06:34:53 +00004179 return ret;
4180}
4181
4182void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4183{
4184 unsigned int changes = dev->flags ^ old_flags;
4185
4186 if (changes & IFF_UP) {
4187 if (dev->flags & IFF_UP)
4188 call_netdevice_notifiers(NETDEV_UP, dev);
4189 else
4190 call_netdevice_notifiers(NETDEV_DOWN, dev);
4191 }
4192
4193 if (dev->flags & IFF_UP &&
4194 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4195 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4196}
4197
4198/**
4199 * dev_change_flags - change device settings
4200 * @dev: device
4201 * @flags: device state flags
4202 *
4203 * Change settings on device based state flags. The flags are
4204 * in the userspace exported format.
4205 */
4206int dev_change_flags(struct net_device *dev, unsigned flags)
4207{
4208 int ret, changes;
4209 int old_flags = dev->flags;
4210
4211 ret = __dev_change_flags(dev, flags);
4212 if (ret < 0)
4213 return ret;
4214
4215 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07004216 if (changes)
4217 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218
Patrick McHardybd380812010-02-26 06:34:53 +00004219 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 return ret;
4221}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004222EXPORT_SYMBOL(dev_change_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004224/**
4225 * dev_set_mtu - Change maximum transfer unit
4226 * @dev: device
4227 * @new_mtu: new transfer unit
4228 *
4229 * Change the maximum transfer size of the network device.
4230 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231int dev_set_mtu(struct net_device *dev, int new_mtu)
4232{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004233 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 int err;
4235
4236 if (new_mtu == dev->mtu)
4237 return 0;
4238
4239 /* MTU must be positive. */
4240 if (new_mtu < 0)
4241 return -EINVAL;
4242
4243 if (!netif_device_present(dev))
4244 return -ENODEV;
4245
4246 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004247 if (ops->ndo_change_mtu)
4248 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 else
4250 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004251
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004253 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254 return err;
4255}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004256EXPORT_SYMBOL(dev_set_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004258/**
4259 * dev_set_mac_address - Change Media Access Control Address
4260 * @dev: device
4261 * @sa: new address
4262 *
4263 * Change the hardware (MAC) address of the device
4264 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4266{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004267 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 int err;
4269
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004270 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004271 return -EOPNOTSUPP;
4272 if (sa->sa_family != dev->type)
4273 return -EINVAL;
4274 if (!netif_device_present(dev))
4275 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004276 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004278 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 return err;
4280}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004281EXPORT_SYMBOL(dev_set_mac_address);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282
4283/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00004284 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004286static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287{
4288 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00004289 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290
4291 if (!dev)
4292 return -ENODEV;
4293
4294 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004295 case SIOCGIFFLAGS: /* Get interface flags */
4296 ifr->ifr_flags = (short) dev_get_flags(dev);
4297 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004299 case SIOCGIFMETRIC: /* Get the metric on the interface
4300 (currently unused) */
4301 ifr->ifr_metric = 0;
4302 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004304 case SIOCGIFMTU: /* Get the MTU of a device */
4305 ifr->ifr_mtu = dev->mtu;
4306 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004308 case SIOCGIFHWADDR:
4309 if (!dev->addr_len)
4310 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4311 else
4312 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4313 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4314 ifr->ifr_hwaddr.sa_family = dev->type;
4315 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004317 case SIOCGIFSLAVE:
4318 err = -EINVAL;
4319 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004320
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004321 case SIOCGIFMAP:
4322 ifr->ifr_map.mem_start = dev->mem_start;
4323 ifr->ifr_map.mem_end = dev->mem_end;
4324 ifr->ifr_map.base_addr = dev->base_addr;
4325 ifr->ifr_map.irq = dev->irq;
4326 ifr->ifr_map.dma = dev->dma;
4327 ifr->ifr_map.port = dev->if_port;
4328 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004329
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004330 case SIOCGIFINDEX:
4331 ifr->ifr_ifindex = dev->ifindex;
4332 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004333
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004334 case SIOCGIFTXQLEN:
4335 ifr->ifr_qlen = dev->tx_queue_len;
4336 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004337
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004338 default:
4339 /* dev_ioctl() should ensure this case
4340 * is never reached
4341 */
4342 WARN_ON(1);
4343 err = -EINVAL;
4344 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004345
4346 }
4347 return err;
4348}
4349
4350/*
4351 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4352 */
4353static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4354{
4355 int err;
4356 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004357 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004358
4359 if (!dev)
4360 return -ENODEV;
4361
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004362 ops = dev->netdev_ops;
4363
Jeff Garzik14e3e072007-10-08 00:06:32 -07004364 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004365 case SIOCSIFFLAGS: /* Set interface flags */
4366 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004367
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004368 case SIOCSIFMETRIC: /* Set the metric on the interface
4369 (currently unused) */
4370 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004371
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004372 case SIOCSIFMTU: /* Set the MTU of a device */
4373 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004374
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004375 case SIOCSIFHWADDR:
4376 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004378 case SIOCSIFHWBROADCAST:
4379 if (ifr->ifr_hwaddr.sa_family != dev->type)
4380 return -EINVAL;
4381 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4382 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4383 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4384 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004386 case SIOCSIFMAP:
4387 if (ops->ndo_set_config) {
4388 if (!netif_device_present(dev))
4389 return -ENODEV;
4390 return ops->ndo_set_config(dev, &ifr->ifr_map);
4391 }
4392 return -EOPNOTSUPP;
4393
4394 case SIOCADDMULTI:
4395 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4396 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4397 return -EINVAL;
4398 if (!netif_device_present(dev))
4399 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004400 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004401
4402 case SIOCDELMULTI:
4403 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4404 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4405 return -EINVAL;
4406 if (!netif_device_present(dev))
4407 return -ENODEV;
Jiri Pirko22bedad32010-04-01 21:22:57 +00004408 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004409
4410 case SIOCSIFTXQLEN:
4411 if (ifr->ifr_qlen < 0)
4412 return -EINVAL;
4413 dev->tx_queue_len = ifr->ifr_qlen;
4414 return 0;
4415
4416 case SIOCSIFNAME:
4417 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4418 return dev_change_name(dev, ifr->ifr_newname);
4419
4420 /*
4421 * Unknown or private ioctl
4422 */
4423 default:
4424 if ((cmd >= SIOCDEVPRIVATE &&
4425 cmd <= SIOCDEVPRIVATE + 15) ||
4426 cmd == SIOCBONDENSLAVE ||
4427 cmd == SIOCBONDRELEASE ||
4428 cmd == SIOCBONDSETHWADDR ||
4429 cmd == SIOCBONDSLAVEINFOQUERY ||
4430 cmd == SIOCBONDINFOQUERY ||
4431 cmd == SIOCBONDCHANGEACTIVE ||
4432 cmd == SIOCGMIIPHY ||
4433 cmd == SIOCGMIIREG ||
4434 cmd == SIOCSMIIREG ||
4435 cmd == SIOCBRADDIF ||
4436 cmd == SIOCBRDELIF ||
4437 cmd == SIOCSHWTSTAMP ||
4438 cmd == SIOCWANDEV) {
4439 err = -EOPNOTSUPP;
4440 if (ops->ndo_do_ioctl) {
4441 if (netif_device_present(dev))
4442 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4443 else
4444 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004446 } else
4447 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448
4449 }
4450 return err;
4451}
4452
4453/*
4454 * This function handles all "interface"-type I/O control requests. The actual
4455 * 'doing' part of this is dev_ifsioc above.
4456 */
4457
4458/**
4459 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004460 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461 * @cmd: command to issue
4462 * @arg: pointer to a struct ifreq in user space
4463 *
4464 * Issue ioctl functions to devices. This is normally called by the
4465 * user space syscall interfaces but can sometimes be useful for
4466 * other purposes. The return value is the return from the syscall if
4467 * positive or a negative errno code on error.
4468 */
4469
Eric W. Biederman881d9662007-09-17 11:56:21 -07004470int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471{
4472 struct ifreq ifr;
4473 int ret;
4474 char *colon;
4475
4476 /* One special case: SIOCGIFCONF takes ifconf argument
4477 and requires shared lock, because it sleeps writing
4478 to user space.
4479 */
4480
4481 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004482 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004483 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004484 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485 return ret;
4486 }
4487 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004488 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489
4490 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4491 return -EFAULT;
4492
4493 ifr.ifr_name[IFNAMSIZ-1] = 0;
4494
4495 colon = strchr(ifr.ifr_name, ':');
4496 if (colon)
4497 *colon = 0;
4498
4499 /*
4500 * See which interface the caller is talking about.
4501 */
4502
4503 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004504 /*
4505 * These ioctl calls:
4506 * - can be done by all.
4507 * - atomic and do not require locking.
4508 * - return a value
4509 */
4510 case SIOCGIFFLAGS:
4511 case SIOCGIFMETRIC:
4512 case SIOCGIFMTU:
4513 case SIOCGIFHWADDR:
4514 case SIOCGIFSLAVE:
4515 case SIOCGIFMAP:
4516 case SIOCGIFINDEX:
4517 case SIOCGIFTXQLEN:
4518 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004519 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004520 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004521 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004522 if (!ret) {
4523 if (colon)
4524 *colon = ':';
4525 if (copy_to_user(arg, &ifr,
4526 sizeof(struct ifreq)))
4527 ret = -EFAULT;
4528 }
4529 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004530
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004531 case SIOCETHTOOL:
4532 dev_load(net, ifr.ifr_name);
4533 rtnl_lock();
4534 ret = dev_ethtool(net, &ifr);
4535 rtnl_unlock();
4536 if (!ret) {
4537 if (colon)
4538 *colon = ':';
4539 if (copy_to_user(arg, &ifr,
4540 sizeof(struct ifreq)))
4541 ret = -EFAULT;
4542 }
4543 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004545 /*
4546 * These ioctl calls:
4547 * - require superuser power.
4548 * - require strict serialization.
4549 * - return a value
4550 */
4551 case SIOCGMIIPHY:
4552 case SIOCGMIIREG:
4553 case SIOCSIFNAME:
4554 if (!capable(CAP_NET_ADMIN))
4555 return -EPERM;
4556 dev_load(net, ifr.ifr_name);
4557 rtnl_lock();
4558 ret = dev_ifsioc(net, &ifr, cmd);
4559 rtnl_unlock();
4560 if (!ret) {
4561 if (colon)
4562 *colon = ':';
4563 if (copy_to_user(arg, &ifr,
4564 sizeof(struct ifreq)))
4565 ret = -EFAULT;
4566 }
4567 return ret;
4568
4569 /*
4570 * These ioctl calls:
4571 * - require superuser power.
4572 * - require strict serialization.
4573 * - do not return a value
4574 */
4575 case SIOCSIFFLAGS:
4576 case SIOCSIFMETRIC:
4577 case SIOCSIFMTU:
4578 case SIOCSIFMAP:
4579 case SIOCSIFHWADDR:
4580 case SIOCSIFSLAVE:
4581 case SIOCADDMULTI:
4582 case SIOCDELMULTI:
4583 case SIOCSIFHWBROADCAST:
4584 case SIOCSIFTXQLEN:
4585 case SIOCSMIIREG:
4586 case SIOCBONDENSLAVE:
4587 case SIOCBONDRELEASE:
4588 case SIOCBONDSETHWADDR:
4589 case SIOCBONDCHANGEACTIVE:
4590 case SIOCBRADDIF:
4591 case SIOCBRDELIF:
4592 case SIOCSHWTSTAMP:
4593 if (!capable(CAP_NET_ADMIN))
4594 return -EPERM;
4595 /* fall through */
4596 case SIOCBONDSLAVEINFOQUERY:
4597 case SIOCBONDINFOQUERY:
4598 dev_load(net, ifr.ifr_name);
4599 rtnl_lock();
4600 ret = dev_ifsioc(net, &ifr, cmd);
4601 rtnl_unlock();
4602 return ret;
4603
4604 case SIOCGIFMEM:
4605 /* Get the per device memory space. We can add this but
4606 * currently do not support it */
4607 case SIOCSIFMEM:
4608 /* Set the per device memory buffer space.
4609 * Not applicable in our case */
4610 case SIOCSIFLINK:
4611 return -EINVAL;
4612
4613 /*
4614 * Unknown or private ioctl.
4615 */
4616 default:
4617 if (cmd == SIOCWANDEV ||
4618 (cmd >= SIOCDEVPRIVATE &&
4619 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004620 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004621 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004622 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004624 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004626 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004627 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004628 }
4629 /* Take care of Wireless Extensions */
4630 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4631 return wext_handle_ioctl(net, &ifr, cmd, arg);
4632 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004633 }
4634}
4635
4636
4637/**
4638 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004639 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004640 *
4641 * Returns a suitable unique value for a new device interface
4642 * number. The caller must hold the rtnl semaphore or the
4643 * dev_base_lock to be sure it remains unique.
4644 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004645static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646{
4647 static int ifindex;
4648 for (;;) {
4649 if (++ifindex <= 0)
4650 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004651 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652 return ifindex;
4653 }
4654}
4655
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004657static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004659static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004660{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662}
4663
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004664static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004665{
Krishna Kumare93737b2009-12-08 22:26:02 +00004666 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004667
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004668 BUG_ON(dev_boot_phase);
4669 ASSERT_RTNL();
4670
Krishna Kumare93737b2009-12-08 22:26:02 +00004671 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004672 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00004673 * for initialization unwind. Remove those
4674 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004675 */
4676 if (dev->reg_state == NETREG_UNINITIALIZED) {
4677 pr_debug("unregister_netdevice: device %s/%p never "
4678 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004679
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004680 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004681 list_del(&dev->unreg_list);
4682 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004683 }
4684
4685 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4686
4687 /* If device is running, close it first. */
4688 dev_close(dev);
4689
4690 /* And unlink it from device chain. */
4691 unlist_netdevice(dev);
4692
4693 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004694 }
4695
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004696 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004697
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004698 list_for_each_entry(dev, head, unreg_list) {
4699 /* Shutdown queueing discipline. */
4700 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004701
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004702
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004703 /* Notify protocols, that we are about to destroy
4704 this device. They should clean all the things.
4705 */
4706 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4707
Patrick McHardya2835762010-02-26 06:34:51 +00004708 if (!dev->rtnl_link_ops ||
4709 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4710 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4711
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004712 /*
4713 * Flush the unicast and multicast chains
4714 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00004715 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00004716 dev_mc_flush(dev);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004717
4718 if (dev->netdev_ops->ndo_uninit)
4719 dev->netdev_ops->ndo_uninit(dev);
4720
4721 /* Notifier chain MUST detach us from master device. */
4722 WARN_ON(dev->master);
4723
4724 /* Remove entries from kobject tree */
4725 netdev_unregister_kobject(dev);
4726 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004727
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004728 /* Process any work delayed until the end of the batch */
stephen hemmingere5e26d72010-02-24 14:01:38 +00004729 dev = list_first_entry(head, struct net_device, unreg_list);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004730 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4731
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004732 synchronize_net();
4733
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004734 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004735 dev_put(dev);
4736}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004737
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004738static void rollback_registered(struct net_device *dev)
4739{
4740 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004741
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004742 list_add(&dev->unreg_list, &single);
4743 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004744}
4745
David S. Millere8a04642008-07-17 00:34:19 -07004746static void __netdev_init_queue_locks_one(struct net_device *dev,
4747 struct netdev_queue *dev_queue,
4748 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004749{
4750 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004751 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004752 dev_queue->xmit_lock_owner = -1;
4753}
4754
4755static void netdev_init_queue_locks(struct net_device *dev)
4756{
David S. Millere8a04642008-07-17 00:34:19 -07004757 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4758 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004759}
4760
Herbert Xub63365a2008-10-23 01:11:29 -07004761unsigned long netdev_fix_features(unsigned long features, const char *name)
4762{
4763 /* Fix illegal SG+CSUM combinations. */
4764 if ((features & NETIF_F_SG) &&
4765 !(features & NETIF_F_ALL_CSUM)) {
4766 if (name)
4767 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4768 "checksum feature.\n", name);
4769 features &= ~NETIF_F_SG;
4770 }
4771
4772 /* TSO requires that SG is present as well. */
4773 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4774 if (name)
4775 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4776 "SG feature.\n", name);
4777 features &= ~NETIF_F_TSO;
4778 }
4779
4780 if (features & NETIF_F_UFO) {
4781 if (!(features & NETIF_F_GEN_CSUM)) {
4782 if (name)
4783 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4784 "since no NETIF_F_HW_CSUM feature.\n",
4785 name);
4786 features &= ~NETIF_F_UFO;
4787 }
4788
4789 if (!(features & NETIF_F_SG)) {
4790 if (name)
4791 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4792 "since no NETIF_F_SG feature.\n", name);
4793 features &= ~NETIF_F_UFO;
4794 }
4795 }
4796
4797 return features;
4798}
4799EXPORT_SYMBOL(netdev_fix_features);
4800
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08004802 * netif_stacked_transfer_operstate - transfer operstate
4803 * @rootdev: the root or lower level device to transfer state from
4804 * @dev: the device to transfer operstate to
4805 *
4806 * Transfer operational state from root to device. This is normally
4807 * called when a stacking relationship exists between the root
4808 * device and the device(a leaf device).
4809 */
4810void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4811 struct net_device *dev)
4812{
4813 if (rootdev->operstate == IF_OPER_DORMANT)
4814 netif_dormant_on(dev);
4815 else
4816 netif_dormant_off(dev);
4817
4818 if (netif_carrier_ok(rootdev)) {
4819 if (!netif_carrier_ok(dev))
4820 netif_carrier_on(dev);
4821 } else {
4822 if (netif_carrier_ok(dev))
4823 netif_carrier_off(dev);
4824 }
4825}
4826EXPORT_SYMBOL(netif_stacked_transfer_operstate);
4827
4828/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829 * register_netdevice - register a network device
4830 * @dev: device to register
4831 *
4832 * Take a completed network device structure and add it to the kernel
4833 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4834 * chain. 0 is returned on success. A negative errno code is returned
4835 * on a failure to set up the device, or if the name is a duplicate.
4836 *
4837 * Callers must hold the rtnl semaphore. You may want
4838 * register_netdev() instead of this.
4839 *
4840 * BUGS:
4841 * The locking appears insufficient to guarantee two parallel registers
4842 * will not get the same name.
4843 */
4844
4845int register_netdevice(struct net_device *dev)
4846{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004847 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004848 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004849
4850 BUG_ON(dev_boot_phase);
4851 ASSERT_RTNL();
4852
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004853 might_sleep();
4854
Linus Torvalds1da177e2005-04-16 15:20:36 -07004855 /* When net_device's are persistent, this will be fatal. */
4856 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004857 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004858
David S. Millerf1f28aa2008-07-15 00:08:33 -07004859 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004860 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004861 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863 dev->iflink = -1;
4864
Eric Dumazetdf334542010-03-24 19:13:54 +00004865#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +00004866 if (!dev->num_rx_queues) {
4867 /*
4868 * Allocate a single RX queue if driver never called
4869 * alloc_netdev_mq
4870 */
4871
4872 dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
4873 if (!dev->_rx) {
4874 ret = -ENOMEM;
4875 goto out;
4876 }
4877
4878 dev->_rx->first = dev->_rx;
4879 atomic_set(&dev->_rx->count, 1);
4880 dev->num_rx_queues = 1;
4881 }
Eric Dumazetdf334542010-03-24 19:13:54 +00004882#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004884 if (dev->netdev_ops->ndo_init) {
4885 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886 if (ret) {
4887 if (ret > 0)
4888 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004889 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004890 }
4891 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004892
Octavian Purdilad9031022009-11-18 02:36:59 +00004893 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
4894 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004895 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004896
Eric W. Biederman881d9662007-09-17 11:56:21 -07004897 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 if (dev->iflink == -1)
4899 dev->iflink = dev->ifindex;
4900
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004901 /* Fix illegal checksum combinations */
4902 if ((dev->features & NETIF_F_HW_CSUM) &&
4903 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4904 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4905 dev->name);
4906 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4907 }
4908
4909 if ((dev->features & NETIF_F_NO_CSUM) &&
4910 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4911 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4912 dev->name);
4913 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4914 }
4915
Herbert Xub63365a2008-10-23 01:11:29 -07004916 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004917
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004918 /* Enable software GSO if SG is supported. */
4919 if (dev->features & NETIF_F_SG)
4920 dev->features |= NETIF_F_GSO;
4921
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004922 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00004923
4924 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4925 ret = notifier_to_errno(ret);
4926 if (ret)
4927 goto err_uninit;
4928
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004929 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004930 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004931 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004932 dev->reg_state = NETREG_REGISTERED;
4933
Linus Torvalds1da177e2005-04-16 15:20:36 -07004934 /*
4935 * Default initial state at registry is that the
4936 * device is present.
4937 */
4938
4939 set_bit(__LINK_STATE_PRESENT, &dev->state);
4940
Linus Torvalds1da177e2005-04-16 15:20:36 -07004941 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004942 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004943 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944
4945 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004946 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004947 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004948 if (ret) {
4949 rollback_registered(dev);
4950 dev->reg_state = NETREG_UNREGISTERED;
4951 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00004952 /*
4953 * Prevent userspace races by waiting until the network
4954 * device is fully setup before sending notifications.
4955 */
Patrick McHardya2835762010-02-26 06:34:51 +00004956 if (!dev->rtnl_link_ops ||
4957 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4958 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959
4960out:
4961 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004962
4963err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004964 if (dev->netdev_ops->ndo_uninit)
4965 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004966 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004968EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969
4970/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004971 * init_dummy_netdev - init a dummy network device for NAPI
4972 * @dev: device to init
4973 *
4974 * This takes a network device structure and initialize the minimum
4975 * amount of fields so it can be used to schedule NAPI polls without
4976 * registering a full blown interface. This is to be used by drivers
4977 * that need to tie several hardware interfaces to a single NAPI
4978 * poll scheduler due to HW limitations.
4979 */
4980int init_dummy_netdev(struct net_device *dev)
4981{
4982 /* Clear everything. Note we don't initialize spinlocks
4983 * are they aren't supposed to be taken by any of the
4984 * NAPI code and this dummy netdev is supposed to be
4985 * only ever used for NAPI polls
4986 */
4987 memset(dev, 0, sizeof(struct net_device));
4988
4989 /* make sure we BUG if trying to hit standard
4990 * register/unregister code path
4991 */
4992 dev->reg_state = NETREG_DUMMY;
4993
4994 /* initialize the ref count */
4995 atomic_set(&dev->refcnt, 1);
4996
4997 /* NAPI wants this */
4998 INIT_LIST_HEAD(&dev->napi_list);
4999
5000 /* a dummy interface is started by default */
5001 set_bit(__LINK_STATE_PRESENT, &dev->state);
5002 set_bit(__LINK_STATE_START, &dev->state);
5003
5004 return 0;
5005}
5006EXPORT_SYMBOL_GPL(init_dummy_netdev);
5007
5008
5009/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 * register_netdev - register a network device
5011 * @dev: device to register
5012 *
5013 * Take a completed network device structure and add it to the kernel
5014 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5015 * chain. 0 is returned on success. A negative errno code is returned
5016 * on a failure to set up the device, or if the name is a duplicate.
5017 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005018 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019 * and expands the device name if you passed a format string to
5020 * alloc_netdev.
5021 */
5022int register_netdev(struct net_device *dev)
5023{
5024 int err;
5025
5026 rtnl_lock();
5027
5028 /*
5029 * If the name is a format string the caller wants us to do a
5030 * name allocation.
5031 */
5032 if (strchr(dev->name, '%')) {
5033 err = dev_alloc_name(dev, dev->name);
5034 if (err < 0)
5035 goto out;
5036 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005037
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038 err = register_netdevice(dev);
5039out:
5040 rtnl_unlock();
5041 return err;
5042}
5043EXPORT_SYMBOL(register_netdev);
5044
5045/*
5046 * netdev_wait_allrefs - wait until all references are gone.
5047 *
5048 * This is called when unregistering network devices.
5049 *
5050 * Any protocol or device that holds a reference should register
5051 * for netdevice notification, and cleanup and put back the
5052 * reference if they receive an UNREGISTER event.
5053 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005054 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055 */
5056static void netdev_wait_allrefs(struct net_device *dev)
5057{
5058 unsigned long rebroadcast_time, warning_time;
5059
Eric Dumazete014deb2009-11-17 05:59:21 +00005060 linkwatch_forget_dev(dev);
5061
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062 rebroadcast_time = warning_time = jiffies;
5063 while (atomic_read(&dev->refcnt) != 0) {
5064 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005065 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066
5067 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005068 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005069 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
Octavian Purdila395264d2009-11-16 13:49:35 +00005070 * should have already handle it the first time */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071
5072 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5073 &dev->state)) {
5074 /* We must not have linkwatch events
5075 * pending on unregister. If this
5076 * happens, we simply run the queue
5077 * unscheduled, resulting in a noop
5078 * for this device.
5079 */
5080 linkwatch_run_queue();
5081 }
5082
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005083 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005084
5085 rebroadcast_time = jiffies;
5086 }
5087
5088 msleep(250);
5089
5090 if (time_after(jiffies, warning_time + 10 * HZ)) {
5091 printk(KERN_EMERG "unregister_netdevice: "
5092 "waiting for %s to become free. Usage "
5093 "count = %d\n",
5094 dev->name, atomic_read(&dev->refcnt));
5095 warning_time = jiffies;
5096 }
5097 }
5098}
5099
5100/* The sequence is:
5101 *
5102 * rtnl_lock();
5103 * ...
5104 * register_netdevice(x1);
5105 * register_netdevice(x2);
5106 * ...
5107 * unregister_netdevice(y1);
5108 * unregister_netdevice(y2);
5109 * ...
5110 * rtnl_unlock();
5111 * free_netdev(y1);
5112 * free_netdev(y2);
5113 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005114 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005115 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005116 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117 * without deadlocking with linkwatch via keventd.
5118 * 2) Since we run with the RTNL semaphore not held, we can sleep
5119 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005120 *
5121 * We must not return until all unregister events added during
5122 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124void netdev_run_todo(void)
5125{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005126 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005129 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005130
5131 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005132
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133 while (!list_empty(&list)) {
5134 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00005135 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005136 list_del(&dev->todo_list);
5137
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005138 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139 printk(KERN_ERR "network todo '%s' but state %d\n",
5140 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005141 dump_stack();
5142 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005144
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005145 dev->reg_state = NETREG_UNREGISTERED;
5146
Changli Gao152102c2010-03-30 20:16:22 +00005147 on_each_cpu(flush_backlog, dev, 1);
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005148
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005149 netdev_wait_allrefs(dev);
5150
5151 /* paranoia */
5152 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005153 WARN_ON(dev->ip_ptr);
5154 WARN_ON(dev->ip6_ptr);
5155 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005156
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005157 if (dev->destructor)
5158 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005159
5160 /* Free network device */
5161 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163}
5164
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005165/**
Eric Dumazetd83345a2009-11-16 03:36:51 +00005166 * dev_txq_stats_fold - fold tx_queues stats
5167 * @dev: device to get statistics from
5168 * @stats: struct net_device_stats to hold results
5169 */
5170void dev_txq_stats_fold(const struct net_device *dev,
5171 struct net_device_stats *stats)
5172{
5173 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5174 unsigned int i;
5175 struct netdev_queue *txq;
5176
5177 for (i = 0; i < dev->num_tx_queues; i++) {
5178 txq = netdev_get_tx_queue(dev, i);
5179 tx_bytes += txq->tx_bytes;
5180 tx_packets += txq->tx_packets;
5181 tx_dropped += txq->tx_dropped;
5182 }
5183 if (tx_bytes || tx_packets || tx_dropped) {
5184 stats->tx_bytes = tx_bytes;
5185 stats->tx_packets = tx_packets;
5186 stats->tx_dropped = tx_dropped;
5187 }
5188}
5189EXPORT_SYMBOL(dev_txq_stats_fold);
5190
5191/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005192 * dev_get_stats - get network device statistics
5193 * @dev: device to get statistics from
5194 *
5195 * Get network statistics from device. The device driver may provide
5196 * its own method by setting dev->netdev_ops->get_stats; otherwise
5197 * the internal statistics structure is used.
5198 */
5199const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005200{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005201 const struct net_device_ops *ops = dev->netdev_ops;
5202
5203 if (ops->ndo_get_stats)
5204 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005205
Eric Dumazetd83345a2009-11-16 03:36:51 +00005206 dev_txq_stats_fold(dev, &dev->stats);
5207 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07005208}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005209EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07005210
David S. Millerdc2b4842008-07-08 17:18:23 -07005211static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005212 struct netdev_queue *queue,
5213 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005214{
David S. Millerdc2b4842008-07-08 17:18:23 -07005215 queue->dev = dev;
5216}
5217
David S. Millerbb949fb2008-07-08 16:55:56 -07005218static void netdev_init_queues(struct net_device *dev)
5219{
David S. Millere8a04642008-07-17 00:34:19 -07005220 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5221 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005222 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005223}
5224
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005226 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227 * @sizeof_priv: size of private data to allocate space for
5228 * @name: device name format string
5229 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005230 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005231 *
5232 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005233 * and performs basic initialization. Also allocates subquue structs
5234 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005236struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5237 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238{
David S. Millere8a04642008-07-17 00:34:19 -07005239 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005240 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005241 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005242 struct net_device *p;
Eric Dumazetdf334542010-03-24 19:13:54 +00005243#ifdef CONFIG_RPS
5244 struct netdev_rx_queue *rx;
Tom Herbert0a9627f2010-03-16 08:03:29 +00005245 int i;
Eric Dumazetdf334542010-03-24 19:13:54 +00005246#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005248 BUG_ON(strlen(name) >= sizeof(dev->name));
5249
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005250 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005251 if (sizeof_priv) {
5252 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005253 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005254 alloc_size += sizeof_priv;
5255 }
5256 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005257 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005259 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005261 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262 return NULL;
5263 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264
Stephen Hemminger79439862008-07-21 13:28:44 -07005265 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005266 if (!tx) {
5267 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5268 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005269 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005270 }
5271
Eric Dumazetdf334542010-03-24 19:13:54 +00005272#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +00005273 rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5274 if (!rx) {
5275 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5276 "rx queues.\n");
5277 goto free_tx;
5278 }
5279
5280 atomic_set(&rx->count, queue_count);
5281
5282 /*
5283 * Set a pointer to first element in the array which holds the
5284 * reference count.
5285 */
5286 for (i = 0; i < queue_count; i++)
5287 rx[i].first = rx;
Eric Dumazetdf334542010-03-24 19:13:54 +00005288#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00005289
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005290 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005291 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005292
5293 if (dev_addr_init(dev))
Tom Herbert0a9627f2010-03-16 08:03:29 +00005294 goto free_rx;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005295
Jiri Pirko22bedad32010-04-01 21:22:57 +00005296 dev_mc_init(dev);
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005297 dev_uc_init(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00005298
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005299 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300
David S. Millere8a04642008-07-17 00:34:19 -07005301 dev->_tx = tx;
5302 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005303 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005304
Eric Dumazetdf334542010-03-24 19:13:54 +00005305#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +00005306 dev->_rx = rx;
5307 dev->num_rx_queues = queue_count;
Eric Dumazetdf334542010-03-24 19:13:54 +00005308#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00005309
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005310 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311
David S. Millerbb949fb2008-07-08 16:55:56 -07005312 netdev_init_queues(dev);
5313
Peter P Waskiewicz Jr15682bc2010-02-10 20:03:05 -08005314 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5315 dev->ethtool_ntuple_list.count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005316 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005317 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005318 INIT_LIST_HEAD(&dev->link_watch_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005319 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320 setup(dev);
5321 strcpy(dev->name, name);
5322 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005323
Tom Herbert0a9627f2010-03-16 08:03:29 +00005324free_rx:
Eric Dumazetdf334542010-03-24 19:13:54 +00005325#ifdef CONFIG_RPS
Tom Herbert0a9627f2010-03-16 08:03:29 +00005326 kfree(rx);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005327free_tx:
Eric Dumazetdf334542010-03-24 19:13:54 +00005328#endif
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005329 kfree(tx);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005330free_p:
5331 kfree(p);
5332 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005334EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005335
5336/**
5337 * free_netdev - free network device
5338 * @dev: device
5339 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005340 * This function does the last stage of destroying an allocated device
5341 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342 * If this is the last reference then it will be freed.
5343 */
5344void free_netdev(struct net_device *dev)
5345{
Herbert Xud565b0a2008-12-15 23:38:52 -08005346 struct napi_struct *p, *n;
5347
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005348 release_net(dev_net(dev));
5349
David S. Millere8a04642008-07-17 00:34:19 -07005350 kfree(dev->_tx);
5351
Jiri Pirkof001fde2009-05-05 02:48:28 +00005352 /* Flush device addresses */
5353 dev_addr_flush(dev);
5354
Peter P Waskiewicz Jr15682bc2010-02-10 20:03:05 -08005355 /* Clear ethtool n-tuple list */
5356 ethtool_ntuple_flush(dev);
5357
Herbert Xud565b0a2008-12-15 23:38:52 -08005358 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5359 netif_napi_del(p);
5360
Stephen Hemminger3041a062006-05-26 13:25:24 -07005361 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005362 if (dev->reg_state == NETREG_UNINITIALIZED) {
5363 kfree((char *)dev - dev->padded);
5364 return;
5365 }
5366
5367 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5368 dev->reg_state = NETREG_RELEASED;
5369
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005370 /* will free via device release */
5371 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005372}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005373EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005374
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005375/**
5376 * synchronize_net - Synchronize with packet receive processing
5377 *
5378 * Wait for packets currently being received to be done.
5379 * Does not block later packets from starting.
5380 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005381void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005382{
5383 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005384 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005386EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387
5388/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005389 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005390 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005391 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005392 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005393 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005394 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005395 * If head not NULL, device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005396 *
5397 * Callers must hold the rtnl semaphore. You may want
5398 * unregister_netdev() instead of this.
5399 */
5400
Eric Dumazet44a08732009-10-27 07:03:04 +00005401void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005402{
Herbert Xua6620712007-12-12 19:21:56 -08005403 ASSERT_RTNL();
5404
Eric Dumazet44a08732009-10-27 07:03:04 +00005405 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005406 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005407 } else {
5408 rollback_registered(dev);
5409 /* Finish processing unregister after unlock */
5410 net_set_todo(dev);
5411 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005412}
Eric Dumazet44a08732009-10-27 07:03:04 +00005413EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414
5415/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005416 * unregister_netdevice_many - unregister many devices
5417 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005418 */
5419void unregister_netdevice_many(struct list_head *head)
5420{
5421 struct net_device *dev;
5422
5423 if (!list_empty(head)) {
5424 rollback_registered_many(head);
5425 list_for_each_entry(dev, head, unreg_list)
5426 net_set_todo(dev);
5427 }
5428}
Eric Dumazet63c80992009-10-27 07:06:49 +00005429EXPORT_SYMBOL(unregister_netdevice_many);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005430
5431/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432 * unregister_netdev - remove device from the kernel
5433 * @dev: device
5434 *
5435 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005436 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005437 *
5438 * This is just a wrapper for unregister_netdevice that takes
5439 * the rtnl semaphore. In general you want to use this and not
5440 * unregister_netdevice.
5441 */
5442void unregister_netdev(struct net_device *dev)
5443{
5444 rtnl_lock();
5445 unregister_netdevice(dev);
5446 rtnl_unlock();
5447}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005448EXPORT_SYMBOL(unregister_netdev);
5449
Eric W. Biedermance286d32007-09-12 13:53:49 +02005450/**
5451 * dev_change_net_namespace - move device to different nethost namespace
5452 * @dev: device
5453 * @net: network namespace
5454 * @pat: If not NULL name pattern to try if the current device name
5455 * is already taken in the destination network namespace.
5456 *
5457 * This function shuts down a device interface and moves it
5458 * to a new network namespace. On success 0 is returned, on
5459 * a failure a netagive errno code is returned.
5460 *
5461 * Callers must hold the rtnl semaphore.
5462 */
5463
5464int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5465{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005466 int err;
5467
5468 ASSERT_RTNL();
5469
5470 /* Don't allow namespace local devices to be moved. */
5471 err = -EINVAL;
5472 if (dev->features & NETIF_F_NETNS_LOCAL)
5473 goto out;
5474
Eric W. Biederman38918452008-10-27 17:51:47 -07005475#ifdef CONFIG_SYSFS
5476 /* Don't allow real devices to be moved when sysfs
5477 * is enabled.
5478 */
5479 err = -EINVAL;
5480 if (dev->dev.parent)
5481 goto out;
5482#endif
5483
Eric W. Biedermance286d32007-09-12 13:53:49 +02005484 /* Ensure the device has been registrered */
5485 err = -EINVAL;
5486 if (dev->reg_state != NETREG_REGISTERED)
5487 goto out;
5488
5489 /* Get out if there is nothing todo */
5490 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005491 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005492 goto out;
5493
5494 /* Pick the destination device name, and ensure
5495 * we can use it in the destination network namespace.
5496 */
5497 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005498 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005499 /* We get here if we can't use the current device name */
5500 if (!pat)
5501 goto out;
Octavian Purdilad9031022009-11-18 02:36:59 +00005502 if (dev_get_valid_name(net, pat, dev->name, 1))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005503 goto out;
5504 }
5505
5506 /*
5507 * And now a mini version of register_netdevice unregister_netdevice.
5508 */
5509
5510 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005511 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005512
5513 /* And unlink it from device chain */
5514 err = -ENODEV;
5515 unlist_netdevice(dev);
5516
5517 synchronize_net();
5518
5519 /* Shutdown queueing discipline. */
5520 dev_shutdown(dev);
5521
5522 /* Notify protocols, that we are about to destroy
5523 this device. They should clean all the things.
5524 */
5525 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005526 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005527
5528 /*
5529 * Flush the unicast and multicast chains
5530 */
Jiri Pirkoa748ee22010-04-01 21:22:09 +00005531 dev_uc_flush(dev);
Jiri Pirko22bedad32010-04-01 21:22:57 +00005532 dev_mc_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005533
Eric W. Biederman38918452008-10-27 17:51:47 -07005534 netdev_unregister_kobject(dev);
5535
Eric W. Biedermance286d32007-09-12 13:53:49 +02005536 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005537 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005538
Eric W. Biedermance286d32007-09-12 13:53:49 +02005539 /* If there is an ifindex conflict assign a new one */
5540 if (__dev_get_by_index(net, dev->ifindex)) {
5541 int iflink = (dev->iflink == dev->ifindex);
5542 dev->ifindex = dev_new_index(net);
5543 if (iflink)
5544 dev->iflink = dev->ifindex;
5545 }
5546
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005547 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005548 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005549 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005550
5551 /* Add the device back in the hashes */
5552 list_netdevice(dev);
5553
5554 /* Notify protocols, that a new device appeared. */
5555 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5556
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005557 /*
5558 * Prevent userspace races by waiting until the network
5559 * device is fully setup before sending notifications.
5560 */
5561 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5562
Eric W. Biedermance286d32007-09-12 13:53:49 +02005563 synchronize_net();
5564 err = 0;
5565out:
5566 return err;
5567}
Johannes Berg463d0182009-07-14 00:33:35 +02005568EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005569
Linus Torvalds1da177e2005-04-16 15:20:36 -07005570static int dev_cpu_callback(struct notifier_block *nfb,
5571 unsigned long action,
5572 void *ocpu)
5573{
5574 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005575 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576 struct sk_buff *skb;
5577 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5578 struct softnet_data *sd, *oldsd;
5579
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005580 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005581 return NOTIFY_OK;
5582
5583 local_irq_disable();
5584 cpu = smp_processor_id();
5585 sd = &per_cpu(softnet_data, cpu);
5586 oldsd = &per_cpu(softnet_data, oldcpu);
5587
5588 /* Find end of our completion_queue. */
5589 list_skb = &sd->completion_queue;
5590 while (*list_skb)
5591 list_skb = &(*list_skb)->next;
5592 /* Append completion queue from offline CPU. */
5593 *list_skb = oldsd->completion_queue;
5594 oldsd->completion_queue = NULL;
5595
5596 /* Find end of our output_queue. */
5597 list_net = &sd->output_queue;
5598 while (*list_net)
5599 list_net = &(*list_net)->next_sched;
5600 /* Append output queue from offline CPU. */
5601 *list_net = oldsd->output_queue;
5602 oldsd->output_queue = NULL;
5603
5604 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5605 local_irq_enable();
5606
5607 /* Process offline CPU's input_pkt_queue */
Tom Herbertfec5e652010-04-16 16:01:27 -07005608 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005609 netif_rx(skb);
Eric Dumazete36fa2f2010-04-19 21:17:14 +00005610 input_queue_head_incr(oldsd);
Tom Herbertfec5e652010-04-16 16:01:27 -07005611 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005612
5613 return NOTIFY_OK;
5614}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005615
5616
Herbert Xu7f353bf2007-08-10 15:47:58 -07005617/**
Herbert Xub63365a2008-10-23 01:11:29 -07005618 * netdev_increment_features - increment feature set by one
5619 * @all: current feature set
5620 * @one: new feature set
5621 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005622 *
5623 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005624 * @one to the master device with current feature set @all. Will not
5625 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005626 */
Herbert Xub63365a2008-10-23 01:11:29 -07005627unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5628 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005629{
Herbert Xub63365a2008-10-23 01:11:29 -07005630 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005631 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005632 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5633 else if (mask & NETIF_F_ALL_CSUM) {
5634 /* If one device supports v4/v6 checksumming, set for all. */
5635 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5636 !(all & NETIF_F_GEN_CSUM)) {
5637 all &= ~NETIF_F_ALL_CSUM;
5638 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5639 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005640
Herbert Xub63365a2008-10-23 01:11:29 -07005641 /* If one device supports hw checksumming, set for all. */
5642 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5643 all &= ~NETIF_F_ALL_CSUM;
5644 all |= NETIF_F_HW_CSUM;
5645 }
5646 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005647
Herbert Xub63365a2008-10-23 01:11:29 -07005648 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005649
Herbert Xub63365a2008-10-23 01:11:29 -07005650 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005651 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005652 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005653
5654 return all;
5655}
Herbert Xub63365a2008-10-23 01:11:29 -07005656EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07005657
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005658static struct hlist_head *netdev_create_hash(void)
5659{
5660 int i;
5661 struct hlist_head *hash;
5662
5663 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5664 if (hash != NULL)
5665 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5666 INIT_HLIST_HEAD(&hash[i]);
5667
5668 return hash;
5669}
5670
Eric W. Biederman881d9662007-09-17 11:56:21 -07005671/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005672static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005673{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005674 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005675
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005676 net->dev_name_head = netdev_create_hash();
5677 if (net->dev_name_head == NULL)
5678 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005679
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005680 net->dev_index_head = netdev_create_hash();
5681 if (net->dev_index_head == NULL)
5682 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005683
5684 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005685
5686err_idx:
5687 kfree(net->dev_name_head);
5688err_name:
5689 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005690}
5691
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005692/**
5693 * netdev_drivername - network driver for the device
5694 * @dev: network device
5695 * @buffer: buffer for resulting name
5696 * @len: size of buffer
5697 *
5698 * Determine network driver for device.
5699 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005700char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005701{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005702 const struct device_driver *driver;
5703 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005704
5705 if (len <= 0 || !buffer)
5706 return buffer;
5707 buffer[0] = 0;
5708
5709 parent = dev->dev.parent;
5710
5711 if (!parent)
5712 return buffer;
5713
5714 driver = parent->driver;
5715 if (driver && driver->name)
5716 strlcpy(buffer, driver->name, len);
5717 return buffer;
5718}
5719
Pavel Emelyanov46650792007-10-08 20:38:39 -07005720static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005721{
5722 kfree(net->dev_name_head);
5723 kfree(net->dev_index_head);
5724}
5725
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005726static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005727 .init = netdev_init,
5728 .exit = netdev_exit,
5729};
5730
Pavel Emelyanov46650792007-10-08 20:38:39 -07005731static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005732{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005733 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005734 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005735 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02005736 * initial network namespace
5737 */
5738 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005739 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005740 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005741 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005742
5743 /* Ignore unmoveable devices (i.e. loopback) */
5744 if (dev->features & NETIF_F_NETNS_LOCAL)
5745 continue;
5746
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005747 /* Leave virtual devices for the generic cleanup */
5748 if (dev->rtnl_link_ops)
5749 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005750
Eric W. Biedermance286d32007-09-12 13:53:49 +02005751 /* Push remaing network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005752 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5753 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005754 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005755 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005756 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005757 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005758 }
5759 }
5760 rtnl_unlock();
5761}
5762
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00005763static void __net_exit default_device_exit_batch(struct list_head *net_list)
5764{
5765 /* At exit all network devices most be removed from a network
5766 * namespace. Do this in the reverse order of registeration.
5767 * Do this across as many network namespaces as possible to
5768 * improve batching efficiency.
5769 */
5770 struct net_device *dev;
5771 struct net *net;
5772 LIST_HEAD(dev_kill_list);
5773
5774 rtnl_lock();
5775 list_for_each_entry(net, net_list, exit_list) {
5776 for_each_netdev_reverse(net, dev) {
5777 if (dev->rtnl_link_ops)
5778 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5779 else
5780 unregister_netdevice_queue(dev, &dev_kill_list);
5781 }
5782 }
5783 unregister_netdevice_many(&dev_kill_list);
5784 rtnl_unlock();
5785}
5786
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005787static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005788 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00005789 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02005790};
5791
Linus Torvalds1da177e2005-04-16 15:20:36 -07005792/*
5793 * Initialize the DEV module. At boot time this walks the device list and
5794 * unhooks any devices that fail to initialise (normally hardware not
5795 * present) and leaves us with a valid list of present and active devices.
5796 *
5797 */
5798
5799/*
5800 * This is called single threaded during boot, so no need
5801 * to take the rtnl semaphore.
5802 */
5803static int __init net_dev_init(void)
5804{
5805 int i, rc = -ENOMEM;
5806
5807 BUG_ON(!dev_boot_phase);
5808
Linus Torvalds1da177e2005-04-16 15:20:36 -07005809 if (dev_proc_init())
5810 goto out;
5811
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005812 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005813 goto out;
5814
5815 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005816 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005817 INIT_LIST_HEAD(&ptype_base[i]);
5818
Eric W. Biederman881d9662007-09-17 11:56:21 -07005819 if (register_pernet_subsys(&netdev_net_ops))
5820 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005821
5822 /*
5823 * Initialise the packet receive queues.
5824 */
5825
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005826 for_each_possible_cpu(i) {
Eric Dumazete36fa2f2010-04-19 21:17:14 +00005827 struct softnet_data *sd = &per_cpu(softnet_data, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005828
Eric Dumazete36fa2f2010-04-19 21:17:14 +00005829 skb_queue_head_init(&sd->input_pkt_queue);
5830 sd->completion_queue = NULL;
5831 INIT_LIST_HEAD(&sd->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005832
Eric Dumazetdf334542010-03-24 19:13:54 +00005833#ifdef CONFIG_RPS
Eric Dumazete36fa2f2010-04-19 21:17:14 +00005834 sd->csd.func = rps_trigger_softirq;
5835 sd->csd.info = sd;
5836 sd->csd.flags = 0;
5837 sd->cpu = i;
Tom Herbert1e94d722010-03-18 17:45:44 -07005838#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00005839
Eric Dumazete36fa2f2010-04-19 21:17:14 +00005840 sd->backlog.poll = process_backlog;
5841 sd->backlog.weight = weight_p;
5842 sd->backlog.gro_list = NULL;
5843 sd->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005844 }
5845
Linus Torvalds1da177e2005-04-16 15:20:36 -07005846 dev_boot_phase = 0;
5847
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005848 /* The loopback device is special if any other network devices
5849 * is present in a network namespace the loopback device must
5850 * be present. Since we now dynamically allocate and free the
5851 * loopback device ensure this invariant is maintained by
5852 * keeping the loopback device as the first device on the
5853 * list of network devices. Ensuring the loopback devices
5854 * is the first device that appears and the last network device
5855 * that disappears.
5856 */
5857 if (register_pernet_device(&loopback_net_ops))
5858 goto out;
5859
5860 if (register_pernet_device(&default_device_ops))
5861 goto out;
5862
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005863 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5864 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005865
5866 hotcpu_notifier(dev_cpu_callback, 0);
5867 dst_init();
5868 dev_mcast_init();
5869 rc = 0;
5870out:
5871 return rc;
5872}
5873
5874subsys_initcall(net_dev_init);
5875
Krishna Kumare88721f2009-02-18 17:55:02 -08005876static int __init initialize_hashrnd(void)
5877{
Tom Herbert0a9627f2010-03-16 08:03:29 +00005878 get_random_bytes(&hashrnd, sizeof(hashrnd));
Krishna Kumare88721f2009-02-18 17:55:02 -08005879 return 0;
5880}
5881
5882late_initcall_sync(initialize_hashrnd);
5883